From f3c578e3a4faabf18b3b3bde85ee34f24abdb15b Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 18 Jun 2019 13:06:34 -0400 Subject: [PATCH 001/335] Added basic scaffolding for APIClient and Job classes and tests --- strawberryfields/api_client.py | 101 ++++++++++++++++++++++++++++ tests/api_client/test_api_client.py | 80 ++++++++++++++++++++++ tests/pytest.ini | 1 + 3 files changed, 182 insertions(+) create mode 100644 strawberryfields/api_client.py create mode 100644 tests/api_client/test_api_client.py diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py new file mode 100644 index 000000000..0a07466a9 --- /dev/null +++ b/strawberryfields/api_client.py @@ -0,0 +1,101 @@ +from urllib.parse import urljoin +import requests + + +class APIClient: + ALLOWED_BASE_URLS = [ + 'localhost', + ] + DEFAULT_BASE_URL = 'localhost/' + CONFIGURATION_PATH = '' + + def __init__(self, *args, **kwargs): + # TODO: Load username, password, or authentication token from + # configuration file + + self.USE_SSL = kwargs.get('use_ssl', True) + self.AUTHENTICATION_TOKEN = kwargs.get('authentication_token', '') + self.HEADERS = {} + + if 'headers' in kwargs: + self.HEADERS.update(kwargs['headers']) + + if 'base_url' in kwargs: + base_url = kwargs['base_url'] + if base_url in self.ALLOWED_BASE_URLS: + self.BASE_URL = base_url + else: + raise ValueError('base_url parameter not in allowed list') + else: + self.BASE_URL = self.DEFAULT_BASE_URL + + def load_configuration(self): + raise NotImplementedError() + + def authenticate(self, username, password): + ''' + Retrieve an authentication token from the server via username + and password authentication. + ''' + raise NotImplementedError() + + def set_authorization_header(self, authentication_token): + self.headers['Authorization'] = authentication_token + + def join_path(self, path): + return urljoin(self.BASE_URL, path) + + def get(self, path): + return requests.get( + url=self.join_path(path), headers=self.headers) + + def post(self, path, payload): + return requests.post( + url=self.join_path(path), headers=self.headers, data=payload) + + +class Job: + RESOURCE_PATH = 'jobs/' + FIELDS = { + "id": int, + "status": str, + "result_url": str, + "circuit_url": str, + "created_at": str, + "started_at": str, + "finished_at": str, + "running_time": str, + } + + def __init__(self, client=None, id=None, *args, **kwargs): + if client is None: + client = APIClient() + + self.client = client + + if id is not None: + self.get(id) + + def join_path(self, path): + return urljoin(self.RESOURCE_PATH, path) + + def update_job(self, data): + for key in self.FIELDS: + setattr(self, key, self.FIELDS[key](data.get(key))) + + def get(self, job_id): + response = self.client.get(self.join_path(str(job_id))) + if response.status_code == requests.status_codes.OK: + self.update_job(response.json()) + else: + # TODO: handle errors + raise Exception(response.status_code) + + def create(self, params): + # TODO do basic validation + response = self.client.post(self.RESOURCE_PATH, params) + if response.status_code == requests.status_codes.CREATED: + self.update_job(response.json()) + else: + # TODO: handle errors + raise Exception(response.status_code) diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py new file mode 100644 index 000000000..abba093e8 --- /dev/null +++ b/tests/api_client/test_api_client.py @@ -0,0 +1,80 @@ +# Copyright 2019 Xanadu Quantum Technologies Inc. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r""" +Unit tests for API client +""" + +import pytest +from strawberryfields import api_client + + +@pytest.fixture +def client(): + return api_client.APIClient() + + +@pytest.mark.api_client +class TestAPIClient: + def test_init_default_client(self): + client = api_client.APIClient() + assert client.USE_SSL is True + assert client.AUTHENTICATION_TOKEN == '' + assert client.BASE_URL == 'localhost/' + assert client.HEADERS == {} + + def test_init_custom_token_client(self): + test_token = 'TEST' + client = api_client.APIClient(authentication_token=test_token) + assert client.AUTHENTICATION_TOKEN == test_token + + def test_load_configuration(self, client): + with pytest.raises(NotImplementedError): + client.load_configuration() + + def test_authenticate(self, client): + with pytest.raises(NotImplementedError): + username = 'TEST_USER' + password = 'TEST_PASSWORD' + client.authenticate(username, password) + + def test_set_authorization_header(self): + assert True + + def test_join_path(self, client): + assert client.join_path('jobs') == 'localhost/jobs' + + def test_get(self, client): + assert True + + def test_post(self, client): + assert True + + +@pytest.mark.api_client +class TestJob: + def test_init(self): + assert True + + def test_get(self): + assert True + + def test_update_job(self): + assert True + + def test_create(self): + assert True + + def test_join_path(self): + assert True diff --git a/tests/pytest.ini b/tests/pytest.ini index 249ced5fe..b23730eb2 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -2,3 +2,4 @@ markers = backends(name1, name2, ...): test applies to named backends only frontend: test applies to frontend only + api_client: test applies to API Client only From 0581c8b2981e4c1e07e6099b94528390c1a9ca75 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 19 Jun 2019 10:02:25 -0400 Subject: [PATCH 002/335] Added more abstract Resource and ResourceManager classes, refactored. - Refactored APIClient to simplify instantiation - Added more abstract Resource class to be extended for each resource - Added ResourceManager class to manage the connectio with the API - Added docstrings - Updated tests and added preliminary tests for the `create` method --- strawberryfields/api_client.py | 203 +++++++++++++++++++++------- tests/api_client/test_api_client.py | 72 +++++++++- 2 files changed, 219 insertions(+), 56 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 0a07466a9..3ecbc2648 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -1,15 +1,41 @@ +# Copyright 2019 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r""" +API Client library that interacts with the compute-service API over the HTTP +protocol. +""" + from urllib.parse import urljoin import requests +import json class APIClient: + ''' + An object that allows the user to connect to the compute-service API. + ''' ALLOWED_BASE_URLS = [ 'localhost', ] - DEFAULT_BASE_URL = 'localhost/' + DEFAULT_BASE_URL = 'localhost' CONFIGURATION_PATH = '' - def __init__(self, *args, **kwargs): + def __init__(self, use_ssl=True, base_url=None, *args, **kwargs): + ''' + Initialize the API client with various parameters. + ''' # TODO: Load username, password, or authentication token from # configuration file @@ -17,19 +43,18 @@ def __init__(self, *args, **kwargs): self.AUTHENTICATION_TOKEN = kwargs.get('authentication_token', '') self.HEADERS = {} - if 'headers' in kwargs: - self.HEADERS.update(kwargs['headers']) - - if 'base_url' in kwargs: - base_url = kwargs['base_url'] - if base_url in self.ALLOWED_BASE_URLS: - self.BASE_URL = base_url - else: - raise ValueError('base_url parameter not in allowed list') - else: + if not base_url: self.BASE_URL = self.DEFAULT_BASE_URL + elif base_url in self.ALLOWED_BASE_URLS: + self.BASE_URL = base_url + else: + raise ValueError('base_url parameter not in allowed list') def load_configuration(self): + ''' + Loads username, password, and/or authentication token from a config + file. + ''' raise NotImplementedError() def authenticate(self, username, password): @@ -40,62 +65,138 @@ def authenticate(self, username, password): raise NotImplementedError() def set_authorization_header(self, authentication_token): + ''' + Adds the authorization header to the headers dictionary to be included + with all API requests. + ''' self.headers['Authorization'] = authentication_token def join_path(self, path): - return urljoin(self.BASE_URL, path) + ''' + Joins a base url with an additional path (e.g. a resource name and ID) + ''' + return urljoin(f"{self.BASE_URL}/", path) def get(self, path): + ''' + Sends a GET request to the provided path. Returns a response object. + ''' return requests.get( - url=self.join_path(path), headers=self.headers) + url=self.join_path(path), headers=self.HEADERS) def post(self, path, payload): + ''' + Converts payload to a JSON string. Sends a POST request to the provided + path. Returns a response object. + ''' + data = json.dumps(payload) return requests.post( - url=self.join_path(path), headers=self.headers, data=payload) + url=self.join_path(path), headers=self.HEADERS, data=data) -class Job: - RESOURCE_PATH = 'jobs/' - FIELDS = { - "id": int, - "status": str, - "result_url": str, - "circuit_url": str, - "created_at": str, - "started_at": str, - "finished_at": str, - "running_time": str, - } - - def __init__(self, client=None, id=None, *args, **kwargs): - if client is None: - client = APIClient() - - self.client = client - - if id is not None: - self.get(id) +class ResourceManager: + def __init__(self, resource, client=None): + ''' + Initialize the manager with resource and client instances . 
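For orientation, a minimal usage sketch of the client defined above (illustrative only: the payload is invented, 'localhost' mirrors the default allowed base URL, and a reachable API server is assumed):

.. code-block:: python

    from strawberryfields.api_client import APIClient

    client = APIClient(base_url='localhost')
    # POST a made-up payload to the jobs endpoint; the client serializes it
    # to JSON and sends it with the shared headers.
    response = client.post('jobs', {'circuit': '...'})
    if response.status_code == 201:
        job_data = response.json()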
A client + instance is used as a persistent HTTP communications object, and a + resource instance corresponds to a particular type of resource (e.g. + Job) + ''' + setattr(self, 'resource', resource) + setattr(self, 'client', client or APIClient()) def join_path(self, path): - return urljoin(self.RESOURCE_PATH, path) - - def update_job(self, data): - for key in self.FIELDS: - setattr(self, key, self.FIELDS[key](data.get(key))) + ''' + Joins a resource base path with an additional path (e.g. an ID) + ''' + return urljoin(f"{self.resource.PATH}/", path) def get(self, job_id): + ''' + Attempts to retrieve a particular record by sending a GET + request to the appropriate endpoint. If successful, the resource + object is populated with the data in the response. + ''' + if 'GET' not in self.resource.SUPPORTED_METHODS: + raise TypeError('GET method on this resource is not supported') + response = self.client.get(self.join_path(str(job_id))) - if response.status_code == requests.status_codes.OK: - self.update_job(response.json()) + if response.status_code == requests.status_codes.codes.OK: + self.refresh_data(response.json()) else: - # TODO: handle errors - raise Exception(response.status_code) + self.handle_error_response(response) def create(self, params): - # TODO do basic validation - response = self.client.post(self.RESOURCE_PATH, params) - if response.status_code == requests.status_codes.CREATED: - self.update_job(response.json()) + ''' + Attempts to create a new instance of a resource by sending a POST + request to the appropriate endpoint. + ''' + if 'POST' not in self.resource.SUPPORTED_METHODS: + raise TypeError('POST method on this resource is not supported') + + if getattr(self.resource, 'id', None) is not None: + raise TypeError('ID must be None when calling create') + + response = self.client.post(self.resource.PATH, params) + if response.status_code == 201: + self.refresh_data(response.json()) else: - # TODO: handle errors - raise Exception(response.status_code) + raise self.handle_error_response(response) + + def handle_error_response(self, response): + ''' + Handles an error response that is returned by the server. + ''' + + if response.status_code == 400: + pass + elif response.status_code == 401: + pass + elif response.status_code == 409: + pass + elif response.status_code in (500, 503, 504): + pass + + def refresh_data(self, data): + ''' + Refreshes the instance's attributes with the provided data and + converts it to the correct type. + ''' + + for key in self.resource.FIELDS: + if key in data and data[key] is not None: + setattr( + self.resource, key, self.resource.FIELDS[key](data[key])) + else: + setattr(self.resource, key, None) + + +class Resource: + ''' + A base class for an API resource. This class should be extended for each + resource endpoint. + ''' + SUPPORTED_METHODS = () + PATH = '' + FIELDS = {} + + def __init__(self): + self.manager = ResourceManager(self) + + +class Job(Resource): + ''' + The API resource corresponding to jobs. 
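As a rough sketch of how a resource and its manager are meant to interact (the job ID and payload are invented, and a running API server is assumed):

.. code-block:: python

    from strawberryfields.api_client import Job

    job = Job()              # attaches a ResourceManager with a default APIClient
    job.manager.get(1234)    # GET jobs/1234; refresh_data() populates the fields on success
    print(job.status)

    new_job = Job()
    new_job.manager.create({'circuit': '...'})   # POST to the jobs endpoint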
+ ''' + SUPPORTED_METHODS = ('GET', 'POST') + PATH = 'jobs' + FIELDS = { + 'id': int, + 'status': str, + 'result_url': str, + 'circuit_url': str, + 'created_at': str, + 'started_at': str, + 'finished_at': str, + 'running_time': str, + } diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index abba093e8..e48bc588a 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -18,6 +18,9 @@ import pytest from strawberryfields import api_client +from strawberryfields.api_client import requests + +status_codes = requests.status_codes.codes @pytest.fixture @@ -25,13 +28,60 @@ def client(): return api_client.APIClient() +SAMPLE_JOB_CREATE_RESPONSE = { + "id": 29583, + "status": "queued", + "result_url": "https://platform.xanadu.ai/jobs/29583/result", + "circuit_url": "https://platform.xanadu.ai/jobs/29583/circuit", + "created_at": "2019-05-24T15:55:43.872531Z", + "started_at": None, + "finished_at": None, + "running_time": None, +} + + +class MockCreatedResponse: + possible_responses = { + 201: SAMPLE_JOB_CREATE_RESPONSE, + 400: { + "code": "parse-error", + "detail": ( + "The blackbird script could not be parsed. " + "Please fix errors in the script and try again.") + }, + 401: { + "code": "unauthenticated", + "detail": "Requires authentication" + }, + 409: { + "code": "unsupported-circuit", + "detail": ( + "This circuit is not compatible with the specified hardware.") + }, + 500: { + "code": "server-error", + "detail": ( + "Unexpected server error. Please try your request again " + "later.") + }, + } + + status_code = None + + def __init__(self, status_code): + self.status_code = status_code + + def json(self): + return self.possible_responses[self.status_code] + + @pytest.mark.api_client class TestAPIClient: def test_init_default_client(self): client = api_client.APIClient() assert client.USE_SSL is True assert client.AUTHENTICATION_TOKEN == '' - assert client.BASE_URL == 'localhost/' + assert client.BASE_URL == 'localhost' assert client.HEADERS == {} def test_init_custom_token_client(self): @@ -55,7 +105,7 @@ def test_set_authorization_header(self): def test_join_path(self, client): assert client.join_path('jobs') == 'localhost/jobs' - def test_get(self, client): + def test_get(self, client, monkeypatch): assert True def test_post(self, client): @@ -70,11 +120,23 @@ def test_init(self): def test_get(self): assert True - def test_update_job(self): + def test_refresh_data(self): assert True - def test_create(self): - assert True + def test_create(self, monkeypatch): + monkeypatch.setattr( + requests, + "post", + lambda url, headers, data: MockCreatedResponse(201)) + job = api_client.Job() + job.manager.create(params={}) + assert job.id == SAMPLE_JOB_CREATE_RESPONSE['id'] + assert job.status == SAMPLE_JOB_CREATE_RESPONSE['status'] + assert job.result_url == SAMPLE_JOB_CREATE_RESPONSE['result_url'] + assert job.created_at == SAMPLE_JOB_CREATE_RESPONSE['created_at'] + assert job.started_at == SAMPLE_JOB_CREATE_RESPONSE['started_at'] + assert job.finished_at == SAMPLE_JOB_CREATE_RESPONSE['finished_at'] + assert job.running_time == SAMPLE_JOB_CREATE_RESPONSE['running_time'] def test_join_path(self): assert True From 738f80a20b77943373dbf2c8fadbc23e74a00634 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 19 Jun 2019 10:33:47 -0400 Subject: [PATCH 003/335] Refactor response handling, store status code on manager object --- strawberryfields/api_client.py | 149 +++++++++++++++------------- tests/api_client/test_api_client.py | 26 +++-- 2 
files changed, 99 insertions(+), 76 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 3ecbc2648..26cf09f32 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -23,24 +23,23 @@ class APIClient: - ''' + """ An object that allows the user to connect to the compute-service API. - ''' - ALLOWED_BASE_URLS = [ - 'localhost', - ] - DEFAULT_BASE_URL = 'localhost' - CONFIGURATION_PATH = '' + """ + + ALLOWED_BASE_URLS = ["localhost"] + DEFAULT_BASE_URL = "localhost" + CONFIGURATION_PATH = "" def __init__(self, use_ssl=True, base_url=None, *args, **kwargs): - ''' + """ Initialize the API client with various parameters. - ''' + """ # TODO: Load username, password, or authentication token from # configuration file - self.USE_SSL = kwargs.get('use_ssl', True) - self.AUTHENTICATION_TOKEN = kwargs.get('authentication_token', '') + self.USE_SSL = kwargs.get("use_ssl", True) + self.AUTHENTICATION_TOKEN = kwargs.get("authentication_token", "") self.HEADERS = {} if not base_url: @@ -48,77 +47,75 @@ def __init__(self, use_ssl=True, base_url=None, *args, **kwargs): elif base_url in self.ALLOWED_BASE_URLS: self.BASE_URL = base_url else: - raise ValueError('base_url parameter not in allowed list') + raise ValueError("base_url parameter not in allowed list") def load_configuration(self): - ''' + """ Loads username, password, and/or authentication token from a config file. - ''' + """ raise NotImplementedError() def authenticate(self, username, password): - ''' + """ Retrieve an authentication token from the server via username and password authentication. - ''' + """ raise NotImplementedError() def set_authorization_header(self, authentication_token): - ''' + """ Adds the authorization header to the headers dictionary to be included with all API requests. - ''' - self.headers['Authorization'] = authentication_token + """ + self.headers["Authorization"] = authentication_token def join_path(self, path): - ''' + """ Joins a base url with an additional path (e.g. a resource name and ID) - ''' + """ return urljoin(f"{self.BASE_URL}/", path) def get(self, path): - ''' + """ Sends a GET request to the provided path. Returns a response object. - ''' - return requests.get( - url=self.join_path(path), headers=self.HEADERS) + """ + return requests.get(url=self.join_path(path), headers=self.HEADERS) def post(self, path, payload): - ''' + """ Converts payload to a JSON string. Sends a POST request to the provided path. Returns a response object. - ''' + """ data = json.dumps(payload) - return requests.post( - url=self.join_path(path), headers=self.HEADERS, data=data) + return requests.post(url=self.join_path(path), headers=self.HEADERS, data=data) class ResourceManager: def __init__(self, resource, client=None): - ''' + """ Initialize the manager with resource and client instances . A client instance is used as a persistent HTTP communications object, and a resource instance corresponds to a particular type of resource (e.g. Job) - ''' - setattr(self, 'resource', resource) - setattr(self, 'client', client or APIClient()) + """ + setattr(self, "resource", resource) + setattr(self, "client", client or APIClient()) def join_path(self, path): - ''' + """ Joins a resource base path with an additional path (e.g. an ID) - ''' + """ return urljoin(f"{self.resource.PATH}/", path) def get(self, job_id): - ''' + """ Attempts to retrieve a particular record by sending a GET request to the appropriate endpoint. 
If successful, the resource object is populated with the data in the response. - ''' - if 'GET' not in self.resource.SUPPORTED_METHODS: - raise TypeError('GET method on this resource is not supported') + """ + if "GET" not in self.resource.SUPPORTED_METHODS: + raise TypeError("GET method on this resource is not supported") response = self.client.get(self.join_path(str(job_id))) if response.status_code == requests.status_codes.codes.OK: @@ -127,26 +124,41 @@ def get(self, job_id): self.handle_error_response(response) def create(self, params): - ''' + """ Attempts to create a new instance of a resource by sending a POST request to the appropriate endpoint. - ''' - if 'POST' not in self.resource.SUPPORTED_METHODS: - raise TypeError('POST method on this resource is not supported') + """ + if "POST" not in self.resource.SUPPORTED_METHODS: + raise TypeError("POST method on this resource is not supported") - if getattr(self.resource, 'id', None) is not None: - raise TypeError('ID must be None when calling create') + if getattr(self.resource, "id", None) is not None: + raise TypeError("ID must be None when calling create") response = self.client.post(self.resource.PATH, params) + + self.handle_response(response) + + def handle_response(self, response): + """ + Store the status code on the manager object and handle the response + based on the status code. + """ + self.http_status_code = response.status_code if response.status_code == 201: - self.refresh_data(response.json()) + self.handle_success_response(response) else: - raise self.handle_error_response(response) + self.handle_error_response(response) + + def handle_success_response(self, response): + """ + Handles a successful response by refreshing the instance fields. + """ + self.refresh_data(response.json()) def handle_error_response(self, response): - ''' + """ Handles an error response that is returned by the server. - ''' + """ if response.status_code == 400: pass @@ -158,26 +170,26 @@ def handle_error_response(self, response): pass def refresh_data(self, data): - ''' + """ Refreshes the instance's attributes with the provided data and converts it to the correct type. - ''' + """ for key in self.resource.FIELDS: if key in data and data[key] is not None: - setattr( - self.resource, key, self.resource.FIELDS[key](data[key])) + setattr(self.resource, key, self.resource.FIELDS[key](data[key])) else: setattr(self.resource, key, None) class Resource: - ''' + """ A base class for an API resource. This class should be extended for each resource endpoint. - ''' + """ + SUPPORTED_METHODS = () - PATH = '' + PATH = "" FIELDS = {} def __init__(self): @@ -185,18 +197,19 @@ def __init__(self): class Job(Resource): - ''' + """ The API resource corresponding to jobs. 
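To make the new dispatch concrete, a small sketch (FakeResponse and its payload are invented for the example):

.. code-block:: python

    from strawberryfields.api_client import Job

    class FakeResponse:
        status_code = 201

        def json(self):
            return {"id": 1, "status": "queued"}

    job = Job()
    job.manager.handle_response(FakeResponse())
    assert job.manager.http_status_code == 201   # the status code is stored on the manager
    assert job.id == 1                           # 201 is routed through handle_success_response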
- ''' - SUPPORTED_METHODS = ('GET', 'POST') - PATH = 'jobs' + """ + + SUPPORTED_METHODS = ("GET", "POST") + PATH = "jobs" FIELDS = { - 'id': int, - 'status': str, - 'result_url': str, - 'circuit_url': str, - 'created_at': str, - 'started_at': str, - 'finished_at': str, - 'running_time': str, + "id": int, + "status": str, + "result_url": str, + "circuit_url": str, + "created_at": str, + "started_at": str, + "finished_at": str, + "running_time": str, } diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index e48bc588a..f60fcfcfd 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -74,6 +74,9 @@ def __init__(self, status_code): def json(self): return self.possible_responses[self.status_code] + def raise_for_status(self): + raise requests.exceptions.HTTPError() + @pytest.mark.api_client class TestAPIClient: @@ -123,20 +126,27 @@ def test_get(self): def test_refresh_data(self): assert True - def test_create(self, monkeypatch): + def test_create_created(self, monkeypatch): monkeypatch.setattr( requests, "post", lambda url, headers, data: MockCreatedResponse(201)) job = api_client.Job() job.manager.create(params={}) - assert job.id == SAMPLE_JOB_CREATE_RESPONSE['id'] - assert job.status == SAMPLE_JOB_CREATE_RESPONSE['status'] - assert job.result_url == SAMPLE_JOB_CREATE_RESPONSE['result_url'] - assert job.created_at == SAMPLE_JOB_CREATE_RESPONSE['created_at'] - assert job.started_at == SAMPLE_JOB_CREATE_RESPONSE['started_at'] - assert job.finished_at == SAMPLE_JOB_CREATE_RESPONSE['finished_at'] - assert job.running_time == SAMPLE_JOB_CREATE_RESPONSE['running_time'] + + keys_to_check = SAMPLE_JOB_CREATE_RESPONSE.keys() + for key in keys_to_check: + assert getattr(job, key) == SAMPLE_JOB_CREATE_RESPONSE[key] + + def test_create_bad_request(self, monkeypatch): + monkeypatch.setattr( + requests, + "post", + lambda url, headers, data: MockCreatedResponse(400)) + job = api_client.Job() + + job.manager.create(params={}) + assert job.manager.http_status_code == 400 def test_join_path(self): assert True From ddcf3c82d34336f56ea0475df0ff17512acedf47 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 19 Jun 2019 12:18:20 -0400 Subject: [PATCH 004/335] Add more tests, fix handle_response to support create and get methods --- strawberryfields/api_client.py | 31 +++-- tests/api_client/test_api_client.py | 183 +++++++++++++++++++++++++++- 2 files changed, 199 insertions(+), 15 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 26cf09f32..1e4b8d5ed 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -17,11 +17,19 @@ protocol. """ -from urllib.parse import urljoin +import urllib import requests import json +class MethodNotSupportedException(TypeError): + pass + + +class ObjectAlreadyCreatedException(TypeError): + pass + + class APIClient: """ An object that allows the user to connect to the compute-service API. @@ -74,7 +82,7 @@ def join_path(self, path): """ Joins a base url with an additional path (e.g. a resource name and ID) """ - return urljoin(f"{self.BASE_URL}/", path) + return urllib.parse.urljoin(f"{self.BASE_URL}/", path) def get(self, path): """ @@ -106,7 +114,7 @@ def join_path(self, path): """ Joins a resource base path with an additional path (e.g. 
an ID) """ - return urljoin(f"{self.resource.PATH}/", path) + return urllib.parse.urljoin(f"{self.resource.PATH}/", path) def get(self, job_id): """ @@ -115,13 +123,10 @@ def get(self, job_id): object is populated with the data in the response. """ if "GET" not in self.resource.SUPPORTED_METHODS: - raise TypeError("GET method on this resource is not supported") + raise MethodNotSupportedException("GET method on this resource is not supported") response = self.client.get(self.join_path(str(job_id))) - if response.status_code == requests.status_codes.codes.OK: - self.refresh_data(response.json()) - else: - self.handle_error_response(response) + self.handle_response(response) def create(self, params): """ @@ -129,10 +134,10 @@ def create(self, params): request to the appropriate endpoint. """ if "POST" not in self.resource.SUPPORTED_METHODS: - raise TypeError("POST method on this resource is not supported") + raise MethodNotSupportedException("POST method on this resource is not supported") if getattr(self.resource, "id", None) is not None: - raise TypeError("ID must be None when calling create") + raise ObjectAlreadyCreatedException("ID must be None when calling create") response = self.client.post(self.resource.PATH, params) @@ -144,7 +149,7 @@ def handle_response(self, response): based on the status code. """ self.http_status_code = response.status_code - if response.status_code == 201: + if response.status_code in (200, 201): self.handle_success_response(response) else: self.handle_error_response(response) @@ -176,6 +181,8 @@ def refresh_data(self, data): """ for key in self.resource.FIELDS: + # TODO: treat everything as strings, and don't overload the fields + # parameter to also convert the values. if key in data and data[key] is not None: setattr(self.resource, key, self.resource.FIELDS[key](data[key])) else: @@ -203,6 +210,8 @@ class Job(Resource): SUPPORTED_METHODS = ("GET", "POST") PATH = "jobs" + + # TODO: change this to a flat list. 
FIELDS = { "id": int, "status": str, diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index f60fcfcfd..7d765c5a0 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -18,7 +18,14 @@ import pytest from strawberryfields import api_client -from strawberryfields.api_client import requests +from strawberryfields.api_client import ( + requests, + ResourceManager, + ObjectAlreadyCreatedException, + MethodNotSupportedException, +) + +from unittest.mock import MagicMock status_codes = requests.status_codes.codes @@ -39,8 +46,32 @@ def client(): "running_time": None, } +SAMPLE_JOB_RESPONSE = { + "id": 19856, + "status": "complete", + "result_url": "https://platform.xanadu.ai/jobs/19856/result", + "circuit_url": "https://platform.xanadu.ai/jobs/19856/circuit", + "created_at": "2019-05-24T15:55:43.872531Z", + "started_at": "2019-05-24T16:01:12.145636Z", + "finished_at": "2019-05-24T16:01:12.145645Z", + "running_time": "9µs" +} + -class MockCreatedResponse: +class MockResponse: + status_code = None + + def __init__(self, status_code): + self.status_code = status_code + + def json(self): + return self.possible_responses[self.status_code] + + def raise_for_status(self): + raise requests.exceptions.HTTPError() + + +class MockPOSTResponse(MockResponse): possible_responses = { 201: SAMPLE_JOB_CREATE_RESPONSE, 400: { @@ -66,6 +97,26 @@ class MockCreatedResponse: }, } + +class MockGETResponse(MockResponse): + possible_responses = { + 200: SAMPLE_JOB_RESPONSE, + 401: { + "code": "unauthenticated", + "detail": "Requires authentication" + }, + 404: { + "code": "", + "detail": "", + }, + 500: { + "code": "server-error", + "detail": ( + "Unexpected server error. Please try your request again " + "later.") + }, + } + status_code = None def __init__(self, status_code): @@ -115,6 +166,130 @@ def test_post(self, client): assert True +@pytest.mark.api_client +class TestResourceManager: + def test_init(self): + resource = MagicMock() + client = MagicMock() + manager = ResourceManager(resource, client) + + assert manager.resource == resource + assert manager.client == client + + def test_join_path(self): + mock_resource = MagicMock() + mock_resource.PATH = 'some-path' + + manager = ResourceManager(mock_resource, MagicMock()) + assert manager.join_path('test') == "some-path/test" + + def test_get_unsupported(self): + mock_resource = MagicMock() + mock_resource.SUPPORTED_METHODS = () + manager = ResourceManager(mock_resource, MagicMock()) + with pytest.raises(MethodNotSupportedException): + manager.get(1) + + def test_get(self, monkeypatch): + mock_resource = MagicMock() + mock_client = MagicMock() + mock_response = MagicMock() + mock_client.get = MagicMock(return_value=mock_response) + + mock_resource.SUPPORTED_METHODS = ('GET',) + + manager = ResourceManager(mock_resource, mock_client) + monkeypatch.setattr(manager, "handle_response", MagicMock()) + + manager.get(1) + + # TODO test that this is called with correct path + mock_client.get.assert_called_once() + manager.handle_response.assert_called_once_with(mock_response) + + def test_create_unsupported(self): + mock_resource = MagicMock() + mock_resource.SUPPORTED_METHODS = () + manager = ResourceManager(mock_resource, MagicMock()) + with pytest.raises(MethodNotSupportedException): + manager.create({}) + + def test_create_id_already_exists(self): + mock_resource = MagicMock() + mock_resource.SUPPORTED_METHODS = ('POST',) + mock_resource.id = MagicMock() + manager = 
ResourceManager(mock_resource, MagicMock()) + with pytest.raises(ObjectAlreadyCreatedException): + manager.create({}) + + def test_create(self, monkeypatch): + mock_resource = MagicMock() + mock_client = MagicMock() + mock_response = MagicMock() + mock_client.post = MagicMock(return_value=mock_response) + + mock_resource.SUPPORTED_METHODS = ('POST',) + mock_resource.id = None + + manager = ResourceManager(mock_resource, mock_client) + monkeypatch.setattr(manager, "handle_response", MagicMock()) + + manager.create({}) + + # TODO test that this is called with correct path and params + mock_client.post.assert_called_once() + manager.handle_response.assert_called_once_with(mock_response) + + def test_handle_response(self, monkeypatch): + mock_resource = MagicMock() + mock_client = MagicMock() + mock_response = MagicMock() + mock_handle_success_response = MagicMock() + mock_handle_error_response = MagicMock() + + manager = ResourceManager(mock_resource, mock_client) + + monkeypatch.setattr( + manager, "handle_success_response", mock_handle_success_response) + + monkeypatch.setattr( + manager, "handle_error_response", mock_handle_error_response) + + manager.handle_response(mock_response) + assert manager.http_status_code == mock_response.status_code + mock_handle_error_response.assert_called_once_with(mock_response) + + mock_response.status_code = 200 + manager.handle_response(mock_response) + mock_handle_success_response.assert_called_once_with(mock_response) + + def test_handle_refresh_data(self): + mock_resource = MagicMock() + mock_client = MagicMock() + + fields = ( + "id", + "status", + "result_url", + "circuit_url", + "created_at", + "started_at", + "finished_at", + "running_time", + ) + + mock_resource.FIELDS = {f: MagicMock() for f in fields} + mock_data = {f: MagicMock() for f in fields} + + manager = ResourceManager(mock_resource, mock_client) + + manager.refresh_data(mock_data) + + for key, value in mock_resource.FIELDS.items(): + value.assert_called_once() + # TODO: test that the attributes on the resource were set correctly + + @pytest.mark.api_client class TestJob: def test_init(self): @@ -130,7 +305,7 @@ def test_create_created(self, monkeypatch): monkeypatch.setattr( requests, "post", - lambda url, headers, data: MockCreatedResponse(201)) + lambda url, headers, data: MockPOSTResponse(201)) job = api_client.Job() job.manager.create(params={}) @@ -142,7 +317,7 @@ def test_create_bad_request(self, monkeypatch): monkeypatch.setattr( requests, "post", - lambda url, headers, data: MockCreatedResponse(400)) + lambda url, headers, data: MockPOSTResponse(400)) job = api_client.Job() job.manager.create(params={}) From 12e803b367331f5e6eeacb29db3ef5ebf059f91d Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 19 Jun 2019 12:42:15 -0400 Subject: [PATCH 005/335] Clean up --- strawberryfields/api_client.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 1e4b8d5ed..b82b9e6e9 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -17,16 +17,26 @@ protocol. """ -import urllib -import requests import json +import requests +import urllib class MethodNotSupportedException(TypeError): + """ + Exception to be raised when a ResourceManager method is not supported for a + particular Resource. 
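The essence of the mocking pattern used in the tests above can be reduced to the following sketch (not part of the original test suite; MockPOSTResponse and SAMPLE_JOB_CREATE_RESPONSE are the fixtures defined earlier in the test module):

.. code-block:: python

    def test_create_stores_status_code(monkeypatch):
        # Replace requests.post so no real HTTP call is made.
        monkeypatch.setattr(
            requests, "post",
            lambda url, headers, data: MockPOSTResponse(201))

        job = api_client.Job()
        job.manager.create(params={})

        assert job.manager.http_status_code == 201
        assert job.id == SAMPLE_JOB_CREATE_RESPONSE["id"]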
+ """ + pass class ObjectAlreadyCreatedException(TypeError): + """ + Exception to be raised when an object has already been created but the user + is attempting to create it again. + """ + pass @@ -39,14 +49,14 @@ class APIClient: DEFAULT_BASE_URL = "localhost" CONFIGURATION_PATH = "" - def __init__(self, use_ssl=True, base_url=None, *args, **kwargs): + def __init__(self, use_ssl=True, base_url=None, **kwargs): """ Initialize the API client with various parameters. """ # TODO: Load username, password, or authentication token from # configuration file - self.USE_SSL = kwargs.get("use_ssl", True) + self.USE_SSL = use_ssl self.AUTHENTICATION_TOKEN = kwargs.get("authentication_token", "") self.HEADERS = {} @@ -100,6 +110,10 @@ def post(self, path, payload): class ResourceManager: + """ + This class handles all interactions with APIClient by the resource. + """ + def __init__(self, resource, client=None): """ Initialize the manager with resource and client instances . A client @@ -109,6 +123,7 @@ def __init__(self, resource, client=None): """ setattr(self, "resource", resource) setattr(self, "client", client or APIClient()) + setattr(self, "http_status_code", None) def join_path(self, path): """ From 0a5f5a15209a086f3075569801b0796645c65599 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 19 Jun 2019 13:19:53 -0400 Subject: [PATCH 006/335] Clean up to fix CodeFactor issues --- strawberryfields/api_client.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index b82b9e6e9..61d8b80c9 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -17,9 +17,10 @@ protocol. """ +import urllib + import json import requests -import urllib class MethodNotSupportedException(TypeError): @@ -114,6 +115,8 @@ class ResourceManager: This class handles all interactions with APIClient by the resource. """ + http_status_code = None + def __init__(self, resource, client=None): """ Initialize the manager with resource and client instances . A client @@ -123,7 +126,6 @@ def __init__(self, resource, client=None): """ setattr(self, "resource", resource) setattr(self, "client", client or APIClient()) - setattr(self, "http_status_code", None) def join_path(self, path): """ From 60e7856e1505ee1fe553ed653a1669d37e116e16 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 19 Jun 2019 14:32:36 -0400 Subject: [PATCH 007/335] Refactor Job.fields to use new Field class This class was added to facilitate tracking a resource's fields as well as provide a convenient clean method. --- strawberryfields/api_client.py | 85 ++++++++++++++++++++++------- tests/api_client/test_api_client.py | 27 +++------ 2 files changed, 74 insertions(+), 38 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 61d8b80c9..6e493ff3f 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -19,6 +19,7 @@ import urllib +import dateutil.parser import json import requests @@ -153,7 +154,7 @@ def create(self, params): if "POST" not in self.resource.SUPPORTED_METHODS: raise MethodNotSupportedException("POST method on this resource is not supported") - if getattr(self.resource, "id", None) is not None: + if self.resource.id: raise ObjectAlreadyCreatedException("ID must be None when calling create") response = self.client.post(self.resource.PATH, params) @@ -197,13 +198,8 @@ def refresh_data(self, data): converts it to the correct type. 
""" - for key in self.resource.FIELDS: - # TODO: treat everything as strings, and don't overload the fields - # parameter to also convert the values. - if key in data and data[key] is not None: - setattr(self.resource, key, self.resource.FIELDS[key](data[key])) - else: - setattr(self.resource, key, None) + for field in self.resource.fields: + field.set(data.get(field.name, None)) class Resource: @@ -214,10 +210,55 @@ class Resource: SUPPORTED_METHODS = () PATH = "" - FIELDS = {} + fields = () def __init__(self): self.manager = ResourceManager(self) + for field in self.fields: + setattr(self, field.name, field) + + +class Field: + """ + A helper class to classify and clean data returned by the API. + """ + + value = None + + def __init__(self, name, clean=str): + """ + Initialize the Field object with a name and a cleaning function. + """ + + self.name = name + self.clean = clean + + def __str__(self): + """ + Return the string representation of the value. + """ + return str(self.value) + + def __bool__(self): + """ + Use the value to determine boolean state. + """ + return self.value is not None + + def set(self, value): + """ + Set the value of the Field to `value`. + """ + self.value = value + + @property + def cleaned_value(self): + """ + Return the cleaned value of the field (for example, an integer or Date + object) + """ + if self.value is not None: + return self.clean(self.value) class Job(Resource): @@ -228,14 +269,18 @@ class Job(Resource): SUPPORTED_METHODS = ("GET", "POST") PATH = "jobs" - # TODO: change this to a flat list. - FIELDS = { - "id": int, - "status": str, - "result_url": str, - "circuit_url": str, - "created_at": str, - "started_at": str, - "finished_at": str, - "running_time": str, - } + def __init__(self): + """ + Initialize the Job resource with a set of pre-defined fields. 
+ """ + self.fields = ( + Field("id", int), + Field("status"), + Field("result_url"), + Field("circuit_url"), + Field("created_at", dateutil.parser.parse), + Field("started_at", dateutil.parser.parse), + Field("finished_at", dateutil.parser.parse), + Field("running_time", dateutil.parser.parse), + ) + super().__init__() diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index 7d765c5a0..4167d222a 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -20,6 +20,7 @@ from strawberryfields import api_client from strawberryfields.api_client import ( requests, + Job, ResourceManager, ObjectAlreadyCreatedException, MethodNotSupportedException, @@ -267,27 +268,17 @@ def test_handle_refresh_data(self): mock_resource = MagicMock() mock_client = MagicMock() - fields = ( - "id", - "status", - "result_url", - "circuit_url", - "created_at", - "started_at", - "finished_at", - "running_time", - ) - - mock_resource.FIELDS = {f: MagicMock() for f in fields} + fields = [MagicMock() for i in range(5)] + + mock_resource.fields = {f: MagicMock() for f in fields} mock_data = {f: MagicMock() for f in fields} manager = ResourceManager(mock_resource, mock_client) manager.refresh_data(mock_data) - for key, value in mock_resource.FIELDS.items(): - value.assert_called_once() - # TODO: test that the attributes on the resource were set correctly + for field in mock_resource.fields: + field.set.assert_called_once() @pytest.mark.api_client @@ -306,19 +297,19 @@ def test_create_created(self, monkeypatch): requests, "post", lambda url, headers, data: MockPOSTResponse(201)) - job = api_client.Job() + job = Job() job.manager.create(params={}) keys_to_check = SAMPLE_JOB_CREATE_RESPONSE.keys() for key in keys_to_check: - assert getattr(job, key) == SAMPLE_JOB_CREATE_RESPONSE[key] + assert getattr(job, key).value == SAMPLE_JOB_CREATE_RESPONSE[key] def test_create_bad_request(self, monkeypatch): monkeypatch.setattr( requests, "post", lambda url, headers, data: MockPOSTResponse(400)) - job = api_client.Job() + job = Job() job.manager.create(params={}) assert job.manager.http_status_code == 400 From a0d31e047da74bbffbcaa59d9e6ca44e4e8b7dde Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 19 Jun 2019 14:35:23 -0400 Subject: [PATCH 008/335] CodeFactor cleanup --- strawberryfields/api_client.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 6e493ff3f..199505160 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -18,9 +18,9 @@ """ import urllib +import json import dateutil.parser -import json import requests @@ -257,8 +257,7 @@ def cleaned_value(self): Return the cleaned value of the field (for example, an integer or Date object) """ - if self.value is not None: - return self.clean(self.value) + return self.clean(self.value) if self.value is not None else None class Job(Resource): From 51b6c59a4672fe271e0625eef93d8609e7e17c8c Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 19 Jun 2019 14:47:42 -0400 Subject: [PATCH 009/335] Add python-dateutil to requirements.txt --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index b27cf960a..a7496f2d9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,4 @@ scipy>=1.0.0 tensorflow==1.3 tensorflow-tensorboard>=0.1.8 quantum-blackbird +python-dateutil==2.8.0 From b42fa74678b2932d486bfa1abfa76456c383773e Mon Sep 17 00:00:00 2001 From: 
Zeid Date: Wed, 19 Jun 2019 15:02:24 -0400 Subject: [PATCH 010/335] running_time field should be string, not datetime. --- strawberryfields/api_client.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 199505160..8f3d8edfb 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -217,6 +217,12 @@ def __init__(self): for field in self.fields: setattr(self, field.name, field) + def reload(self): + """ + A helper method to fetch the latest data from the API. + """ + raise NotImplementedError + class Field: """ @@ -280,6 +286,6 @@ def __init__(self): Field("created_at", dateutil.parser.parse), Field("started_at", dateutil.parser.parse), Field("finished_at", dateutil.parser.parse), - Field("running_time", dateutil.parser.parse), + Field("running_time"), ) super().__init__() From 5d3b0594460b33b4fcda911d4fa7864ba12e8f10 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 19 Jun 2019 15:11:05 -0400 Subject: [PATCH 011/335] Remove unnecessary tests for clean up. --- tests/api_client/test_api_client.py | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index 4167d222a..ff6e6f6b6 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -154,18 +154,9 @@ def test_authenticate(self, client): password = 'TEST_PASSWORD' client.authenticate(username, password) - def test_set_authorization_header(self): - assert True - def test_join_path(self, client): assert client.join_path('jobs') == 'localhost/jobs' - def test_get(self, client, monkeypatch): - assert True - - def test_post(self, client): - assert True - @pytest.mark.api_client class TestResourceManager: @@ -283,15 +274,6 @@ def test_handle_refresh_data(self): @pytest.mark.api_client class TestJob: - def test_init(self): - assert True - - def test_get(self): - assert True - - def test_refresh_data(self): - assert True - def test_create_created(self, monkeypatch): monkeypatch.setattr( requests, @@ -313,6 +295,3 @@ def test_create_bad_request(self, monkeypatch): job.manager.create(params={}) assert job.manager.http_status_code == 400 - - def test_join_path(self): - assert True From e37692909b91b521c4d224f5237795480fa58f29 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 19 Jun 2019 16:17:24 -0400 Subject: [PATCH 012/335] Add documentation for api_client --- doc/code/api_client.rst | 3 ++ strawberryfields/api_client.py | 58 ++++++++++++++++++++++++++++++++-- 2 files changed, 58 insertions(+), 3 deletions(-) create mode 100644 doc/code/api_client.rst diff --git a/doc/code/api_client.rst b/doc/code/api_client.rst new file mode 100644 index 000000000..fe0a27e51 --- /dev/null +++ b/doc/code/api_client.rst @@ -0,0 +1,3 @@ +.. automodule:: strawberryfields.api_client + :members: + :private-members: diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 8f3d8edfb..7467f4400 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -12,10 +12,62 @@ # See the License for the specific language governing permissions and # limitations under the License. -r""" -API Client library that interacts with the compute-service API over the HTTP -protocol. """ +APIClient library +================ + +**Module name:** :mod:`strawberryfields.api_client` + +.. 
currentmodule:: strawberryfields.api_client + + +This module provides a thin client that communicates with the compute-service API over the HTTP +protocol, based on the requests module. It also provides helper classes to facilitate interacting +with this API via the Resource subclasses, as well as the ResourceManager wrapper around APIClient +that is available for each resource. + +A single APIClient instance can be used throughout one's session in the application. + +A typical use looks like this: + .. code-block:: python + + job = Job() + circuit = ''' + name StateTeleportation + version 1.0 + target gaussian (shots=1000) + + complex alpha = 1+0.5j + Coherent(alpha) | 0 + Squeezed(-4) | 1 + Squeezed(4) | 2 + BSgate(pi/4, 0) | (1, 2) + BSgate(pi/4, 0) | (0, 1) + MeasureX | 0 + MeasureP | 1 + Xgate(sqrt(2)*q0) | 2 + Zgate(sqrt(2)*q1) | 2 + MeasureHeterodyne() | 2 + ''' + job.manager.create({'circuit': circuit}) + + job.id # Returns the job ID that was generated by the server + job.reload() # Fetches the latest job data from the server + job.status # Prints the status of this job + + job.manager.get(1536) # Fetches job 1536 from the server and updates the instance + +Classes +------- + +.. autosummary:: + APIClient + Resource + ResourceManager + Field + Job +""" + import urllib import json From ca2a2b38855de64a0da9830a375610bc39f6740c Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 10:08:15 -0400 Subject: [PATCH 013/335] Attempt to get configuration from environment variables --- strawberryfields/api_client.py | 51 ++++++++++++++++++++++------- tests/api_client/test_api_client.py | 4 +-- 2 files changed, 42 insertions(+), 13 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 7467f4400..9ca29bcd4 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -71,6 +71,7 @@ import urllib import json +import os import dateutil.parser import requests @@ -101,27 +102,55 @@ class APIClient: ALLOWED_BASE_URLS = ["localhost"] DEFAULT_BASE_URL = "localhost" - CONFIGURATION_PATH = "" - def __init__(self, use_ssl=True, base_url=None, **kwargs): + ENV_KEY_PREFIX = "SF_API" + ENV_AUTH_TOKEN_KEY = f"{ENV_KEY_PREFIX}_AUTH_TOKEN" + ENV_API_HOST_KEY = f"{ENV_KEY_PREFIX}_API_HOST" + ENV_USE_SSL_KEY = f"{ENV_KEY_PREFIX}_USE_SSL" + + def __init__(self, **kwargs): """ Initialize the API client with various parameters. 
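A sketch of the environment-based configuration this commit introduces (the token values are made up; explicit keyword arguments still override anything picked up from the environment):

.. code-block:: python

    import os
    from strawberryfields.api_client import APIClient

    os.environ["SF_API_AUTH_TOKEN"] = "example-token"

    client = APIClient()
    client.AUTHENTICATION_TOKEN    # "example-token", read from the environment

    client = APIClient(authentication_token="override-token")
    client.AUTHENTICATION_TOKEN    # "override-token", kwargs win over the environment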
""" # TODO: Load username, password, or authentication token from # configuration file - self.USE_SSL = use_ssl - self.AUTHENTICATION_TOKEN = kwargs.get("authentication_token", "") - self.HEADERS = {} + configuration = { + "use_ssl": True, + "base_url": self.DEFAULT_BASE_URL, + "authentication_token": None, + } - if not base_url: - self.BASE_URL = self.DEFAULT_BASE_URL - elif base_url in self.ALLOWED_BASE_URLS: - self.BASE_URL = base_url - else: + # Try getting everything first from environment variables + configuration.update(self.get_configuration_from_environment()) + + # Override any values that are explicitly passed when initializing client + configuration.update(kwargs) + + if configuration["base_url"] is None: + raise ValueError("base_url parameter is missing") + + if configuration["base_url"] not in self.ALLOWED_BASE_URLS: raise ValueError("base_url parameter not in allowed list") - def load_configuration(self): + self.HEADERS = {} + self.BASE_URL = configuration["base_url"] + self.AUTHENTICATION_TOKEN = configuration["authentication_token"] + self.USE_SSL = configuration["use_ssl"] + + # TODO: warn if not use_ssl + # TODO: warn if no authentication token + + def get_configuration_from_environment(self): + configuration = { + "authentication_token": os.environ.get(self.ENV_AUTH_TOKEN_KEY), + "base_url": os.environ.get(self.ENV_API_HOST_KEY), + "use_ssl": os.environ.get(self.ENV_USE_SSL_KEY), + } + + return {key: value for key, value in configuration.items() if value is not None} + + def load_configuration_from_file(self): """ Loads username, password, and/or authentication token from a config file. diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index ff6e6f6b6..d829be0ae 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -135,7 +135,7 @@ class TestAPIClient: def test_init_default_client(self): client = api_client.APIClient() assert client.USE_SSL is True - assert client.AUTHENTICATION_TOKEN == '' + assert client.AUTHENTICATION_TOKEN is None assert client.BASE_URL == 'localhost' assert client.HEADERS == {} @@ -146,7 +146,7 @@ def test_init_custom_token_client(self): def test_load_configuration(self, client): with pytest.raises(NotImplementedError): - client.load_configuration() + client.load_configuration_from_file() def test_authenticate(self, client): with pytest.raises(NotImplementedError): From 88324252f19ae5ba8af7c1b425008e4d4f26ff4b Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 10:54:52 -0400 Subject: [PATCH 014/335] Directly pass parameters to Manager.create, rather than passing a dict --- strawberryfields/api_client.py | 4 ++-- tests/api_client/test_api_client.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 9ca29bcd4..6b75e46d9 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -49,7 +49,7 @@ Zgate(sqrt(2)*q1) | 2 MeasureHeterodyne() | 2 ''' - job.manager.create({'circuit': circuit}) + job.manager.create(circuit=circuit}) job.id # Returns the job ID that was generated by the server job.reload() # Fetches the latest job data from the server @@ -227,7 +227,7 @@ def get(self, job_id): response = self.client.get(self.join_path(str(job_id))) self.handle_response(response) - def create(self, params): + def create(self, **params): """ Attempts to create a new instance of a resource by sending a POST request to the appropriate endpoint. 
diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index d829be0ae..4ed0e0a58 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -204,7 +204,7 @@ def test_create_unsupported(self): mock_resource.SUPPORTED_METHODS = () manager = ResourceManager(mock_resource, MagicMock()) with pytest.raises(MethodNotSupportedException): - manager.create({}) + manager.create() def test_create_id_already_exists(self): mock_resource = MagicMock() @@ -212,7 +212,7 @@ def test_create_id_already_exists(self): mock_resource.id = MagicMock() manager = ResourceManager(mock_resource, MagicMock()) with pytest.raises(ObjectAlreadyCreatedException): - manager.create({}) + manager.create() def test_create(self, monkeypatch): mock_resource = MagicMock() @@ -226,7 +226,7 @@ def test_create(self, monkeypatch): manager = ResourceManager(mock_resource, mock_client) monkeypatch.setattr(manager, "handle_response", MagicMock()) - manager.create({}) + manager.create() # TODO test that this is called with correct path and params mock_client.post.assert_called_once() From c45ed73074cb679d250e73c170c932f456717331 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 11:25:02 -0400 Subject: [PATCH 015/335] Add support for JobResult and JobCircuit --- strawberryfields/api_client.py | 45 ++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 6b75e46d9..8796949bb 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -370,3 +370,48 @@ def __init__(self): Field("running_time"), ) super().__init__() + + def refresh_data(self): + super().refresh_data() + self.result = JobResult(self.id) + self.circuit = JobCircuit(self.id) + + +class JobResult(Resource): + """ + The API resource corresponding to the job result. + """ + + SUPPORTED_METHODS = ("GET", ) + PATH = "jobs/{job_id}/result" + + def __init__(self, job_id): + """ + Initialize the JobResult resource with a pre-defined field. + """ + self.fields = ( + Field("result", json.loads), + ) + + self.PATH = self.PATH.format(job_id=job_id) + super().__init__() + + +class JobCircuit(Resource): + """ + The API resource corresponding to the job circuit. + """ + + SUPPORTED_METHODS = ("GET", ) + PATH = "jobs/{job_id}/circuit" + + def __init__(self, job_id): + """ + Initialize the JobCircuit resource with a pre-defined field. + """ + self.fields = ( + Field("circuit"), + ) + + self.PATH = self.PATH.format(job_id=job_id) + super().__init__() From b1806ad1ef3b95f68775b9cd55117dfe4c2d1efc Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 11:27:19 -0400 Subject: [PATCH 016/335] Fix formatting for consistency --- strawberryfields/api_client.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 8796949bb..af3e67d01 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -382,16 +382,14 @@ class JobResult(Resource): The API resource corresponding to the job result. """ - SUPPORTED_METHODS = ("GET", ) + SUPPORTED_METHODS = ("GET",) PATH = "jobs/{job_id}/result" def __init__(self, job_id): """ Initialize the JobResult resource with a pre-defined field. 
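To illustrate, the per-job sub-resources added in this commit derive their endpoint paths from the parent job's ID (8123 is a made-up ID; JobCircuit, defined just below, works the same way):

.. code-block:: python

    from strawberryfields.api_client import JobResult

    result = JobResult(8123)
    result.PATH    # "jobs/8123/result"; its single 'result' field is cleaned with json.loads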
""" - self.fields = ( - Field("result", json.loads), - ) + self.fields = (Field("result", json.loads),) self.PATH = self.PATH.format(job_id=job_id) super().__init__() @@ -402,16 +400,14 @@ class JobCircuit(Resource): The API resource corresponding to the job circuit. """ - SUPPORTED_METHODS = ("GET", ) + SUPPORTED_METHODS = ("GET",) PATH = "jobs/{job_id}/circuit" def __init__(self, job_id): """ Initialize the JobCircuit resource with a pre-defined field. """ - self.fields = ( - Field("circuit"), - ) + self.fields = (Field("circuit"),) self.PATH = self.PATH.format(job_id=job_id) super().__init__() From 8c7364a08528c9d259988ff5b04f86eec4455661 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 11:36:39 -0400 Subject: [PATCH 017/335] Reuse APIClient when setting result/circuit attributes --- strawberryfields/api_client.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index af3e67d01..491dc8907 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -293,8 +293,8 @@ class Resource: PATH = "" fields = () - def __init__(self): - self.manager = ResourceManager(self) + def __init__(self, client=None): + self.manager = ResourceManager(self, client=client) for field in self.fields: setattr(self, field.name, field) @@ -373,8 +373,8 @@ def __init__(self): def refresh_data(self): super().refresh_data() - self.result = JobResult(self.id) - self.circuit = JobCircuit(self.id) + self.result = JobResult(self.id, client=self.manager.client) + self.circuit = JobCircuit(self.id, client=self.manager.client) class JobResult(Resource): From 1905a658d2d9f7f1d349ef0c1bc005865eae3f70 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 11:46:49 -0400 Subject: [PATCH 018/335] Add reload method to facilitate updating job from server --- strawberryfields/api_client.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 491dc8907..d22784ac6 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -376,6 +376,12 @@ def refresh_data(self): self.result = JobResult(self.id, client=self.manager.client) self.circuit = JobCircuit(self.id, client=self.manager.client) + def reload(self): + if self.id: + self.manager.get(self.id) + else: + raise UserWarning('Can not reload job data since no job ID was provided') + class JobResult(Resource): """ From e172cb016adc16d4e5f4d7f84be8f2a9b3650549 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 12:10:58 -0400 Subject: [PATCH 019/335] Add scheme to BASE_URL, add HOSTNAME --- strawberryfields/api_client.py | 28 ++++++++++++++++------------ tests/api_client/test_api_client.py | 9 ++++++++- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index d22784ac6..84414b4fd 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -72,6 +72,7 @@ import urllib import json import os +import warnings import dateutil.parser import requests @@ -100,8 +101,8 @@ class APIClient: An object that allows the user to connect to the compute-service API. 
""" - ALLOWED_BASE_URLS = ["localhost"] - DEFAULT_BASE_URL = "localhost" + ALLOWED_HOSTNAMES = ["localhost"] + DEFAULT_HOSTNAME = "localhost" ENV_KEY_PREFIX = "SF_API" ENV_AUTH_TOKEN_KEY = f"{ENV_KEY_PREFIX}_AUTH_TOKEN" @@ -117,7 +118,7 @@ def __init__(self, **kwargs): configuration = { "use_ssl": True, - "base_url": self.DEFAULT_BASE_URL, + "hostname": self.DEFAULT_HOSTNAME, "authentication_token": None, } @@ -127,24 +128,27 @@ def __init__(self, **kwargs): # Override any values that are explicitly passed when initializing client configuration.update(kwargs) - if configuration["base_url"] is None: - raise ValueError("base_url parameter is missing") + if configuration["hostname"] is None: + raise ValueError("hostname parameter is missing") - if configuration["base_url"] not in self.ALLOWED_BASE_URLS: - raise ValueError("base_url parameter not in allowed list") + if configuration["hostname"] not in self.ALLOWED_HOSTNAMES: + raise ValueError("hostname parameter not in allowed list") - self.HEADERS = {} - self.BASE_URL = configuration["base_url"] - self.AUTHENTICATION_TOKEN = configuration["authentication_token"] self.USE_SSL = configuration["use_ssl"] + if not self.USE_SSL: + warnings.warn('Connecting insecurely to API server', UserWarning) + + self.HOSTNAME = configuration["hostname"] + self.BASE_URL = f"{'https' if self.USE_SSL else 'http'}://{self.HOSTNAME}" + self.AUTHENTICATION_TOKEN = configuration["authentication_token"] + self.HEADERS = {} - # TODO: warn if not use_ssl # TODO: warn if no authentication token def get_configuration_from_environment(self): configuration = { "authentication_token": os.environ.get(self.ENV_AUTH_TOKEN_KEY), - "base_url": os.environ.get(self.ENV_API_HOST_KEY), + "hostname": os.environ.get(self.ENV_API_HOST_KEY), "use_ssl": os.environ.get(self.ENV_USE_SSL_KEY), } diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index 4ed0e0a58..6ca6ba4f6 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -136,7 +136,14 @@ def test_init_default_client(self): client = api_client.APIClient() assert client.USE_SSL is True assert client.AUTHENTICATION_TOKEN is None - assert client.BASE_URL == 'localhost' + assert client.BASE_URL == 'https://localhost' + assert client.HEADERS == {} + + def test_init_default_client_no_ssl(self): + client = api_client.APIClient(use_ssl=False) + assert client.USE_SSL is False + assert client.AUTHENTICATION_TOKEN is None + assert client.BASE_URL == 'http://localhost' assert client.HEADERS == {} def test_init_custom_token_client(self): From a8c2750f8bea1572dded2fb7080ace7e0d46a8c8 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 12:47:47 -0400 Subject: [PATCH 020/335] Capture connection errors --- strawberryfields/api_client.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 84414b4fd..d8d5a8c2f 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -185,15 +185,26 @@ def get(self, path): """ Sends a GET request to the provided path. Returns a response object. """ - return requests.get(url=self.join_path(path), headers=self.HEADERS) + try: + response = requests.get(url=self.join_path(path), headers=self.HEADERS) + except requests.exceptions.ConnectionError as e: + response = None + warnings.warn(f'Could not connect to server ({e})') + return response def post(self, path, payload): """ Converts payload to a JSON string. 
Sends a POST request to the provided path. Returns a response object. """ + # TODO: catch any exceptions from dumping JSON data = json.dumps(payload) - return requests.post(url=self.join_path(path), headers=self.HEADERS, data=data) + try: + response = requests.post(url=self.join_path(path), headers=self.HEADERS, data=data) + except requests.exceptions.ConnectionError as e: + response = None + warnings.warn(f'Could not connect to server ({e})') + return response class ResourceManager: From f326ad3078eee6e46cf5e77d646943a733d83bbf Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 12:51:10 -0400 Subject: [PATCH 021/335] Bug fix in test --- strawberryfields/api_client.py | 8 ++++---- tests/api_client/test_api_client.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index d8d5a8c2f..d93c57a34 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -136,7 +136,7 @@ def __init__(self, **kwargs): self.USE_SSL = configuration["use_ssl"] if not self.USE_SSL: - warnings.warn('Connecting insecurely to API server', UserWarning) + warnings.warn("Connecting insecurely to API server", UserWarning) self.HOSTNAME = configuration["hostname"] self.BASE_URL = f"{'https' if self.USE_SSL else 'http'}://{self.HOSTNAME}" @@ -189,7 +189,7 @@ def get(self, path): response = requests.get(url=self.join_path(path), headers=self.HEADERS) except requests.exceptions.ConnectionError as e: response = None - warnings.warn(f'Could not connect to server ({e})') + warnings.warn(f"Could not connect to server ({e})") return response def post(self, path, payload): @@ -203,7 +203,7 @@ def post(self, path, payload): response = requests.post(url=self.join_path(path), headers=self.HEADERS, data=data) except requests.exceptions.ConnectionError as e: response = None - warnings.warn(f'Could not connect to server ({e})') + warnings.warn(f"Could not connect to server ({e})") return response @@ -395,7 +395,7 @@ def reload(self): if self.id: self.manager.get(self.id) else: - raise UserWarning('Can not reload job data since no job ID was provided') + raise UserWarning("Can not reload job data since no job ID was provided") class JobResult(Resource): diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index 6ca6ba4f6..228e26da4 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -162,7 +162,7 @@ def test_authenticate(self, client): client.authenticate(username, password) def test_join_path(self, client): - assert client.join_path('jobs') == 'localhost/jobs' + assert client.join_path('jobs') == f'{client.BASE_URL}/jobs' @pytest.mark.api_client From 964a52ed9aabbdcf0fed90b826c388fd4200d50e Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 13:47:49 -0400 Subject: [PATCH 022/335] Clean up and rename environment variable keys for consistency --- strawberryfields/api_client.py | 46 +++++++++++++++++++++++----------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index d93c57a34..224c82eba 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -104,10 +104,10 @@ class APIClient: ALLOWED_HOSTNAMES = ["localhost"] DEFAULT_HOSTNAME = "localhost" - ENV_KEY_PREFIX = "SF_API" - ENV_AUTH_TOKEN_KEY = f"{ENV_KEY_PREFIX}_AUTH_TOKEN" - ENV_API_HOST_KEY = f"{ENV_KEY_PREFIX}_API_HOST" - ENV_USE_SSL_KEY = f"{ENV_KEY_PREFIX}_USE_SSL" + ENV_KEY_PREFIX = 
"SF_API_" + ENV_AUTHENTICATION_TOKEN_KEY = f"{ENV_KEY_PREFIX}AUTHENTICATION_TOKEN" + ENV_API_HOSTNAME_KEY = f"{ENV_KEY_PREFIX}API_HOSTNAME" + ENV_USE_SSL_KEY = f"{ENV_KEY_PREFIX}USE_SSL" def __init__(self, **kwargs): """ @@ -146,9 +146,15 @@ def __init__(self, **kwargs): # TODO: warn if no authentication token def get_configuration_from_environment(self): + """ + Retrieve configuration from environment variables. The variables are defined as follows: + - SF_API_USE_SSL: True or False + - SF_API_HOSTNAME: The hostname of the server to connect to + - SF_API_AUTHENTICATION_TOKEN: The authentication token to use when connecting to the API + """ configuration = { - "authentication_token": os.environ.get(self.ENV_AUTH_TOKEN_KEY), - "hostname": os.environ.get(self.ENV_API_HOST_KEY), + "authentication_token": os.environ.get(self.ENV_AUTHENTICATION_TOKEN_KEY), + "hostname": os.environ.get(self.ENV_API_HOSTNAME_KEY), "use_ssl": os.environ.get(self.ENV_USE_SSL_KEY), } @@ -317,7 +323,13 @@ def reload(self): """ A helper method to fetch the latest data from the API. """ - raise NotImplementedError + if not hasattr(self, "id"): + raise TypeError("Resource does not have an ID") + + if self.id: + self.manager.get(self.id) + else: + warnings.warn("Could not reload resource data", UserWarning) class Field: @@ -384,18 +396,24 @@ def __init__(self): Field("finished_at", dateutil.parser.parse), Field("running_time"), ) + + self.result = None + self.circuit = None + super().__init__() def refresh_data(self): + """ + Refresh the job fields and attach a JobResult and JobCircuit object to the Job instance. + """ + super().refresh_data() - self.result = JobResult(self.id, client=self.manager.client) - self.circuit = JobCircuit(self.id, client=self.manager.client) - def reload(self): - if self.id: - self.manager.get(self.id) - else: - raise UserWarning("Can not reload job data since no job ID was provided") + if self.result is not None: + self.result = JobResult(self.id, client=self.manager.client) + + if self.circuit is not None: + self.circuit = JobCircuit(self.id, client=self.manager.client) class JobResult(Resource): From eafca76c5f53114542b6d52edf3e59cef2b45394 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 13:56:38 -0400 Subject: [PATCH 023/335] Small refactor on join_path --- strawberryfields/api_client.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 224c82eba..daf992108 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -78,6 +78,10 @@ import requests +def join_path(base_path, path): + return urllib.parse.urljoin(f"{base_path}/", path) + + class MethodNotSupportedException(TypeError): """ Exception to be raised when a ResourceManager method is not supported for a @@ -185,7 +189,7 @@ def join_path(self, path): """ Joins a base url with an additional path (e.g. a resource name and ID) """ - return urllib.parse.urljoin(f"{self.BASE_URL}/", path) + return join_path(self.BASE_URL, path) def get(self, path): """ @@ -234,7 +238,7 @@ def join_path(self, path): """ Joins a resource base path with an additional path (e.g. 
an ID) """ - return urllib.parse.urljoin(f"{self.resource.PATH}/", path) + return join_path(self.resource.PATH, path) def get(self, job_id): """ From 551b14d069e23f31106fb4d158c0ba3487d755d0 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 13:59:05 -0400 Subject: [PATCH 024/335] Add missing docstring --- strawberryfields/api_client.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index daf992108..82e9278f8 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -79,6 +79,9 @@ def join_path(base_path, path): + """ + Joins two paths, a base path and another path and returns a string. + """ return urllib.parse.urljoin(f"{base_path}/", path) From 80d2f9a3c6778ae18d5841f4abad5a691b4422b0 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 14:08:02 -0400 Subject: [PATCH 025/335] Add missing documentation --- strawberryfields/api_client.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 82e9278f8..9ba702c39 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -54,6 +54,10 @@ job.id # Returns the job ID that was generated by the server job.reload() # Fetches the latest job data from the server job.status # Prints the status of this job + job.result # Returns a JobResult object + job.circuit # Returns a JobCircuit object + + job.result.reload() # Reloads the JobResult object from the API job.manager.get(1536) # Fetches job 1536 from the server and updates the instance From 7e2d12b60fe3e18b8efebe27fc573316f8cea8e4 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 14:24:15 -0400 Subject: [PATCH 026/335] Message the user when an error response is received from the server --- strawberryfields/api_client.py | 24 ++++++++++++++++++------ tests/api_client/test_api_client.py | 5 +++++ 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 9ba702c39..b104f029f 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -296,14 +296,26 @@ def handle_error_response(self, response): Handles an error response that is returned by the server. """ - if response.status_code == 400: - pass + # TODO: Improve error messaging and parse the actual error output (json). 
+ + if response.status_code in (400, 409): + warnings.warn( + "The server did not accept the request, and returned an error " + f"({response.status_code}: {response.text}).", + UserWarning, + ) elif response.status_code == 401: - pass - elif response.status_code == 409: - pass + warnings.warn( + "The server did not accept the request due to an authentication error " + f"({response.status_code}: {response.text}).", + UserWarning, + ) elif response.status_code in (500, 503, 504): - pass + warnings.warn( + f"The client encountered an unexpected temporary server error " + "({response.status_code}: {response.text}).", + UserWarning, + ) def refresh_data(self, data): """ diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index 228e26da4..f9e34f945 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -17,6 +17,7 @@ """ import pytest +import json from strawberryfields import api_client from strawberryfields.api_client import ( requests, @@ -68,6 +69,10 @@ def __init__(self, status_code): def json(self): return self.possible_responses[self.status_code] + @property + def text(self): + return json.dumps(self.json()) + def raise_for_status(self): raise requests.exceptions.HTTPError() From d9fcc1b97adde59b64b678afcdba2268f466f3d2 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 14:50:27 -0400 Subject: [PATCH 027/335] Better documentation (add Args and Returns where applicable) --- strawberryfields/api_client.py | 77 ++++++++++++++++++++++++++++++++-- 1 file changed, 73 insertions(+), 4 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index b104f029f..2c3cf9403 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -85,6 +85,13 @@ def join_path(base_path, path): """ Joins two paths, a base path and another path and returns a string. + + Args: + base_path (str): The left side of the joined path. + path (str): The right side of the joined path. + + Returns: + str: A joined path. """ return urllib.parse.urljoin(f"{base_path}/", path) @@ -181,7 +188,11 @@ def load_configuration_from_file(self): def authenticate(self, username, password): """ Retrieve an authentication token from the server via username - and password authentication. + and password authentication and calls set_authorization_header. + + Args: + username (str): A user name. + password (str): A password. """ raise NotImplementedError() @@ -189,18 +200,34 @@ def set_authorization_header(self, authentication_token): """ Adds the authorization header to the headers dictionary to be included with all API requests. + + Args: + authentication_token (str): An authentication token used to access the API. """ self.headers["Authorization"] = authentication_token def join_path(self, path): """ Joins a base url with an additional path (e.g. a resource name and ID) + + Args: + path (str): A path to be joined with BASE_URL. + + Returns: + str: A joined path. """ return join_path(self.BASE_URL, path) def get(self, path): """ Sends a GET request to the provided path. Returns a response object. + + Args: + path (str): A path to send the GET request to. + + Returns: + requests.Response: A response object, or None if no response could be fetched from the + server. """ try: response = requests.get(url=self.join_path(path), headers=self.HEADERS) @@ -213,6 +240,14 @@ def post(self, path, payload): """ Converts payload to a JSON string. Sends a POST request to the provided path. Returns a response object. 
+ + Args: + path (str): A path to send the GET request to. + payload: A JSON serializable object to be sent to the server. + + Returns: + requests.Response: A response object, or None if no response could be fetched from the + server. """ # TODO: catch any exceptions from dumping JSON data = json.dumps(payload) @@ -252,6 +287,9 @@ def get(self, job_id): Attempts to retrieve a particular record by sending a GET request to the appropriate endpoint. If successful, the resource object is populated with the data in the response. + + Args: + job_id (int): The ID of an object to be retrieved. """ if "GET" not in self.resource.SUPPORTED_METHODS: raise MethodNotSupportedException("GET method on this resource is not supported") @@ -263,6 +301,9 @@ def create(self, **params): """ Attempts to create a new instance of a resource by sending a POST request to the appropriate endpoint. + + Args: + **params: Arbitrary parameters to be passed on to the POST request. """ if "POST" not in self.resource.SUPPORTED_METHODS: raise MethodNotSupportedException("POST method on this resource is not supported") @@ -278,6 +319,9 @@ def handle_response(self, response): """ Store the status code on the manager object and handle the response based on the status code. + + Args: + response (requests.Response): A response object to be parsed. """ self.http_status_code = response.status_code if response.status_code in (200, 201): @@ -288,12 +332,18 @@ def handle_response(self, response): def handle_success_response(self, response): """ Handles a successful response by refreshing the instance fields. + + Args: + response (requests.Response): A response object to be parsed. """ self.refresh_data(response.json()) def handle_error_response(self, response): """ Handles an error response that is returned by the server. + + Args: + response (requests.Response): A response object to be parsed. """ # TODO: Improve error messaging and parse the actual error output (json). @@ -321,8 +371,10 @@ def refresh_data(self, data): """ Refreshes the instance's attributes with the provided data and converts it to the correct type. - """ + Args: + data (dict): A dictionary containing keys and values of data to be stored on the object. + """ for field in self.resource.fields: field.set(data.get(field.name, None)) @@ -338,6 +390,12 @@ class Resource: fields = () def __init__(self, client=None): + """ + Initialize the Resource by populating attributes based on fields and settings a manager. + + Args: + client (APIClient): An APIClient instance to use as a client. + """ self.manager = ResourceManager(self, client=client) for field in self.fields: setattr(self, field.name, field) @@ -365,8 +423,11 @@ class Field: def __init__(self, name, clean=str): """ Initialize the Field object with a name and a cleaning function. - """ + Args: + name (str): A string representing the name of the field (e.g. "created_at"). + clean: A method that returns a cleaned value of the field, of the correct type. + """ self.name = name self.clean = clean @@ -385,6 +446,9 @@ def __bool__(self): def set(self, value): """ Set the value of the Field to `value`. + + Args: + value: The value to be stored on the Field object. """ self.value = value @@ -429,7 +493,6 @@ def refresh_data(self): """ Refresh the job fields and attach a JobResult and JobCircuit object to the Job instance. """ - super().refresh_data() if self.result is not None: @@ -450,6 +513,9 @@ class JobResult(Resource): def __init__(self, job_id): """ Initialize the JobResult resource with a pre-defined field. 
+ + Args: + job_id (int): The ID of the Job object corresponding to the JobResult object. """ self.fields = (Field("result", json.loads),) @@ -468,6 +534,9 @@ class JobCircuit(Resource): def __init__(self, job_id): """ Initialize the JobCircuit resource with a pre-defined field. + + Args: + job_id (int): The ID of the Job object corresponding to the JobResult object. """ self.fields = (Field("circuit"),) From f9ea7edb755756785e1da414c0aad2aa203d04ba Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 15:05:22 -0400 Subject: [PATCH 028/335] Parse environment variable for SSL correctly. --- strawberryfields/api_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 2c3cf9403..a2d0a4bb4 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -173,10 +173,10 @@ def get_configuration_from_environment(self): configuration = { "authentication_token": os.environ.get(self.ENV_AUTHENTICATION_TOKEN_KEY), "hostname": os.environ.get(self.ENV_API_HOSTNAME_KEY), - "use_ssl": os.environ.get(self.ENV_USE_SSL_KEY), + "use_ssl": os.environ.get(self.ENV_USE_SSL_KEY) in ("1", "True", "TRUE"), } - return {key: value for key, value in configuration.items() if value is not None} + return {key: value for key, value in configuration.items() if key in os.environ} def load_configuration_from_file(self): """ From 2b58bb9373f37fc5c394df526b45495c8edfaad9 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 15:42:04 -0400 Subject: [PATCH 029/335] Minor fixes and changes (typos, missing parameters) --- strawberryfields/api_client.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index a2d0a4bb4..e6a19f44a 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -21,12 +21,14 @@ .. currentmodule:: strawberryfields.api_client -This module provides a thin client that communicates with the compute-service API over the HTTP +This module provides a thin client that communicates with the Xanadu Platform API over the HTTP protocol, based on the requests module. It also provides helper classes to facilitate interacting with this API via the Resource subclasses, as well as the ResourceManager wrapper around APIClient that is available for each resource. -A single APIClient instance can be used throughout one's session in the application. +A single APIClient instance can be used throughout one's session in the application. The application +will attempt to configure the APIClient instance using a configuration file or defaults, but the +user can choose to override various parameters of the APIClient manually. A typical use looks like this: .. code-block:: python @@ -49,7 +51,7 @@ Zgate(sqrt(2)*q1) | 2 MeasureHeterodyne() | 2 ''' - job.manager.create(circuit=circuit}) + job.manager.create(circuit=circuit) job.id # Returns the job ID that was generated by the server job.reload() # Fetches the latest job data from the server @@ -116,7 +118,7 @@ class ObjectAlreadyCreatedException(TypeError): class APIClient: """ - An object that allows the user to connect to the compute-service API. + An object that allows the user to connect to the Xanadu Platform API. """ ALLOWED_HOSTNAMES = ["localhost"] @@ -268,7 +270,7 @@ class ResourceManager: def __init__(self, resource, client=None): """ - Initialize the manager with resource and client instances . 
A client + Initialize the manager with resource and client instances. A client instance is used as a persistent HTTP communications object, and a resource instance corresponds to a particular type of resource (e.g. Job) @@ -469,7 +471,7 @@ class Job(Resource): SUPPORTED_METHODS = ("GET", "POST") PATH = "jobs" - def __init__(self): + def __init__(self, client=None): """ Initialize the Job resource with a set of pre-defined fields. """ @@ -487,7 +489,7 @@ def __init__(self): self.result = None self.circuit = None - super().__init__() + super().__init__(client=client) def refresh_data(self): """ @@ -510,7 +512,7 @@ class JobResult(Resource): SUPPORTED_METHODS = ("GET",) PATH = "jobs/{job_id}/result" - def __init__(self, job_id): + def __init__(self, job_id, client=None): """ Initialize the JobResult resource with a pre-defined field. @@ -520,7 +522,7 @@ def __init__(self, job_id): self.fields = (Field("result", json.loads),) self.PATH = self.PATH.format(job_id=job_id) - super().__init__() + super().__init__(client=client) class JobCircuit(Resource): @@ -531,7 +533,7 @@ class JobCircuit(Resource): SUPPORTED_METHODS = ("GET",) PATH = "jobs/{job_id}/circuit" - def __init__(self, job_id): + def __init__(self, job_id, client=None): """ Initialize the JobCircuit resource with a pre-defined field. @@ -541,4 +543,4 @@ def __init__(self, job_id): self.fields = (Field("circuit"),) self.PATH = self.PATH.format(job_id=job_id) - super().__init__() + super().__init__(client=client) From 93ee74e9e8e92373c7f39094d98ddabcf018f9f6 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 20 Jun 2019 16:20:23 -0400 Subject: [PATCH 030/335] Remove f-strings to maintain compatibility --- strawberryfields/api_client.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index e6a19f44a..b1cf72318 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -95,7 +95,7 @@ def join_path(base_path, path): Returns: str: A joined path. 
""" - return urllib.parse.urljoin(f"{base_path}/", path) + return urllib.parse.urljoin("{}/".format(base_path), path) class MethodNotSupportedException(TypeError): @@ -125,9 +125,9 @@ class APIClient: DEFAULT_HOSTNAME = "localhost" ENV_KEY_PREFIX = "SF_API_" - ENV_AUTHENTICATION_TOKEN_KEY = f"{ENV_KEY_PREFIX}AUTHENTICATION_TOKEN" - ENV_API_HOSTNAME_KEY = f"{ENV_KEY_PREFIX}API_HOSTNAME" - ENV_USE_SSL_KEY = f"{ENV_KEY_PREFIX}USE_SSL" + ENV_AUTHENTICATION_TOKEN_KEY = "{}AUTHENTICATION_TOKEN".format(ENV_KEY_PREFIX) + ENV_API_HOSTNAME_KEY = "{}API_HOSTNAME".format(ENV_KEY_PREFIX) + ENV_USE_SSL_KEY = "{}USE_SSL".format(ENV_KEY_PREFIX) def __init__(self, **kwargs): """ @@ -159,7 +159,7 @@ def __init__(self, **kwargs): warnings.warn("Connecting insecurely to API server", UserWarning) self.HOSTNAME = configuration["hostname"] - self.BASE_URL = f"{'https' if self.USE_SSL else 'http'}://{self.HOSTNAME}" + self.BASE_URL = "{}://{}".format("https" if self.USE_SSL else "http", self.HOSTNAME) self.AUTHENTICATION_TOKEN = configuration["authentication_token"] self.HEADERS = {} @@ -235,7 +235,7 @@ def get(self, path): response = requests.get(url=self.join_path(path), headers=self.HEADERS) except requests.exceptions.ConnectionError as e: response = None - warnings.warn(f"Could not connect to server ({e})") + warnings.warn("Could not connect to server ({})".format(e)) return response def post(self, path, payload): @@ -257,7 +257,7 @@ def post(self, path, payload): response = requests.post(url=self.join_path(path), headers=self.HEADERS, data=data) except requests.exceptions.ConnectionError as e: response = None - warnings.warn(f"Could not connect to server ({e})") + warnings.warn("Could not connect to server ({})".format(e)) return response @@ -353,19 +353,19 @@ def handle_error_response(self, response): if response.status_code in (400, 409): warnings.warn( "The server did not accept the request, and returned an error " - f"({response.status_code}: {response.text}).", + "({}: {}).".format(response.status_code, response.text), UserWarning, ) elif response.status_code == 401: warnings.warn( "The server did not accept the request due to an authentication error " - f"({response.status_code}: {response.text}).", + "({}: {}).".format(response.status_code, response.text), UserWarning, ) elif response.status_code in (500, 503, 504): warnings.warn( - f"The client encountered an unexpected temporary server error " - "({response.status_code}: {response.text}).", + "The client encountered an unexpected temporary server error " + "({}: {}).".format(response.status_code, response.text), UserWarning, ) From d10b6afe4efecfacd886f8eaabb4efe0af455e69 Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 25 Jun 2019 09:06:35 -0400 Subject: [PATCH 031/335] Clean up (fix typos, rename parameters, add missing text) --- strawberryfields/api_client.py | 26 ++++++++++++++++---------- tests/api_client/test_api_client.py | 4 ++-- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index b1cf72318..88db7b9c9 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -210,7 +210,7 @@ def set_authorization_header(self, authentication_token): def join_path(self, path): """ - Joins a base url with an additional path (e.g. a resource name and ID) + Joins a base url with an additional path (e.g., a resource name and ID) Args: path (str): A path to be joined with BASE_URL. 
@@ -275,28 +275,28 @@ def __init__(self, resource, client=None): resource instance corresponds to a particular type of resource (e.g. Job) """ - setattr(self, "resource", resource) - setattr(self, "client", client or APIClient()) + self.resource = resource + self.client = client or APIClient() def join_path(self, path): """ - Joins a resource base path with an additional path (e.g. an ID) + Joins a resource base path with an additional path (e.g., an ID) """ return join_path(self.resource.PATH, path) - def get(self, job_id): + def get(self, resource_id): """ Attempts to retrieve a particular record by sending a GET request to the appropriate endpoint. If successful, the resource object is populated with the data in the response. Args: - job_id (int): The ID of an object to be retrieved. + resource_id (int): The ID of an object to be retrieved. """ if "GET" not in self.resource.SUPPORTED_METHODS: raise MethodNotSupportedException("GET method on this resource is not supported") - response = self.client.get(self.join_path(str(job_id))) + response = self.client.get(self.join_path(str(resource_id))) self.handle_response(response) def create(self, **params): @@ -350,7 +350,7 @@ def handle_error_response(self, response): # TODO: Improve error messaging and parse the actual error output (json). - if response.status_code in (400, 409): + if response.status_code in (400, 404, 409): warnings.warn( "The server did not accept the request, and returned an error " "({}: {}).".format(response.status_code, response.text), @@ -368,6 +368,12 @@ def handle_error_response(self, response): "({}: {}).".format(response.status_code, response.text), UserWarning, ) + else: + warnings.warn( + "The client encountered an unexpected server error " + "({}: {}).".format(response.status_code, response.text), + UserWarning, + ) def refresh_data(self, data): """ @@ -393,7 +399,7 @@ class Resource: def __init__(self, client=None): """ - Initialize the Resource by populating attributes based on fields and settings a manager. + Initialize the Resource by populating attributes based on fields and setting a manager. Args: client (APIClient): An APIClient instance to use as a client. @@ -427,7 +433,7 @@ def __init__(self, name, clean=str): Initialize the Field object with a name and a cleaning function. Args: - name (str): A string representing the name of the field (e.g. "created_at"). + name (str): A string representing the name of the field (e.g., "created_at"). clean: A method that returns a cleaned value of the field, of the correct type. 
""" self.name = name diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index f9e34f945..225a4528e 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -112,8 +112,8 @@ class MockGETResponse(MockResponse): "detail": "Requires authentication" }, 404: { - "code": "", - "detail": "", + "code": "not-found", + "detail": "The requested resource could not be found or does not exist.", }, 500: { "code": "server-error", From d2511e1024ef0d48e9267e179075bc7bdc12c32e Mon Sep 17 00:00:00 2001 From: Nathan Killoran Date: Tue, 25 Jun 2019 13:00:50 -0400 Subject: [PATCH 032/335] Apply suggestions from code review --- strawberryfields/api_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 88db7b9c9..091b95697 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -272,7 +272,7 @@ def __init__(self, resource, client=None): """ Initialize the manager with resource and client instances. A client instance is used as a persistent HTTP communications object, and a - resource instance corresponds to a particular type of resource (e.g. + resource instance corresponds to a particular type of resource (e.g., Job) """ self.resource = resource From 27e91fe0c977c21932d319b7a7f3aa30c1ec5ed7 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 26 Jun 2019 09:11:19 -0400 Subject: [PATCH 033/335] Clean up tests, add dependency to setup.py, minor bug fix --- setup.py | 3 +- strawberryfields/api_client.py | 10 ++- tests/api_client/test_api_client.py | 95 +++++++++++++++++++++++++++-- 3 files changed, 101 insertions(+), 7 deletions(-) diff --git a/setup.py b/setup.py index 0b20ab6bd..1c2df1975 100644 --- a/setup.py +++ b/setup.py @@ -27,7 +27,8 @@ "numpy>=1.16.3", "scipy>=1.0.0", "networkx>=2.0", - "quantum-blackbird>=0.2.0" + "quantum-blackbird>=0.2.0", + "python-dateutil>=2.8.0", ] # extra_requirements = [ diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 091b95697..de4965079 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -121,7 +121,10 @@ class APIClient: An object that allows the user to connect to the Xanadu Platform API. """ - ALLOWED_HOSTNAMES = ["localhost"] + ALLOWED_HOSTNAMES = [ + "localhost", + "localhost:8080", + ] DEFAULT_HOSTNAME = "localhost" ENV_KEY_PREFIX = "SF_API_" @@ -163,6 +166,9 @@ def __init__(self, **kwargs): self.AUTHENTICATION_TOKEN = configuration["authentication_token"] self.HEADERS = {} + if self.AUTHENTICATION_TOKEN is not None: + self.set_authorization_header(self.AUTHENTICATION_TOKEN) + # TODO: warn if no authentication token def get_configuration_from_environment(self): @@ -206,7 +212,7 @@ def set_authorization_header(self, authentication_token): Args: authentication_token (str): An authentication token used to access the API. """ - self.headers["Authorization"] = authentication_token + self.HEADERS["Authorization"] = authentication_token def join_path(self, path): """ diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index 225a4528e..4262c52ba 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -61,6 +61,11 @@ def client(): class MockResponse: + ''' + A helper class to generate a mock response based on status code. Mocks + the `json` and `text` attributes of a requests.Response class. 
+ ''' + status_code = None def __init__(self, status_code): @@ -138,6 +143,9 @@ def raise_for_status(self): @pytest.mark.api_client class TestAPIClient: def test_init_default_client(self): + """ + Test that initializing a default client generates an APIClient with the expected params. + """ client = api_client.APIClient() assert client.USE_SSL is True assert client.AUTHENTICATION_TOKEN is None @@ -145,6 +153,10 @@ def test_init_default_client(self): assert client.HEADERS == {} def test_init_default_client_no_ssl(self): + """ + Test setting use_ssl to False when initializing a client generates the correct base URL and + sets the correct flag. + """ client = api_client.APIClient(use_ssl=False) assert client.USE_SSL is False assert client.AUTHENTICATION_TOKEN is None @@ -152,27 +164,64 @@ def test_init_default_client_no_ssl(self): assert client.HEADERS == {} def test_init_custom_token_client(self): + """ + Test that the token is correctly set when initializing a client. + """ test_token = 'TEST' client = api_client.APIClient(authentication_token=test_token) assert client.AUTHENTICATION_TOKEN == test_token - def test_load_configuration(self, client): + def test_init_custom_token_client_headers_set(self, monkeypatch): + """ + Test that set_authentication_token is being called when setting a custom token. + """ + test_token = 'TEST' + mock_set_authorization_header = MagicMock() + monkeypatch.setattr( + api_client.APIClient, "set_authorization_header", mock_set_authorization_header) + api_client.APIClient(authentication_token=test_token) + mock_set_authorization_header.assert_called_once_with(test_token) + + def test_set_authorization_header(self): + """ + Test that the authentication token is added to the header correctly. + """ + client = api_client.APIClient() + + authentication_token = MagicMock() + client.set_authorization_header(authentication_token) + assert client.HEADERS['Authorization'] == authentication_token + + def test_load_configuration_from_file(self, client): + """ + Test that the configuration is loaded from file correctly (not yet implemented). + """ with pytest.raises(NotImplementedError): client.load_configuration_from_file() def test_authenticate(self, client): + """ + Test that the client can authenticate correctly (not yet implemented). + """ with pytest.raises(NotImplementedError): username = 'TEST_USER' password = 'TEST_PASSWORD' client.authenticate(username, password) def test_join_path(self, client): - assert client.join_path('jobs') == f'{client.BASE_URL}/jobs' + """ + Test that two paths can be joined and separated by a forward slash. + """ + assert client.join_path('jobs') == '{client.BASE_URL}/jobs'.format(client=client) @pytest.mark.api_client class TestResourceManager: def test_init(self): + """ + Test that a resource manager instance can be initialized correctly with a resource and + client instance + """ resource = MagicMock() client = MagicMock() manager = ResourceManager(resource, client) @@ -181,6 +230,9 @@ def test_init(self): assert manager.client == client def test_join_path(self): + """ + Test that the resource path can be joined corectly with the base path + """ mock_resource = MagicMock() mock_resource.PATH = 'some-path' @@ -188,6 +240,10 @@ def test_join_path(self): assert manager.join_path('test') == "some-path/test" def test_get_unsupported(self): + """ + Test a GET request with a resource that does not support it. Asserts that + MethodNotSupportedException is raised. 
+ """ mock_resource = MagicMock() mock_resource.SUPPORTED_METHODS = () manager = ResourceManager(mock_resource, MagicMock()) @@ -195,6 +251,10 @@ def test_get_unsupported(self): manager.get(1) def test_get(self, monkeypatch): + """ + Test a successful GET request. Tests that manager.handle_response is being called with + the correct Response object. + """ mock_resource = MagicMock() mock_client = MagicMock() mock_response = MagicMock() @@ -212,6 +272,10 @@ def test_get(self, monkeypatch): manager.handle_response.assert_called_once_with(mock_response) def test_create_unsupported(self): + """ + Test a POST (create) request with a resource that does not support that type or request. + Asserts that MethodNotSupportedException is raised. + """ mock_resource = MagicMock() mock_resource.SUPPORTED_METHODS = () manager = ResourceManager(mock_resource, MagicMock()) @@ -219,6 +283,10 @@ def test_create_unsupported(self): manager.create() def test_create_id_already_exists(self): + """ + Tests that once an object is created, create method can not be called again. Asserts that + ObjectAlreadyCreatedException is raised. + """ mock_resource = MagicMock() mock_resource.SUPPORTED_METHODS = ('POST',) mock_resource.id = MagicMock() @@ -227,6 +295,10 @@ def test_create_id_already_exists(self): manager.create() def test_create(self, monkeypatch): + """ + Tests a successful POST (create) method. Asserts that handle_response is called with the + correct Response object. + """ mock_resource = MagicMock() mock_client = MagicMock() mock_response = MagicMock() @@ -245,6 +317,10 @@ def test_create(self, monkeypatch): manager.handle_response.assert_called_once_with(mock_response) def test_handle_response(self, monkeypatch): + """ + Tests that a successful response initiates a call to handle_success_response, and that an + error response initiates a call to handle_error_response. + """ mock_resource = MagicMock() mock_client = MagicMock() mock_response = MagicMock() @@ -268,25 +344,33 @@ def test_handle_response(self, monkeypatch): mock_handle_success_response.assert_called_once_with(mock_response) def test_handle_refresh_data(self): + """ + Tests the ResourceManager.refresh_data method. Ensures that Field.set is called once with + the correct data value. + """ mock_resource = MagicMock() mock_client = MagicMock() fields = [MagicMock() for i in range(5)] mock_resource.fields = {f: MagicMock() for f in fields} - mock_data = {f: MagicMock() for f in fields} + mock_data = {f.name: MagicMock() for f in fields} manager = ResourceManager(mock_resource, mock_client) manager.refresh_data(mock_data) for field in mock_resource.fields: - field.set.assert_called_once() + field.set.assert_called_once_with(mock_data[field.name]) @pytest.mark.api_client class TestJob: def test_create_created(self, monkeypatch): + """ + Tests a successful Job creatioin with a mock POST response. Asserts that all fields on + the Job instance have been set correctly and match the mock data. + """ monkeypatch.setattr( requests, "post", @@ -299,6 +383,9 @@ def test_create_created(self, monkeypatch): assert getattr(job, key).value == SAMPLE_JOB_CREATE_RESPONSE[key] def test_create_bad_request(self, monkeypatch): + """ + Tests that the correct error code is returned when a bad request is sent to the server. 
+ """ monkeypatch.setattr( requests, "post", From 328e13325bcaadc939c7217f976eee8cfc7ce0b5 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 26 Jun 2019 09:12:30 -0400 Subject: [PATCH 034/335] Run black on tests --- tests/api_client/test_api_client.py | 75 +++++++++++------------------ 1 file changed, 29 insertions(+), 46 deletions(-) diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index 4262c52ba..aee6960bb 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -56,15 +56,15 @@ def client(): "created_at": "2019-05-24T15:55:43.872531Z", "started_at": "2019-05-24T16:01:12.145636Z", "finished_at": "2019-05-24T16:01:12.145645Z", - "running_time": "9µs" + "running_time": "9µs", } class MockResponse: - ''' + """ A helper class to generate a mock response based on status code. Mocks the `json` and `text` attributes of a requests.Response class. - ''' + """ status_code = None @@ -89,22 +89,17 @@ class MockPOSTResponse(MockResponse): "code": "parse-error", "detail": ( "The blackbird script could not be parsed. " - "Please fix errors in the script and try again.") - }, - 401: { - "code": "unauthenticated", - "detail": "Requires authentication" + "Please fix errors in the script and try again." + ), }, + 401: {"code": "unauthenticated", "detail": "Requires authentication"}, 409: { "code": "unsupported-circuit", - "detail": ( - "This circuit is not compatible with the specified hardware.") + "detail": ("This circuit is not compatible with the specified hardware."), }, 500: { "code": "server-error", - "detail": ( - "Unexpected server error. Please try your request again " - "later.") + "detail": ("Unexpected server error. Please try your request again " "later."), }, } @@ -112,19 +107,14 @@ class MockPOSTResponse(MockResponse): class MockGETResponse(MockResponse): possible_responses = { 200: SAMPLE_JOB_RESPONSE, - 401: { - "code": "unauthenticated", - "detail": "Requires authentication" - }, + 401: {"code": "unauthenticated", "detail": "Requires authentication"}, 404: { "code": "not-found", "detail": "The requested resource could not be found or does not exist.", }, 500: { "code": "server-error", - "detail": ( - "Unexpected server error. Please try your request again " - "later.") + "detail": ("Unexpected server error. Please try your request again " "later."), }, } @@ -149,7 +139,7 @@ def test_init_default_client(self): client = api_client.APIClient() assert client.USE_SSL is True assert client.AUTHENTICATION_TOKEN is None - assert client.BASE_URL == 'https://localhost' + assert client.BASE_URL == "https://localhost" assert client.HEADERS == {} def test_init_default_client_no_ssl(self): @@ -160,14 +150,14 @@ def test_init_default_client_no_ssl(self): client = api_client.APIClient(use_ssl=False) assert client.USE_SSL is False assert client.AUTHENTICATION_TOKEN is None - assert client.BASE_URL == 'http://localhost' + assert client.BASE_URL == "http://localhost" assert client.HEADERS == {} def test_init_custom_token_client(self): """ Test that the token is correctly set when initializing a client. """ - test_token = 'TEST' + test_token = "TEST" client = api_client.APIClient(authentication_token=test_token) assert client.AUTHENTICATION_TOKEN == test_token @@ -175,10 +165,11 @@ def test_init_custom_token_client_headers_set(self, monkeypatch): """ Test that set_authentication_token is being called when setting a custom token. 
""" - test_token = 'TEST' + test_token = "TEST" mock_set_authorization_header = MagicMock() monkeypatch.setattr( - api_client.APIClient, "set_authorization_header", mock_set_authorization_header) + api_client.APIClient, "set_authorization_header", mock_set_authorization_header + ) api_client.APIClient(authentication_token=test_token) mock_set_authorization_header.assert_called_once_with(test_token) @@ -190,7 +181,7 @@ def test_set_authorization_header(self): authentication_token = MagicMock() client.set_authorization_header(authentication_token) - assert client.HEADERS['Authorization'] == authentication_token + assert client.HEADERS["Authorization"] == authentication_token def test_load_configuration_from_file(self, client): """ @@ -204,15 +195,15 @@ def test_authenticate(self, client): Test that the client can authenticate correctly (not yet implemented). """ with pytest.raises(NotImplementedError): - username = 'TEST_USER' - password = 'TEST_PASSWORD' + username = "TEST_USER" + password = "TEST_PASSWORD" client.authenticate(username, password) def test_join_path(self, client): """ Test that two paths can be joined and separated by a forward slash. """ - assert client.join_path('jobs') == '{client.BASE_URL}/jobs'.format(client=client) + assert client.join_path("jobs") == "{client.BASE_URL}/jobs".format(client=client) @pytest.mark.api_client @@ -234,10 +225,10 @@ def test_join_path(self): Test that the resource path can be joined corectly with the base path """ mock_resource = MagicMock() - mock_resource.PATH = 'some-path' + mock_resource.PATH = "some-path" manager = ResourceManager(mock_resource, MagicMock()) - assert manager.join_path('test') == "some-path/test" + assert manager.join_path("test") == "some-path/test" def test_get_unsupported(self): """ @@ -260,7 +251,7 @@ def test_get(self, monkeypatch): mock_response = MagicMock() mock_client.get = MagicMock(return_value=mock_response) - mock_resource.SUPPORTED_METHODS = ('GET',) + mock_resource.SUPPORTED_METHODS = ("GET",) manager = ResourceManager(mock_resource, mock_client) monkeypatch.setattr(manager, "handle_response", MagicMock()) @@ -288,7 +279,7 @@ def test_create_id_already_exists(self): ObjectAlreadyCreatedException is raised. """ mock_resource = MagicMock() - mock_resource.SUPPORTED_METHODS = ('POST',) + mock_resource.SUPPORTED_METHODS = ("POST",) mock_resource.id = MagicMock() manager = ResourceManager(mock_resource, MagicMock()) with pytest.raises(ObjectAlreadyCreatedException): @@ -304,7 +295,7 @@ def test_create(self, monkeypatch): mock_response = MagicMock() mock_client.post = MagicMock(return_value=mock_response) - mock_resource.SUPPORTED_METHODS = ('POST',) + mock_resource.SUPPORTED_METHODS = ("POST",) mock_resource.id = None manager = ResourceManager(mock_resource, mock_client) @@ -329,11 +320,9 @@ def test_handle_response(self, monkeypatch): manager = ResourceManager(mock_resource, mock_client) - monkeypatch.setattr( - manager, "handle_success_response", mock_handle_success_response) + monkeypatch.setattr(manager, "handle_success_response", mock_handle_success_response) - monkeypatch.setattr( - manager, "handle_error_response", mock_handle_error_response) + monkeypatch.setattr(manager, "handle_error_response", mock_handle_error_response) manager.handle_response(mock_response) assert manager.http_status_code == mock_response.status_code @@ -371,10 +360,7 @@ def test_create_created(self, monkeypatch): Tests a successful Job creatioin with a mock POST response. 
Asserts that all fields on the Job instance have been set correctly and match the mock data. """ - monkeypatch.setattr( - requests, - "post", - lambda url, headers, data: MockPOSTResponse(201)) + monkeypatch.setattr(requests, "post", lambda url, headers, data: MockPOSTResponse(201)) job = Job() job.manager.create(params={}) @@ -386,10 +372,7 @@ def test_create_bad_request(self, monkeypatch): """ Tests that the correct error code is returned when a bad request is sent to the server. """ - monkeypatch.setattr( - requests, - "post", - lambda url, headers, data: MockPOSTResponse(400)) + monkeypatch.setattr(requests, "post", lambda url, headers, data: MockPOSTResponse(400)) job = Job() job.manager.create(params={}) From 2e35c7d3d61b4fb38bd42f271f6445c3a853477d Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 26 Jun 2019 09:17:31 -0400 Subject: [PATCH 035/335] Minor change to test docstring. --- tests/api_client/test_api_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index aee6960bb..167f9f41c 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -211,7 +211,7 @@ class TestResourceManager: def test_init(self): """ Test that a resource manager instance can be initialized correctly with a resource and - client instance + client instance. Assets that both manager.resource and manager.client are set. """ resource = MagicMock() client = MagicMock() @@ -222,7 +222,7 @@ def test_init(self): def test_join_path(self): """ - Test that the resource path can be joined corectly with the base path + Test that the resource path can be joined corectly with the base path. """ mock_resource = MagicMock() mock_resource.PATH = "some-path" From 83edf3910d9d1a619dfef0c14ae5b575985c43f6 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 26 Jun 2019 09:43:26 -0400 Subject: [PATCH 036/335] Implement support for loading configuration --- strawberryfields/api_client.py | 33 +++++++---------------------- strawberryfields/configuration.py | 9 +++++++- tests/api_client/test_api_client.py | 8 ++++--- 3 files changed, 21 insertions(+), 29 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index de4965079..7a3008792 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -77,12 +77,13 @@ import urllib import json -import os import warnings import dateutil.parser import requests +from strawberryfields import configuration + def join_path(base_path, path): """ @@ -121,10 +122,7 @@ class APIClient: An object that allows the user to connect to the Xanadu Platform API. """ - ALLOWED_HOSTNAMES = [ - "localhost", - "localhost:8080", - ] + ALLOWED_HOSTNAMES = ["localhost", "localhost:8080"] DEFAULT_HOSTNAME = "localhost" ENV_KEY_PREFIX = "SF_API_" @@ -146,7 +144,7 @@ def __init__(self, **kwargs): } # Try getting everything first from environment variables - configuration.update(self.get_configuration_from_environment()) + configuration.update(self.get_configuration_from_config()) # Override any values that are explicitly passed when initializing client configuration.update(kwargs) @@ -171,27 +169,12 @@ def __init__(self, **kwargs): # TODO: warn if no authentication token - def get_configuration_from_environment(self): - """ - Retrieve configuration from environment variables. 
The variables are defined as follows: - - SF_API_USE_SSL: True or False - - SF_API_HOSTNAME: The hostname of the server to connect to - - SF_API_AUTHENTICATION_TOKEN: The authentication token to use when connecting to the API - """ - configuration = { - "authentication_token": os.environ.get(self.ENV_AUTHENTICATION_TOKEN_KEY), - "hostname": os.environ.get(self.ENV_API_HOSTNAME_KEY), - "use_ssl": os.environ.get(self.ENV_USE_SSL_KEY) in ("1", "True", "TRUE"), - } - - return {key: value for key, value in configuration.items() if key in os.environ} - - def load_configuration_from_file(self): + def get_configuration_from_config(self): """ - Loads username, password, and/or authentication token from a config - file. + Retrieve configuration from environment variables or config file based on Strawberry Fields + configuration. """ - raise NotImplementedError() + return configuration.Configuration().api def authenticate(self, username, password): """ diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 97041e543..6f4a7ef36 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -61,7 +61,14 @@ Summary of options ------------------ -todo +SF_API_USE_SSL: + Whether to use SSL or not when connecting to the API. True or False. +SF_API_HOSTNAME: + The hostname of the server to connect to. Defaults to localhost. Must be one of the allowed + hosts. +SF_API_AUTHENTICATION_TOKEN: + The authentication token to use when connecting to the API. Will be sent with every request in + the header. Summary of methods ------------------ diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index 167f9f41c..0937b2d6b 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -19,6 +19,7 @@ import pytest import json from strawberryfields import api_client +from strawberryfields import configuration from strawberryfields.api_client import ( requests, Job, @@ -183,12 +184,13 @@ def test_set_authorization_header(self): client.set_authorization_header(authentication_token) assert client.HEADERS["Authorization"] == authentication_token - def test_load_configuration_from_file(self, client): + def test_get_configuration_from_config(self, client, monkeypatch): """ Test that the configuration is loaded from file correctly (not yet implemented). 
""" - with pytest.raises(NotImplementedError): - client.load_configuration_from_file() + mock_configuration = MagicMock() + monkeypatch.setattr(configuration, "Configuration", mock_configuration.Configuration) + assert client.get_configuration_from_config() == mock_configuration.Configuration().api def test_authenticate(self, client): """ From 20da1b97be2d7b8a0834367b482f04768fe3537d Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 26 Jun 2019 09:45:57 -0400 Subject: [PATCH 037/335] Fix scope issue --- strawberryfields/api_client.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 7a3008792..160a70fb2 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -137,31 +137,31 @@ def __init__(self, **kwargs): # TODO: Load username, password, or authentication token from # configuration file - configuration = { + config = { "use_ssl": True, "hostname": self.DEFAULT_HOSTNAME, "authentication_token": None, } # Try getting everything first from environment variables - configuration.update(self.get_configuration_from_config()) + config.update(self.get_configuration_from_config()) # Override any values that are explicitly passed when initializing client - configuration.update(kwargs) + config.update(kwargs) - if configuration["hostname"] is None: + if config["hostname"] is None: raise ValueError("hostname parameter is missing") - if configuration["hostname"] not in self.ALLOWED_HOSTNAMES: + if config["hostname"] not in self.ALLOWED_HOSTNAMES: raise ValueError("hostname parameter not in allowed list") - self.USE_SSL = configuration["use_ssl"] + self.USE_SSL = config["use_ssl"] if not self.USE_SSL: warnings.warn("Connecting insecurely to API server", UserWarning) - self.HOSTNAME = configuration["hostname"] + self.HOSTNAME = config["hostname"] self.BASE_URL = "{}://{}".format("https" if self.USE_SSL else "http", self.HOSTNAME) - self.AUTHENTICATION_TOKEN = configuration["authentication_token"] + self.AUTHENTICATION_TOKEN = config["authentication_token"] self.HEADERS = {} if self.AUTHENTICATION_TOKEN is not None: From 3f7478a647d2b25617baaa656f0bfa2274f45607 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 26 Jun 2019 11:08:38 -0400 Subject: [PATCH 038/335] Minor change to docstring --- strawberryfields/api_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 160a70fb2..f5ad1c652 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -143,7 +143,7 @@ def __init__(self, **kwargs): "authentication_token": None, } - # Try getting everything first from environment variables + # Try getting everything first from configuration config.update(self.get_configuration_from_config()) # Override any values that are explicitly passed when initializing client From e573c594aaedeab3e6532fb9bb795c82f3ad4f0a Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 2 Jul 2019 13:18:33 -0400 Subject: [PATCH 039/335] bug fixes and minor changes - Fix ResourceManager.get to accept get without an ID - Fix response handling when no response is received from server - Pass correct value when fetching resource from ID field - Bug fix when refreshing related fields (result and circuit) - Add is_complete and is_failed properties --- strawberryfields/api_client.py | 48 +++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/strawberryfields/api_client.py 
b/strawberryfields/api_client.py index f5ad1c652..4cf275124 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -137,11 +137,7 @@ def __init__(self, **kwargs): # TODO: Load username, password, or authentication token from # configuration file - config = { - "use_ssl": True, - "hostname": self.DEFAULT_HOSTNAME, - "authentication_token": None, - } + config = {"use_ssl": True, "hostname": self.DEFAULT_HOSTNAME, "authentication_token": None} # Try getting everything first from configuration config.update(self.get_configuration_from_config()) @@ -273,7 +269,7 @@ def join_path(self, path): """ return join_path(self.resource.PATH, path) - def get(self, resource_id): + def get(self, resource_id=None): """ Attempts to retrieve a particular record by sending a GET request to the appropriate endpoint. If successful, the resource @@ -314,11 +310,18 @@ def handle_response(self, response): Args: response (requests.Response): A response object to be parsed. """ - self.http_status_code = response.status_code - if response.status_code in (200, 201): - self.handle_success_response(response) + if hasattr(response, "status_code"): + self.http_status_code = response.status_code + + if response.status_code in (200, 201): + self.handle_success_response(response) + else: + self.handle_error_response(response) else: - self.handle_error_response(response) + self.handle_no_response() + + def handle_no_response(self): + warnings.warn("Your request could not be completed") def handle_success_response(self, response): """ @@ -339,6 +342,11 @@ def handle_error_response(self, response): # TODO: Improve error messaging and parse the actual error output (json). + # NOTE: This is here temporarily, however we should also handle actual errors returned + # from the server after a job is successfully submitted. + + self.resource.status.set("FAILED") + if response.status_code in (400, 404, 409): warnings.warn( "The server did not accept the request, and returned an error " @@ -375,6 +383,9 @@ def refresh_data(self, data): for field in self.resource.fields: field.set(data.get(field.name, None)) + if hasattr(self.resource, "refresh_data"): + self.resource.refresh_data() + class Resource: """ @@ -405,7 +416,7 @@ def reload(self): raise TypeError("Resource does not have an ID") if self.id: - self.manager.get(self.id) + self.manager.get(self.id.value) else: warnings.warn("Could not reload resource data", UserWarning) @@ -486,16 +497,23 @@ def __init__(self, client=None): super().__init__(client=client) + @property + def is_complete(self): + return self.status.value and self.status.value.upper() == "COMPLETE" + + @property + def is_failed(self): + # TODO this does not actually exist in the spec yet. + return self.status.value and self.status.value.upper() == "FAILED" + def refresh_data(self): """ Refresh the job fields and attach a JobResult and JobCircuit object to the Job instance. 
""" - super().refresh_data() - - if self.result is not None: + if self.result is None: self.result = JobResult(self.id, client=self.manager.client) - if self.circuit is not None: + if self.circuit is None: self.circuit = JobCircuit(self.id, client=self.manager.client) From 7844895978040610b80195c3e1b405ff29f3ee49 Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 2 Jul 2019 13:29:36 -0400 Subject: [PATCH 040/335] Update location of configuration --- strawberryfields/api_client.py | 2 +- tests/api_client/test_api_client.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 4cf275124..0be4d3691 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -82,7 +82,7 @@ import dateutil.parser import requests -from strawberryfields import configuration +from strawberryfields._dev import configuration def join_path(base_path, path): diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index 0937b2d6b..2c5f77fa6 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -19,7 +19,7 @@ import pytest import json from strawberryfields import api_client -from strawberryfields import configuration +from strawberryfields._dev import configuration from strawberryfields.api_client import ( requests, Job, From d4bc78f0702f1e5041ff8c39d63314f3619b49bd Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 2 Jul 2019 16:24:45 -0400 Subject: [PATCH 041/335] Initial commit for running jobs with StarshipEngine - Implemented parsing of job content and circuit code - Engine uses the APIClient to create a remote job - Engine polls the server for the job result - Modified Engine._run method (WIP) to support running jobs on chip0 This is a preliminary commit and is a work in progress --- strawberryfields/__init__.py | 4 +- strawberryfields/engine.py | 215 ++++++++++++++++++++++++------ strawberryfields/program.py | 9 +- strawberryfields/program_utils.py | 19 ++- tests/frontend/test_engine.py | 58 ++++++++ 5 files changed, 255 insertions(+), 50 deletions(-) diff --git a/strawberryfields/__init__.py b/strawberryfields/__init__.py index 3cb63618d..b3872a28a 100644 --- a/strawberryfields/__init__.py +++ b/strawberryfields/__init__.py @@ -57,13 +57,13 @@ Code details ~~~~~~~~~~~~ """ -from .engine import (Engine, LocalEngine) +from .engine import (Engine, LocalEngine, StarshipEngine) from .io import save, load from .program_utils import _convert as convert from .program import Program from ._version import __version__ -__all__ = ["Engine", "LocalEngine", "Program", "convert", "version", "save", "load", "about", "cite"] +__all__ = ["Engine", "LocalEngine", "StarshipEngine", "Program", "convert", "version", "save", "load", "about", "cite"] #: float: numerical value of hbar for the frontend (in the implicit units of position * momentum) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 6cab2fadc..00568dfa1 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -74,12 +74,16 @@ """ import abc +import uuid from collections.abc import Sequence from numpy import stack, shape +from time import sleep from .backends import load_backend from .backends.base import (NotApplicableError, BaseBackend) +from strawberryfields.api_client import (APIClient, Job) + class Result: """Result of a quantum computation. 
@@ -190,10 +194,10 @@ def _run(self, program, *, shots=1, compile_options={}, **kwargs): parts of a single computation. For each :class:`Program` instance given as input, the following happens: - * The Program instance is compiled and optimized for the target backend. + * The Program instance is compiled for the target backend. * The compiled program is executed on the backend. - * The measurement results of each subsystem (if any) are stored - in the :class:`.RegRef` instances of the corresponding Program, as well as in :attr:`~.samples`. + * The measurement results of each subsystem (if any) are stored in the :class:`.RegRef` + instances of the corresponding Program, as well as in :attr:`~.samples`. * The compiled program is appended to self.run_progs. Finally, the result of the computation is returned. @@ -209,7 +213,7 @@ def _run(self, program, *, shots=1, compile_options={}, **kwargs): Result: results of the computation """ - def _broadcast_nones(val, shots): + def _normalize_sample(val): """Helper function to ensure register values have same shape, even if not measured""" if val is None and shots > 1: return [None] * shots @@ -218,45 +222,47 @@ def _broadcast_nones(val, shots): if not isinstance(program, Sequence): program = [program] - try: - prev = self.run_progs[-1] if self.run_progs else None # previous program segment - for p in program: - if prev is None: - # initialize the backend - self._init_backend(p.init_num_subsystems) - else: - # there was a previous program segment - if not p.can_follow(prev): - raise RuntimeError("Register mismatch: program {}, '{}'.".format(len(self.run_progs), p.name)) - - # Copy the latest measured values in the RegRefs of p. - # We cannot copy from prev directly because it could be used in more than one engine. - for k, v in enumerate(self.samples): - p.reg_refs[k].val = v - - # if the program hasn't been compiled for this backend, do it now - if p.backend != self.backend_name: - p = p.compile(self.backend_name, **compile_options) # TODO: shots might be relevant for compilation? - p.lock() - - kwargs["shots"] = shots - # Note: by putting ``shots`` into keyword arguments, it allows for the - # signatures of methods in Operations to remain cleaner, since only - # Measurements need to know about shots - - self._run_program(p, **kwargs) - self.run_progs.append(p) - # store the latest measurement results - shots = kwargs.get("shots", 1) - self.samples = [_broadcast_nones(p.reg_refs[k].val, shots) for k in sorted(p.reg_refs)] - prev = p - except Exception as e: - raise e - else: - # program execution was successful - pass - - return Result(self.samples.copy()) + kwargs["shots"] = shots + # NOTE: by putting ``shots`` into keyword arguments, it allows for the + # signatures of methods in Operations to remain cleaner, since only + # Measurements need to know about shots + + if self.backend_name in getattr(self, "HARDWARE_BACKENDS", []): + p = program[0] + p = p.compile(self.backend_name) # TODO: does compile need to know about shots? 
+ p.lock() + self.run_progs.append(p) + samples = self._run_program(p, **kwargs) + return Result(samples) + + prev = self.run_progs[-1] if self.run_progs else None # previous program segment + for p in program: + if prev is None: + # initialize the backend + self._init_backend(p.init_num_subsystems) + else: + # there was a previous program segment + if not p.can_follow(prev): + raise RuntimeError("Register mismatch: program {}, '{}'.".format(len(self.run_progs), p.name)) + + # Copy the latest measured values in the RegRefs of p. + # We cannot copy from prev directly because it could be used in more than one engine. + for k, v in enumerate(self.samples): + p.reg_refs[k].val = v + + # if the program hasn't been compiled for this backend, do it now + if p.backend != self.backend_name: + p = p.compile(self.backend_name, **compile_options) # TODO: shots might be relevant for compilation? + p.lock() + + self._run_program(p, **kwargs) + self.run_progs.append(p) + + reg_refs = [p.reg_refs[k].val for k in sorted(p.reg_refs)] + self.samples = map(_normalize_sample, reg_refs) + prev = p + + return Result(list(self.samples)) class LocalEngine(BaseEngine): @@ -340,4 +346,127 @@ def run(self, program, *, shots=1, compile_options={}, modes=None, state_options return result +class StarshipEngine(BaseEngine): + """ + Starship quantum program executor engine. + + Executes :class:`.Program` instances on the chosen remote backend, and makes + the results available via :class:`.Result`. + + Args: + backend (str, BaseBackend): name of the backend, or a pre-constructed backend instance + """ + + API_DEFAULT_REFRESH_SECONDS = 0 + HARDWARE_BACKENDS = ('chip0', ) + + def __init__(self): + # Only chip0 backend supported initially. + backend = "chip0" + super().__init__(backend) + + self.client = APIClient(hostname="localhost") + self.jobs = [] + + def __str__(self): + return self.__class__.__name__ + '({})'.format(self.backend_name) + + def reset(self, backend_options=None): + """ + Reset must be called in order to submit a new job. This clears the job queue as well as + any ran Programs. + """ + if backend_options is None: + backend_options = {} + + super().reset(backend_options) + self.jobs.clear() + + def _init_backend(self, *args): + """ + TODO: This does not do anything rightn now. + """ + # Do nothing for now... + pass + + def generate_job_content(self, name, shots, blackbird_code): + """ + Generates a string representing the Blackbird code that will be sent to the server. + Assumes the current backend as the target. + + Args: + name (str): The name of the job to be created (e.g. StateTeleportation). + shots (int): The number of shots. + blackbird_code: The blackbird code of the job. + + Returns: + str: A string containing the job content to be sent to the server. + """ + target = self.backend_name + template = """ + name {name} + version 1.0 + target {target} (shots={shots}) + + {blackbird_code} + """.format( + name=name, + target=target, + shots=str(shots), + blackbird_code=blackbird_code) + + return "\n".join([l.strip() for l in template.strip().split("\n")]) + + def _run_program(self, program, **kwargs): + """ + Given a compiled program, gets the blackbird circuit code and creates (or resumes) a job + via the API. If the job is completed, returns the job result. + + A queued job can be interrupted by a KeyboardInterrupt event, at which point if the job ID + was retrieved from the server, the job will be accessible via engine.jobs. 
+ + Args: + program (strawberryfields.program.Program): A program instance to be executed remotely. + + Returns: + (list): A list representing the result samples + + Raises: + Exception: In case a job could not be submitted or completed. + TypeError: In case a job is already queued and a user is trying to submit a new job. + """ + blackbird_code = program.get_blackbird_syntax() + job_content = self.generate_job_content(blackbird_code=blackbird_code, **kwargs) + + if self.jobs: + raise TypeError("A job is already queued. Please reset the engine and try again.") + + job = Job(client=self.client) + job.manager.create(circuit=job_content) + self.jobs.append(job) + + try: + while not job.is_complete: + job.reload() + if job.is_failed: + raise Exception("The job could not be submitted or completed.") + sleep(self.API_DEFAULT_REFRESH_SECONDS) + + job.result.manager.get() + return job.result.result.value + except KeyboardInterrupt: + if job.id: + print("Job {} is queued in the background.".format(job.id.value)) + else: + raise Exception( + "Job could not be sent to server, please try again later.") + + def run(self, program, shots=1, name=None, **kwargs): + """ + Compile a given program and queue a job in the Starship. + """ + name = name or str(uuid.uuid4()) + return super()._run(program, shots=shots, name=name, **kwargs) + + Engine = LocalEngine # alias for backwards compatibility diff --git a/strawberryfields/program.py b/strawberryfields/program.py index 6001fc661..1f75199e3 100644 --- a/strawberryfields/program.py +++ b/strawberryfields/program.py @@ -171,7 +171,6 @@ def _print_list(i, q, print_fn=print): print_fn() - class Program: """Represents a quantum circuit. @@ -265,6 +264,14 @@ def print(self, print_fn=print): for k in self.circuit: print_fn(k) + def get_blackbird_syntax(self): + """ + Returns a string containing the Blackbird syntax for this program. + """ + + commands = [command.get_blackbird_syntax() for command in self.circuit] + return "\n".join(commands) + @property def context(self): """Syntactic sugar for defining a Program using the :code:`with` statement. diff --git a/strawberryfields/program_utils.py b/strawberryfields/program_utils.py index 1101d4eb7..760984798 100644 --- a/strawberryfields/program_utils.py +++ b/strawberryfields/program_utils.py @@ -34,7 +34,6 @@ # cf. _pydecimal.py in the python standard distribution. - def _convert(func): r"""Decorator for converting user defined functions to a :class:`RegRefTransform`. @@ -69,6 +68,7 @@ class RegRefError(IndexError): E.g., trying to apply a gate to a nonexistent or deleted subsystem. """ + class CircuitError(RuntimeError): """Exception raised by :class:`Program` when it encounters an illegal operation in the quantum circuit. @@ -76,6 +76,7 @@ class CircuitError(RuntimeError): E.g., trying to use a measurement result before it is available. """ + class MergeFailure(RuntimeError): """Exception raised by :meth:`strawberryfields.ops.Operation.merge` when an attempted merge fails. @@ -109,11 +110,21 @@ def __init__(self, op, reg): def __str__(self): """Print the command using Blackbird syntax.""" - temp = str(self.op) + return self.get_blackbird_syntax() + + def get_blackbird_syntax(self): + """ + Return a string containing the Blackbird syntac. 
+ """ + + operation = str(self.op) if self.op.ns == 0: # op takes no subsystems as parameters, do not print anything more - return temp - return '{} | ({})'.format(temp, ", ".join([str(rr) for rr in self.reg])) + code = operation + else: + subsystems = ", ".join([str(r) for r in self.reg]) + code = "{} | ({})".format(operation, subsystems) + return code def __lt__(self, other): # Needed as a tiebreaker for NetworkX lexicographical_topological_sort() diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 5ce642907..a9ef7ef04 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -156,3 +156,61 @@ def inspect(): eng.reset() eng.run([p1, p2]) assert inspect() == expected2 + + +class TestStarshipEngine: + """ + Tests various methods on the remote engine StarshipEngine. + """ + + def test_job_submitted(self, monkeypatch): + """ + Test that a job is successfully submitted to the APIClient instance. + """ + + # Note this is currently more of an integration test, currently a WIP / under development. + + from unittest.mock import MagicMock + + from strawberryfields.ops import S2gate, MeasureFock, Rgate, BSgate + from strawberryfields import StarshipEngine + from strawberryfields.api_client import APIClient + + engine = StarshipEngine() + + # We don't want to actually send any requests, though we should make sure POST was called + mock_api_client_post = MagicMock() + mock_get= MagicMock() + mock_get_response = MagicMock() + mock_get_response.status_code = 200 + mock_get_response.json.return_value = {'status': 'COMPLETE', 'id': 1234} + mock_get.return_value = mock_get_response + + mock_post_response = MagicMock() + mock_post_response.status_code = 201 + mock_post_response.json.return_value = {'status': 'QUEUED', 'id': 1234} + mock_api_client_post.return_value = mock_post_response + + monkeypatch.setattr(APIClient, "post", mock_api_client_post) + monkeypatch.setattr(APIClient, "get", mock_get) + + prog = sf.Program(4) + with prog.context as q: + S2gate(2) | [0, 2] + S2gate(2) | [1, 3] + Rgate(3) | 0 + BSgate() | [0, 1] + Rgate(3) | 0 + Rgate(3) | 1 + Rgate(3) | 2 + BSgate() | [2, 3] + Rgate(3) | 2 + Rgate(3) | 3 + MeasureFock() | [0] + MeasureFock() | [1] + MeasureFock() | [2] + MeasureFock() | [3] + + engine.run(prog) + + mock_api_client_post.assert_called_once() From cad3d7beaa280a8f5dd0a5c8eab1342b061e796d Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 3 Jul 2019 10:07:40 -0400 Subject: [PATCH 042/335] Update ALLOWED_HOSTNAMES, add User-Agent header, minor fixes Minor fixes include: - Fix ResourceManager.join_path when resource_id is None - Add docstrings where missing - Do not attempt to set resource.status to "FAILED" manually --- strawberryfields/api_client.py | 29 ++++++++++++++++++----------- tests/api_client/test_api_client.py | 4 ++-- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 0be4d3691..9d7819fa6 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -122,7 +122,10 @@ class APIClient: An object that allows the user to connect to the Xanadu Platform API. 
""" - ALLOWED_HOSTNAMES = ["localhost", "localhost:8080"] + USER_AGENT = "strawberryfields-api-client/0.1" + + ALLOWED_HOSTNAMES = ["localhost", "localhost:8080", "platform.strawberryfields.ai"] + DEFAULT_HOSTNAME = "localhost" ENV_KEY_PREFIX = "SF_API_" @@ -158,13 +161,11 @@ def __init__(self, **kwargs): self.HOSTNAME = config["hostname"] self.BASE_URL = "{}://{}".format("https" if self.USE_SSL else "http", self.HOSTNAME) self.AUTHENTICATION_TOKEN = config["authentication_token"] - self.HEADERS = {} + self.HEADERS = {"User-Agent": self.USER_AGENT} if self.AUTHENTICATION_TOKEN is not None: self.set_authorization_header(self.AUTHENTICATION_TOKEN) - # TODO: warn if no authentication token - def get_configuration_from_config(self): """ Retrieve configuration from environment variables or config file based on Strawberry Fields @@ -281,7 +282,10 @@ def get(self, resource_id=None): if "GET" not in self.resource.SUPPORTED_METHODS: raise MethodNotSupportedException("GET method on this resource is not supported") - response = self.client.get(self.join_path(str(resource_id))) + if resource_id is not None: + response = self.client.get(self.join_path(str(resource_id))) + else: + response = self.client.get(self.resource.PATH) self.handle_response(response) def create(self, **params): @@ -321,6 +325,9 @@ def handle_response(self, response): self.handle_no_response() def handle_no_response(self): + """ + Placeholder method to handle an unsuccessful request (e.g. due to no network connection). + """ warnings.warn("Your request could not be completed") def handle_success_response(self, response): @@ -342,11 +349,6 @@ def handle_error_response(self, response): # TODO: Improve error messaging and parse the actual error output (json). - # NOTE: This is here temporarily, however we should also handle actual errors returned - # from the server after a job is successfully submitted. - - self.resource.status.set("FAILED") - if response.status_code in (400, 404, 409): warnings.warn( "The server did not accept the request, and returned an error " @@ -499,11 +501,16 @@ def __init__(self, client=None): @property def is_complete(self): + """ + Returns True if the job status is "COMPLETE". Case insensitive. Returns False otherwise. + """ return self.status.value and self.status.value.upper() == "COMPLETE" @property def is_failed(self): - # TODO this does not actually exist in the spec yet. + """ + Returns True if the job status is "FAILED". Case insensitive. Returns False otherwise. 
+ """ return self.status.value and self.status.value.upper() == "FAILED" def refresh_data(self): diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index 2c5f77fa6..d2c733415 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -141,7 +141,7 @@ def test_init_default_client(self): assert client.USE_SSL is True assert client.AUTHENTICATION_TOKEN is None assert client.BASE_URL == "https://localhost" - assert client.HEADERS == {} + assert client.HEADERS['User-Agent'] == client.USER_AGENT def test_init_default_client_no_ssl(self): """ @@ -152,7 +152,7 @@ def test_init_default_client_no_ssl(self): assert client.USE_SSL is False assert client.AUTHENTICATION_TOKEN is None assert client.BASE_URL == "http://localhost" - assert client.HEADERS == {} + assert client.HEADERS['User-Agent'] == client.USER_AGENT def test_init_custom_token_client(self): """ From 3a48b1933356a3b7e00fa12866c33eaf8bc13e14 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 3 Jul 2019 14:11:52 -0400 Subject: [PATCH 043/335] Fix typo in configuration --- default_config.toml | 2 +- strawberryfields/_dev/configuration.py | 4 ++-- tests/frontend/test_configuration.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/default_config.toml b/default_config.toml index c979171fe..2d0d55bfc 100644 --- a/default_config.toml +++ b/default_config.toml @@ -5,7 +5,7 @@ [api] # Options for the Strawberry Fields Cloud API # Fill in your authentication -authentatication_token = None # example token form: 071cdcce-9241-4965-93af-4a4dbc739135 +authentication_token = None # example token form: 071cdcce-9241-4965-93af-4a4dbc739135 # Fill in the hostname of the Cloud API hostname = "localhost" # Whether Strawberry Fields should use SSL to connect to the API diff --git a/strawberryfields/_dev/configuration.py b/strawberryfields/_dev/configuration.py index 90dae5403..de55131f0 100644 --- a/strawberryfields/_dev/configuration.py +++ b/strawberryfields/_dev/configuration.py @@ -54,7 +54,7 @@ [api] # Options for the Strawberry Fields Cloud API - authentatication_token = "071cdcce-9241-4965-93af-4a4dbc739135" + authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" hostname = "localhost" use_ssl = true @@ -95,7 +95,7 @@ log.getLogger() -DEFAULT_CONFIG = {"api": {"authentatication_token": "", "hostname": "localhost", "use_ssl": True}} +DEFAULT_CONFIG = {"api": {"authentication_token": "", "hostname": "localhost", "use_ssl": True}} class ConfigurationError(Exception): diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 2a74cacbf..3582cf065 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -27,14 +27,14 @@ TEST_FILE = """\ [api] # Options for the Strawberry Fields Cloud API -authentatication_token = "071cdcce-9241-4965-93af-4a4dbc739135" +authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" hostname = "localhost" use_ssl = true """ EXPECTED_CONFIG = { "api": { - "authentatication_token": "071cdcce-9241-4965-93af-4a4dbc739135", + "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", "hostname": "localhost", "use_ssl": True, } From 8296c733331c4849d8442ee3b032d15e51530d6c Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 3 Jul 2019 14:17:07 -0400 Subject: [PATCH 044/335] Multiple changes and bug fixes, added unit tests - Change default API refresh to 1 second - Ability to pass APIClient parameters when creating an engine - Refactor and simplify how job content 
string is parsed - Use `to_blackbird` method to fetch Blackbird code - Add tests for new code --- strawberryfields/engine.py | 27 +++--- strawberryfields/program.py | 8 -- strawberryfields/program_utils.py | 6 +- tests/frontend/test_engine.py | 151 ++++++++++++++++++++++++++++-- 4 files changed, 159 insertions(+), 33 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 00568dfa1..736176381 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -83,6 +83,7 @@ from .backends.base import (NotApplicableError, BaseBackend) from strawberryfields.api_client import (APIClient, Job) +from strawberryfields.io import to_blackbird class Result: @@ -357,15 +358,16 @@ class StarshipEngine(BaseEngine): backend (str, BaseBackend): name of the backend, or a pre-constructed backend instance """ - API_DEFAULT_REFRESH_SECONDS = 0 + API_DEFAULT_REFRESH_SECONDS = 1 HARDWARE_BACKENDS = ('chip0', ) - def __init__(self): + def __init__(self, api_client_params=None): # Only chip0 backend supported initially. backend = "chip0" super().__init__(backend) - self.client = APIClient(hostname="localhost") + api_client_params = api_client_params or {} + self.client = APIClient(**api_client_params) self.jobs = [] def __str__(self): @@ -384,7 +386,7 @@ def reset(self, backend_options=None): def _init_backend(self, *args): """ - TODO: This does not do anything rightn now. + TODO: This does not do anything right now. """ # Do nothing for now... pass @@ -403,20 +405,17 @@ def generate_job_content(self, name, shots, blackbird_code): str: A string containing the job content to be sent to the server. """ target = self.backend_name - template = """ - name {name} - version 1.0 - target {target} (shots={shots}) - - {blackbird_code} - """.format( + return "\n".join([ + "name {name}", + "version 1.0", + "target {target} (shots={shots})", + "", + "{blackbird_code}"]).format( name=name, target=target, shots=str(shots), blackbird_code=blackbird_code) - return "\n".join([l.strip() for l in template.strip().split("\n")]) - def _run_program(self, program, **kwargs): """ Given a compiled program, gets the blackbird circuit code and creates (or resumes) a job @@ -435,7 +434,7 @@ def _run_program(self, program, **kwargs): Exception: In case a job could not be submitted or completed. TypeError: In case a job is already queued and a user is trying to submit a new job. """ - blackbird_code = program.get_blackbird_syntax() + blackbird_code = to_blackbird(program) job_content = self.generate_job_content(blackbird_code=blackbird_code, **kwargs) if self.jobs: diff --git a/strawberryfields/program.py b/strawberryfields/program.py index 1f75199e3..60891c916 100644 --- a/strawberryfields/program.py +++ b/strawberryfields/program.py @@ -264,14 +264,6 @@ def print(self, print_fn=print): for k in self.circuit: print_fn(k) - def get_blackbird_syntax(self): - """ - Returns a string containing the Blackbird syntax for this program. - """ - - commands = [command.get_blackbird_syntax() for command in self.circuit] - return "\n".join(commands) - @property def context(self): """Syntactic sugar for defining a Program using the :code:`with` statement. 
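As a brief illustration of the serialization path after this change, the sketch below shows a program's Blackbird text being produced by strawberryfields.io.to_blackbird (rather than the removed Program.get_blackbird_syntax) and wrapped in the same header that generate_job_content builds. The two-mode program, job name, and shot count are illustrative assumptions.

import strawberryfields as sf
from strawberryfields.io import to_blackbird
from strawberryfields.ops import S2gate, MeasureFock

# A small program to serialize.
prog = sf.Program(2)
with prog.context as q:
    S2gate(2) | [0, 1]
    MeasureFock() | [0]

# The engine now fetches the circuit text via to_blackbird ...
blackbird_code = to_blackbird(prog)

# ... and interpolates it under the job header, mirroring generate_job_content.
job_content = "\n".join(
    [
        "name {name}",
        "version 1.0",
        "target {target} (shots={shots})",
        "",
        "{blackbird_code}",
    ]
).format(name="example-job", target="chip0", shots="1", blackbird_code=blackbird_code)
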
diff --git a/strawberryfields/program_utils.py b/strawberryfields/program_utils.py index 760984798..af431e4ba 100644 --- a/strawberryfields/program_utils.py +++ b/strawberryfields/program_utils.py @@ -109,12 +109,8 @@ def __init__(self, op, reg): self.reg = reg def __str__(self): - """Print the command using Blackbird syntax.""" - return self.get_blackbird_syntax() - - def get_blackbird_syntax(self): """ - Return a string containing the Blackbird syntac. + Return a string containing the Blackbird syntax. """ operation = str(self.op) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index a9ef7ef04..197c6eff1 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -19,6 +19,8 @@ import strawberryfields as sf from strawberryfields import ops from strawberryfields.backends.base import BaseBackend +from unittest.mock import MagicMock +from strawberryfields import StarshipEngine @pytest.fixture @@ -158,29 +160,166 @@ def inspect(): assert inspect() == expected2 +@pytest.fixture +def starship_engine(monkeypatch): + """ + Create a reusable StarshipEngine instance without a real APIClient. + """ + mock_api_client = MagicMock() + monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) + engine = StarshipEngine() + monkeypatch.setattr(engine, "API_DEFAULT_REFRESH_SECONDS", 0) + return engine + + class TestStarshipEngine: """ Tests various methods on the remote engine StarshipEngine. """ - def test_job_submitted(self, monkeypatch): + def test_init(self, monkeypatch): + """ + Tests that a StarshipEngine instance is correctly initialized when additional APIClient + parameters are passed. + """ + mock_api_client = MagicMock() + mock_api_client_params = {'param': MagicMock()} + monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) + engine = StarshipEngine(mock_api_client_params) + mock_api_client.assert_called_once_with(param=mock_api_client_params['param']) + assert engine.jobs == [] + + def test_reset(self, starship_engine): + """ + Tests that StarshipEngine.jobs is correctly cleared when callling StarshipEnging.reset. + """ + starship_engine.jobs.append(MagicMock()) + assert len(starship_engine.jobs) == 1 + starship_engine.reset() + assert len(starship_engine.jobs) == 0 + + def test_generate_job_content(self, starship_engine): + """ + Tests that StarshipEngine.generate_job_content returns the correct string given name, + shots, and blackbird_code parameters. + """ + name = MagicMock() + shots = MagicMock() + blackbird_code = MagicMock() + + output = starship_engine.generate_job_content(name, shots, blackbird_code) + lines = output.split('\n') + assert lines[0] == "name {}".format(name) + assert lines[1] == "version 1.0" + assert lines[2] == "target {} (shots={})".format(starship_engine.backend_name, shots) + assert lines[3] == "" + assert lines[4] == str(blackbird_code) + + def test__run_program(self, starship_engine, monkeypatch): """ - Test that a job is successfully submitted to the APIClient instance. + Tests StarshipEngine._run_program. Asserts that a program is converted to blackbird code, + compiled into a job content string that the API can accept, and that a Job is submitted via + the APIClient to the API with the correct attributes. Also asserts that a completed job's + result samples are returned. 
""" + mock_to_blackbird = MagicMock() + mock_generate_job_content = MagicMock() + mock_job = MagicMock() + program = MagicMock() - # Note this is currently more of an integration test, currently a WIP / under development. + mock_job.is_complete = True - from unittest.mock import MagicMock + monkeypatch.setattr("strawberryfields.engine.to_blackbird", mock_to_blackbird) + monkeypatch.setattr(starship_engine, "generate_job_content", mock_generate_job_content) + monkeypatch.setattr("strawberryfields.engine.Job", mock_job) + + some_params = {'param': MagicMock()} + + result = starship_engine._run_program(program, **some_params) + + mock_to_blackbird.assert_called_once_with(program) + mock_generate_job_content.assert_called_once_with( + blackbird_code=mock_to_blackbird(program), param=some_params["param"]) + + mock_job.assert_called_once_with(client=starship_engine.client) + mock_job_instance = mock_job(client=starship_engine.client) + mock_job_instance.manager.create.assert_called_once_with( + circuit=mock_generate_job_content(mock_to_blackbird(program))) + mock_job_instance.result.manager.get.assert_called_once() + + assert starship_engine.jobs == [mock_job(client=starship_engine.client)] + assert result == mock_job_instance.result.result.value + + def test__run(self, starship_engine, monkeypatch): + """ + Tests StarshipEngine._run, with the assumption that the backend is a hardware backend + that supports running only a single program. This test ensures that a program is compiled + for the hardware backend, is locked, is added to self.run_progs, that it is run and that + a Result object is returned populated with the result samples. + """ + + mock_hardware_backend_name = str(MagicMock()) + mock_run_program = MagicMock() + mock_program = MagicMock() + mock_result = MagicMock() + mock_shots = MagicMock() + + monkeypatch.setattr(starship_engine, "backend_name", mock_hardware_backend_name) + monkeypatch.setattr(starship_engine, "HARDWARE_BACKENDS", [mock_hardware_backend_name]) + monkeypatch.setattr(starship_engine, "_run_program", mock_run_program) + monkeypatch.setattr("strawberryfields.engine.Result", mock_result) + + result = starship_engine._run(mock_program, shots=mock_shots) + + assert starship_engine.backend_name in starship_engine.HARDWARE_BACKENDS + mock_program.compile.assert_called_once_with(starship_engine.backend_name) + mock_compiled_program = mock_program.compile(starship_engine.backend_name) + mock_compiled_program.lock.assert_called_once() + mock_run_program.assert_called_once_with(mock_compiled_program, shots=mock_shots) + mock_samples = mock_run_program(mock_compiled_program, shots=mock_shots) + assert starship_engine.run_progs == [mock_compiled_program] + assert result == mock_result(mock_samples) + + def test_run(self, starship_engine, monkeypatch): + """ + Tests StarshipEngine.run. It is expected that StarshipEnging._run is called with the correct + parameters. 
+ """ + mock_run = MagicMock() + monkeypatch.setattr("strawberryfields.engine.BaseEngine._run", mock_run) + + name = MagicMock() + program = MagicMock() + shots = MagicMock() + params = {'param': MagicMock()} + + starship_engine.run(program, shots, name, **params) + mock_run.assert_called_once_with(program, shots=shots, name=name, param=params['param']) + + def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): + """ + This is an integration test that tests and actual program being submitted to a mock API, and + how the engine handles a successful response from the server (first by queuing a job then by + fetching the result.) + """ + + # NOTE: this is currently more of an integration test, currently a WIP / under development. + + import os + from unittest.mock import MagicMock from strawberryfields.ops import S2gate, MeasureFock, Rgate, BSgate from strawberryfields import StarshipEngine from strawberryfields.api_client import APIClient + from strawberryfields._dev import configuration as conf - engine = StarshipEngine() + api_client_params = {'hostname': "localhost"} + engine = StarshipEngine(api_client_params) + monkeypatch.setattr(engine, "API_DEFAULT_REFRESH_SECONDS", 0) # We don't want to actually send any requests, though we should make sure POST was called mock_api_client_post = MagicMock() - mock_get= MagicMock() + mock_get = MagicMock() mock_get_response = MagicMock() mock_get_response.status_code = 200 mock_get_response.json.return_value = {'status': 'COMPLETE', 'id': 1234} From 3c4d2c0e81524097a485c00b38e8edf540ed9783 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 3 Jul 2019 14:35:09 -0400 Subject: [PATCH 045/335] Style fixes --- tests/frontend/test_engine.py | 104 +++++++++++++++------------------- 1 file changed, 47 insertions(+), 57 deletions(-) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 197c6eff1..cf29b5a4b 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -21,6 +21,8 @@ from strawberryfields.backends.base import BaseBackend from unittest.mock import MagicMock from strawberryfields import StarshipEngine +from strawberryfields.api_client import APIClient +from strawberryfields.ops import S2gate, MeasureFock, Rgate, BSgate @pytest.fixture @@ -28,6 +30,7 @@ def eng(backend): """Engine fixture.""" return sf.LocalEngine(backend) + @pytest.fixture def prog(backend): """Program fixture.""" @@ -37,6 +40,18 @@ def prog(backend): return prog +@pytest.fixture +def starship_engine(monkeypatch): + """ + Create a reusable StarshipEngine fixture without a real APIClient. 
+ """ + mock_api_client = MagicMock() + monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) + engine = StarshipEngine() + monkeypatch.setattr(engine, "API_DEFAULT_REFRESH_SECONDS", 0) + return engine + + class TestEngine: """Test basic engine functionality""" @@ -47,7 +62,7 @@ def test_load_backend(self): def test_bad_backend(self): """Backend must be a string or a BaseBackend instance.""" - with pytest.raises(TypeError, match='backend must be a string or a BaseBackend instance'): + with pytest.raises(TypeError, match="backend must be a string or a BaseBackend instance"): eng = sf.LocalEngine(0) @@ -130,11 +145,7 @@ def inspect(): ops.Sgate(r) | q[1] eng.run(p1) - expected1 = [ - "Run 0:", - "Dgate({}, 0) | (q[1])".format(a), - "Sgate({}, 0) | (q[1])".format(r), - ] + expected1 = ["Run 0:", "Dgate({}, 0) | (q[1])".format(a), "Sgate({}, 0) | (q[1])".format(r)] assert inspect() == expected1 # run the program again @@ -148,10 +159,7 @@ def inspect(): ops.Rgate(r) | q[1] eng.run(p2) - expected2 = expected1 + [ - "Run 1:", - "Rgate({}) | (q[1])".format(r), - ] + expected2 = expected1 + ["Run 1:", "Rgate({}) | (q[1])".format(r)] assert inspect() == expected2 # reapply history @@ -160,18 +168,6 @@ def inspect(): assert inspect() == expected2 -@pytest.fixture -def starship_engine(monkeypatch): - """ - Create a reusable StarshipEngine instance without a real APIClient. - """ - mock_api_client = MagicMock() - monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) - engine = StarshipEngine() - monkeypatch.setattr(engine, "API_DEFAULT_REFRESH_SECONDS", 0) - return engine - - class TestStarshipEngine: """ Tests various methods on the remote engine StarshipEngine. @@ -183,15 +179,15 @@ def test_init(self, monkeypatch): parameters are passed. """ mock_api_client = MagicMock() - mock_api_client_params = {'param': MagicMock()} + mock_api_client_params = {"param": MagicMock()} monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) engine = StarshipEngine(mock_api_client_params) - mock_api_client.assert_called_once_with(param=mock_api_client_params['param']) + mock_api_client.assert_called_once_with(param=mock_api_client_params["param"]) assert engine.jobs == [] def test_reset(self, starship_engine): """ - Tests that StarshipEngine.jobs is correctly cleared when callling StarshipEnging.reset. + Tests that StarshipEngine.jobs is correctly cleared when callling StarshipEngine.reset. 
""" starship_engine.jobs.append(MagicMock()) assert len(starship_engine.jobs) == 1 @@ -208,7 +204,7 @@ def test_generate_job_content(self, starship_engine): blackbird_code = MagicMock() output = starship_engine.generate_job_content(name, shots, blackbird_code) - lines = output.split('\n') + lines = output.split("\n") assert lines[0] == "name {}".format(name) assert lines[1] == "version 1.0" assert lines[2] == "target {} (shots={})".format(starship_engine.backend_name, shots) @@ -233,19 +229,20 @@ def test__run_program(self, starship_engine, monkeypatch): monkeypatch.setattr(starship_engine, "generate_job_content", mock_generate_job_content) monkeypatch.setattr("strawberryfields.engine.Job", mock_job) - some_params = {'param': MagicMock()} - + some_params = {"param": MagicMock()} result = starship_engine._run_program(program, **some_params) mock_to_blackbird.assert_called_once_with(program) mock_generate_job_content.assert_called_once_with( - blackbird_code=mock_to_blackbird(program), param=some_params["param"]) + blackbird_code=mock_to_blackbird(program), param=some_params["param"] + ) mock_job.assert_called_once_with(client=starship_engine.client) mock_job_instance = mock_job(client=starship_engine.client) mock_job_instance.manager.create.assert_called_once_with( - circuit=mock_generate_job_content(mock_to_blackbird(program))) + circuit=mock_generate_job_content(mock_to_blackbird(program)) + ) mock_job_instance.result.manager.get.assert_called_once() assert starship_engine.jobs == [mock_job(client=starship_engine.client)] @@ -283,7 +280,7 @@ def test__run(self, starship_engine, monkeypatch): def test_run(self, starship_engine, monkeypatch): """ - Tests StarshipEngine.run. It is expected that StarshipEnging._run is called with the correct + Tests StarshipEngine.run. It is expected that StarshipEngine._run is called with the correct parameters. """ mock_run = MagicMock() @@ -292,10 +289,10 @@ def test_run(self, starship_engine, monkeypatch): name = MagicMock() program = MagicMock() shots = MagicMock() - params = {'param': MagicMock()} + params = {"param": MagicMock()} starship_engine.run(program, shots, name, **params) - mock_run.assert_called_once_with(program, shots=shots, name=name, param=params['param']) + mock_run.assert_called_once_with(program, shots=shots, name=name, param=params["param"]) def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): """ @@ -306,14 +303,7 @@ def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): # NOTE: this is currently more of an integration test, currently a WIP / under development. 
- import os - from unittest.mock import MagicMock - from strawberryfields.ops import S2gate, MeasureFock, Rgate, BSgate - from strawberryfields import StarshipEngine - from strawberryfields.api_client import APIClient - from strawberryfields._dev import configuration as conf - - api_client_params = {'hostname': "localhost"} + api_client_params = {"hostname": "localhost"} engine = StarshipEngine(api_client_params) monkeypatch.setattr(engine, "API_DEFAULT_REFRESH_SECONDS", 0) @@ -322,12 +312,12 @@ def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): mock_get = MagicMock() mock_get_response = MagicMock() mock_get_response.status_code = 200 - mock_get_response.json.return_value = {'status': 'COMPLETE', 'id': 1234} + mock_get_response.json.return_value = {"status": "COMPLETE", "id": 1234} mock_get.return_value = mock_get_response mock_post_response = MagicMock() mock_post_response.status_code = 201 - mock_post_response.json.return_value = {'status': 'QUEUED', 'id': 1234} + mock_post_response.json.return_value = {"status": "QUEUED", "id": 1234} mock_api_client_post.return_value = mock_post_response monkeypatch.setattr(APIClient, "post", mock_api_client_post) @@ -335,20 +325,20 @@ def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): prog = sf.Program(4) with prog.context as q: - S2gate(2) | [0, 2] - S2gate(2) | [1, 3] - Rgate(3) | 0 - BSgate() | [0, 1] - Rgate(3) | 0 - Rgate(3) | 1 - Rgate(3) | 2 - BSgate() | [2, 3] - Rgate(3) | 2 - Rgate(3) | 3 - MeasureFock() | [0] - MeasureFock() | [1] - MeasureFock() | [2] - MeasureFock() | [3] + S2gate(2) | [0, 2] + S2gate(2) | [1, 3] + Rgate(3) | 0 + BSgate() | [0, 1] + Rgate(3) | 0 + Rgate(3) | 1 + Rgate(3) | 2 + BSgate() | [2, 3] + Rgate(3) | 2 + Rgate(3) | 3 + MeasureFock() | [0] + MeasureFock() | [1] + MeasureFock() | [2] + MeasureFock() | [3] engine.run(prog) From ed4f58615e621fa8a8f7716749440c3bf0980a13 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 3 Jul 2019 14:11:52 -0400 Subject: [PATCH 046/335] Fix typo in configuration --- default_config.toml | 2 +- strawberryfields/_dev/configuration.py | 4 ++-- tests/frontend/test_configuration.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/default_config.toml b/default_config.toml index c979171fe..2d0d55bfc 100644 --- a/default_config.toml +++ b/default_config.toml @@ -5,7 +5,7 @@ [api] # Options for the Strawberry Fields Cloud API # Fill in your authentication -authentatication_token = None # example token form: 071cdcce-9241-4965-93af-4a4dbc739135 +authentication_token = None # example token form: 071cdcce-9241-4965-93af-4a4dbc739135 # Fill in the hostname of the Cloud API hostname = "localhost" # Whether Strawberry Fields should use SSL to connect to the API diff --git a/strawberryfields/_dev/configuration.py b/strawberryfields/_dev/configuration.py index 90dae5403..de55131f0 100644 --- a/strawberryfields/_dev/configuration.py +++ b/strawberryfields/_dev/configuration.py @@ -54,7 +54,7 @@ [api] # Options for the Strawberry Fields Cloud API - authentatication_token = "071cdcce-9241-4965-93af-4a4dbc739135" + authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" hostname = "localhost" use_ssl = true @@ -95,7 +95,7 @@ log.getLogger() -DEFAULT_CONFIG = {"api": {"authentatication_token": "", "hostname": "localhost", "use_ssl": True}} +DEFAULT_CONFIG = {"api": {"authentication_token": "", "hostname": "localhost", "use_ssl": True}} class ConfigurationError(Exception): diff --git a/tests/frontend/test_configuration.py 
b/tests/frontend/test_configuration.py index 2a74cacbf..3582cf065 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -27,14 +27,14 @@ TEST_FILE = """\ [api] # Options for the Strawberry Fields Cloud API -authentatication_token = "071cdcce-9241-4965-93af-4a4dbc739135" +authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" hostname = "localhost" use_ssl = true """ EXPECTED_CONFIG = { "api": { - "authentatication_token": "071cdcce-9241-4965-93af-4a4dbc739135", + "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", "hostname": "localhost", "use_ssl": True, } From 2847a547f7cb89243fe2a81c4e904c44d0548c44 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 3 Jul 2019 16:20:25 -0400 Subject: [PATCH 047/335] Add _queue_job method to simplify, add fail test --- strawberryfields/engine.py | 110 ++++++++++++++++++++++------------ tests/frontend/test_engine.py | 48 ++++++++++----- 2 files changed, 105 insertions(+), 53 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 736176381..09f68b282 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -80,9 +80,9 @@ from time import sleep from .backends import load_backend -from .backends.base import (NotApplicableError, BaseBackend) +from .backends.base import NotApplicableError, BaseBackend -from strawberryfields.api_client import (APIClient, Job) +from strawberryfields.api_client import APIClient, Job from strawberryfields.io import to_blackbird @@ -92,6 +92,7 @@ class Result: Represents the results of the execution of a quantum program. Returned by :meth:`.BaseEngine.run`. """ + def __init__(self, samples): #: BaseState: quantum state object returned by a local backend, if any self.state = None @@ -103,7 +104,9 @@ def __init__(self, samples): def __str__(self): """String representation.""" - return 'Result: {} subsystems, state: {}\n samples: {}'.format(len(self.samples), self.state, self.samples) + return "Result: {} subsystems, state: {}\n samples: {}".format( + len(self.samples), self.state, self.samples + ) class BaseEngine(abc.ABC): @@ -113,6 +116,7 @@ class BaseEngine(abc.ABC): backend (str): backend short name backend_options (Dict[str, Any]): keyword arguments for the backend """ + def __init__(self, backend, backend_options=None): if backend_options is None: backend_options = {} @@ -164,7 +168,7 @@ def print_applied(self, print_fn=print): print_fn (function): optional custom function to use for string printing. """ for k, r in enumerate(self.run_progs): - print_fn('Run {}:'.format(k)) + print_fn("Run {}:".format(k)) r.print(print_fn) @abc.abstractmethod @@ -244,7 +248,9 @@ def _normalize_sample(val): else: # there was a previous program segment if not p.can_follow(prev): - raise RuntimeError("Register mismatch: program {}, '{}'.".format(len(self.run_progs), p.name)) + raise RuntimeError( + "Register mismatch: program {}, '{}'.".format(len(self.run_progs), p.name) + ) # Copy the latest measured values in the RegRefs of p. # We cannot copy from prev directly because it could be used in more than one engine. @@ -253,7 +259,9 @@ def _normalize_sample(val): # if the program hasn't been compiled for this backend, do it now if p.backend != self.backend_name: - p = p.compile(self.backend_name, **compile_options) # TODO: shots might be relevant for compilation? + p = p.compile( + self.backend_name, **compile_options + ) # TODO: shots might be relevant for compilation? 
p.lock() self._run_program(p, **kwargs) @@ -276,6 +284,7 @@ class LocalEngine(BaseEngine): backend (str, BaseBackend): name of the backend, or a pre-constructed backend instance backend_options (Dict[str, Any]): keyword arguments to be passed to the backend """ + def __init__(self, backend, *, backend_options={}): super().__init__(backend, backend_options) @@ -287,17 +296,17 @@ def __init__(self, backend, *, backend_options={}): self.backend_name = backend._short_name self.backend = backend else: - raise TypeError('backend must be a string or a BaseBackend instance.') + raise TypeError("backend must be a string or a BaseBackend instance.") def __str__(self): - return self.__class__.__name__ + '({})'.format(self.backend_name) + return self.__class__.__name__ + "({})".format(self.backend_name) def reset(self, backend_options=None): if backend_options is None: backend_options = {} super().reset(backend_options) - self.backend_options.pop('batch_size', None) # HACK to make tests work for now + self.backend_options.pop("batch_size", None) # HACK to make tests work for now self.backend.reset(**self.backend_options) # TODO should backend.reset and backend.begin_circuit be combined? @@ -309,14 +318,22 @@ def _run_program(self, prog, **kwargs): for cmd in prog.circuit: try: # try to apply it to the backend - cmd.op.apply(cmd.reg, self.backend, **kwargs) # NOTE we could also handle storing measured vals here + cmd.op.apply( + cmd.reg, self.backend, **kwargs + ) # NOTE we could also handle storing measured vals here applied.append(cmd) except NotApplicableError: # command is not applicable to the current backend type - raise NotApplicableError('The operation {} cannot be used with {}.'.format(cmd.op, self.backend)) from None + raise NotApplicableError( + "The operation {} cannot be used with {}.".format(cmd.op, self.backend) + ) from None except NotImplementedError: # command not directly supported by backend API - raise NotImplementedError('The operation {} has not been implemented in {} for the arguments {}.'.format(cmd.op, self.backend, kwargs)) from None + raise NotImplementedError( + "The operation {} has not been implemented in {} for the arguments {}.".format( + cmd.op, self.backend, kwargs + ) + ) from None return applied def run(self, program, *, shots=1, compile_options={}, modes=None, state_options={}, **kwargs): @@ -343,7 +360,9 @@ def run(self, program, *, shots=1, compile_options={}, modes=None, state_options # empty sequence pass else: - result.state = self.backend.state(modes, **state_options) # tfbackend.state can use kwargs + result.state = self.backend.state( + modes, **state_options + ) # tfbackend.state can use kwargs return result @@ -359,7 +378,7 @@ class StarshipEngine(BaseEngine): """ API_DEFAULT_REFRESH_SECONDS = 1 - HARDWARE_BACKENDS = ('chip0', ) + HARDWARE_BACKENDS = ("chip0",) def __init__(self, api_client_params=None): # Only chip0 backend supported initially. @@ -371,7 +390,7 @@ def __init__(self, api_client_params=None): self.jobs = [] def __str__(self): - return self.__class__.__name__ + '({})'.format(self.backend_name) + return self.__class__.__name__ + "({})".format(self.backend_name) def reset(self, backend_options=None): """ @@ -405,16 +424,31 @@ def generate_job_content(self, name, shots, blackbird_code): str: A string containing the job content to be sent to the server. 
""" target = self.backend_name - return "\n".join([ - "name {name}", - "version 1.0", - "target {target} (shots={shots})", - "", - "{blackbird_code}"]).format( - name=name, - target=target, - shots=str(shots), - blackbird_code=blackbird_code) + return "\n".join( + [ + "name {name}", + "version 1.0", + "target {target} (shots={shots})", + "", + "{blackbird_code}", + ] + ).format(name=name, target=target, shots=str(shots), blackbird_code=blackbird_code) + + def _queue_job(self, job_content): + """ + Create a Job instance based on job_content, and send the job to the API. Append to list + of jobs. + + Args: + job_content (str): The Blackbird code to execute + + Returns: + (strawberryfields.api_client.Job): A Job instance referencing the queued job. + """ + job = Job(client=self.client) + job.manager.create(circuit=job_content) + self.jobs.append(job) + return job def _run_program(self, program, **kwargs): """ @@ -434,31 +468,29 @@ def _run_program(self, program, **kwargs): Exception: In case a job could not be submitted or completed. TypeError: In case a job is already queued and a user is trying to submit a new job. """ - blackbird_code = to_blackbird(program) - job_content = self.generate_job_content(blackbird_code=blackbird_code, **kwargs) - if self.jobs: raise TypeError("A job is already queued. Please reset the engine and try again.") - job = Job(client=self.client) - job.manager.create(circuit=job_content) - self.jobs.append(job) + blackbird_code = to_blackbird(program) + job_content = self.generate_job_content(blackbird_code=blackbird_code, **kwargs) + job = self._queue_job(job_content) try: - while not job.is_complete: + while not job.is_failed and not job.is_complete: job.reload() - if job.is_failed: - raise Exception("The job could not be submitted or completed.") sleep(self.API_DEFAULT_REFRESH_SECONDS) - - job.result.manager.get() - return job.result.result.value except KeyboardInterrupt: if job.id: print("Job {} is queued in the background.".format(job.id.value)) else: - raise Exception( - "Job could not be sent to server, please try again later.") + raise Exception("Job was not sent to server. Please try again.") + + if job.is_failed: + # TODO: Add failure details here, and use a better exception. + raise Exception("Job execution failed. Please try again.") + elif job.is_complete: + job.result.manager.get() + return job.result.result.value def run(self, program, shots=1, name=None, **kwargs): """ diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index cf29b5a4b..b703efd0a 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -211,23 +211,32 @@ def test_generate_job_content(self, starship_engine): assert lines[3] == "" assert lines[4] == str(blackbird_code) + def test_queue_job(self, starship_engine, monkeypatch): + mock_job = MagicMock() + monkeypatch.setattr("strawberryfields.engine.Job", mock_job) + mock_job_content = MagicMock() + + result = starship_engine._queue_job(mock_job_content) + mock_job.assert_called_once_with(client=starship_engine.client) + result.manager.create.assert_called_once_with(circuit=mock_job_content) + assert starship_engine.jobs == [result] + def test__run_program(self, starship_engine, monkeypatch): """ Tests StarshipEngine._run_program. Asserts that a program is converted to blackbird code, - compiled into a job content string that the API can accept, and that a Job is submitted via - the APIClient to the API with the correct attributes. 
Also asserts that a completed job's - result samples are returned. + compiled into a job content string and that the job is queued. Also asserts that a + completed job's result samples are returned. """ mock_to_blackbird = MagicMock() mock_generate_job_content = MagicMock() - mock_job = MagicMock() program = MagicMock() - + mock_job = MagicMock() mock_job.is_complete = True + mock_job.is_failed = False monkeypatch.setattr("strawberryfields.engine.to_blackbird", mock_to_blackbird) monkeypatch.setattr(starship_engine, "generate_job_content", mock_generate_job_content) - monkeypatch.setattr("strawberryfields.engine.Job", mock_job) + monkeypatch.setattr(starship_engine, "_queue_job", lambda job_content: mock_job) some_params = {"param": MagicMock()} result = starship_engine._run_program(program, **some_params) @@ -237,16 +246,27 @@ def test__run_program(self, starship_engine, monkeypatch): blackbird_code=mock_to_blackbird(program), param=some_params["param"] ) - mock_job.assert_called_once_with(client=starship_engine.client) + assert result == mock_job.result.result.value - mock_job_instance = mock_job(client=starship_engine.client) - mock_job_instance.manager.create.assert_called_once_with( - circuit=mock_generate_job_content(mock_to_blackbird(program)) - ) - mock_job_instance.result.manager.get.assert_called_once() + def test__run_program_fails(self, starship_engine, monkeypatch): + """ + Tests that an Exception is raised when a job has failed. + """ + mock_to_blackbird = MagicMock() + mock_generate_job_content = MagicMock() + program = MagicMock() + mock_job = MagicMock() + mock_job.is_complete = False + mock_job.is_failed = True + + monkeypatch.setattr("strawberryfields.engine.to_blackbird", mock_to_blackbird) + monkeypatch.setattr(starship_engine, "generate_job_content", mock_generate_job_content) + monkeypatch.setattr(starship_engine, "_queue_job", lambda job_content: mock_job) + + some_params = {"param": MagicMock()} - assert starship_engine.jobs == [mock_job(client=starship_engine.client)] - assert result == mock_job_instance.result.result.value + with pytest.raises(Exception): + starship_engine._run_program(program, **some_params) def test__run(self, starship_engine, monkeypatch): """ From 777e0a5caf736a99fc14aef66a3d54a5135046c7 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 4 Jul 2019 08:19:05 -0400 Subject: [PATCH 048/335] Minor refactor for tests --- tests/frontend/test_engine.py | 54 +++++++++++++++++------------------ 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index b703efd0a..1105ccc38 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -199,17 +199,17 @@ def test_generate_job_content(self, starship_engine): Tests that StarshipEngine.generate_job_content returns the correct string given name, shots, and blackbird_code parameters. 
""" - name = MagicMock() - shots = MagicMock() - blackbird_code = MagicMock() + inputs = MagicMock() - output = starship_engine.generate_job_content(name, shots, blackbird_code) + output = starship_engine.generate_job_content( + inputs.name, inputs.shots, inputs.blackbird_code + ) lines = output.split("\n") - assert lines[0] == "name {}".format(name) + assert lines[0] == "name {}".format(inputs.name) assert lines[1] == "version 1.0" - assert lines[2] == "target {} (shots={})".format(starship_engine.backend_name, shots) + assert lines[2] == "target {} (shots={})".format(starship_engine.backend_name, inputs.shots) assert lines[3] == "" - assert lines[4] == str(blackbird_code) + assert lines[4] == str(inputs.blackbird_code) def test_queue_job(self, starship_engine, monkeypatch): mock_job = MagicMock() @@ -276,27 +276,25 @@ def test__run(self, starship_engine, monkeypatch): a Result object is returned populated with the result samples. """ - mock_hardware_backend_name = str(MagicMock()) - mock_run_program = MagicMock() - mock_program = MagicMock() - mock_result = MagicMock() - mock_shots = MagicMock() + inputs = MagicMock() + outputs = MagicMock() + methods = MagicMock() - monkeypatch.setattr(starship_engine, "backend_name", mock_hardware_backend_name) - monkeypatch.setattr(starship_engine, "HARDWARE_BACKENDS", [mock_hardware_backend_name]) - monkeypatch.setattr(starship_engine, "_run_program", mock_run_program) - monkeypatch.setattr("strawberryfields.engine.Result", mock_result) + monkeypatch.setattr(starship_engine, "backend_name", str(inputs.mock_backend)) + monkeypatch.setattr(starship_engine, "HARDWARE_BACKENDS", [str(inputs.mock_backend)]) + monkeypatch.setattr(starship_engine, "_run_program", methods._run_program) + monkeypatch.setattr("strawberryfields.engine.Result", outputs.result) - result = starship_engine._run(mock_program, shots=mock_shots) + result = starship_engine._run(inputs.program, shots=inputs.shots) assert starship_engine.backend_name in starship_engine.HARDWARE_BACKENDS - mock_program.compile.assert_called_once_with(starship_engine.backend_name) - mock_compiled_program = mock_program.compile(starship_engine.backend_name) + inputs.program.compile.assert_called_once_with(starship_engine.backend_name) + mock_compiled_program = inputs.program.compile(starship_engine.backend_name) mock_compiled_program.lock.assert_called_once() - mock_run_program.assert_called_once_with(mock_compiled_program, shots=mock_shots) - mock_samples = mock_run_program(mock_compiled_program, shots=mock_shots) + methods._run_program.assert_called_once_with(mock_compiled_program, shots=inputs.shots) + mock_samples = methods._run_program(mock_compiled_program, shots=inputs.shots) assert starship_engine.run_progs == [mock_compiled_program] - assert result == mock_result(mock_samples) + assert result == outputs.result(mock_samples) def test_run(self, starship_engine, monkeypatch): """ @@ -306,13 +304,13 @@ def test_run(self, starship_engine, monkeypatch): mock_run = MagicMock() monkeypatch.setattr("strawberryfields.engine.BaseEngine._run", mock_run) - name = MagicMock() - program = MagicMock() - shots = MagicMock() - params = {"param": MagicMock()} + inputs = MagicMock() + inputs.params = {"param": MagicMock()} - starship_engine.run(program, shots, name, **params) - mock_run.assert_called_once_with(program, shots=shots, name=name, param=params["param"]) + starship_engine.run(inputs.program, inputs.shots, inputs.name, **inputs.params) + mock_run.assert_called_once_with( + inputs.program, shots=inputs.shots, 
name=inputs.name, param=inputs.params["param"] + ) def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): """ From 56f7692135dc581fab03f51ffdbd4c687b0112d6 Mon Sep 17 00:00:00 2001 From: Zeid Zabaneh Date: Thu, 4 Jul 2019 11:36:29 -0400 Subject: [PATCH 049/335] Apply suggestions from code review (docstrings) Co-Authored-By: Josh Izaac --- strawberryfields/engine.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 09f68b282..705536ef6 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -416,12 +416,12 @@ def generate_job_content(self, name, shots, blackbird_code): Assumes the current backend as the target. Args: - name (str): The name of the job to be created (e.g. StateTeleportation). - shots (int): The number of shots. - blackbird_code: The blackbird code of the job. + name (str): the name of the job to be created (e.g. StateTeleportation) + shots (int): the number of shots + blackbird_code: the blackbird code of the job Returns: - str: A string containing the job content to be sent to the server. + str: job content to be sent to the server """ target = self.backend_name return "\n".join( @@ -440,10 +440,10 @@ def _queue_job(self, job_content): of jobs. Args: - job_content (str): The Blackbird code to execute + job_content (str): the Blackbird code to execute Returns: - (strawberryfields.api_client.Job): A Job instance referencing the queued job. + (strawberryfields.api_client.Job): a Job instance referencing the queued job """ job = Job(client=self.client) job.manager.create(circuit=job_content) @@ -455,14 +455,14 @@ def _run_program(self, program, **kwargs): Given a compiled program, gets the blackbird circuit code and creates (or resumes) a job via the API. If the job is completed, returns the job result. - A queued job can be interrupted by a KeyboardInterrupt event, at which point if the job ID - was retrieved from the server, the job will be accessible via engine.jobs. + A queued job can be interrupted by a ``KeyboardInterrupt`` event, at which point if the job ID + was retrieved from the server, the job will be accessible via :meth:`~.Starship.jobs`. Args: - program (strawberryfields.program.Program): A program instance to be executed remotely. + program (strawberryfields.program.Program): program to be executed remotely Returns: - (list): A list representing the result samples + (list): a list representing the result samples Raises: Exception: In case a job could not be submitted or completed. 
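At this point in the series, ``StarshipEngine`` submits work through the platform API: ``generate_job_content`` renders the Blackbird job, ``_queue_job`` creates the job via the ``APIClient``, and ``_run_program`` polls until the job completes or fails. A minimal usage sketch of that flow follows; the hostname, gate sequence, and job name are illustrative assumptions rather than values taken from these patches, and a reachable API server with valid credentials is assumed.

.. code-block:: python

    import strawberryfields as sf
    from strawberryfields import StarshipEngine, ops

    # Assumed host; in practice the APIClient would also need an
    # authentication token from the configuration.
    eng = StarshipEngine(api_client_params={"hostname": "localhost"})

    prog = sf.Program(4)
    with prog.context as q:
        # Illustrative circuit only; the hardware target constrains which
        # gate sequences actually compile.
        ops.S2gate(2) | [0, 2]
        ops.MeasureFock() | [0]

    # Compiles the program for the hardware backend, queues the job, then
    # polls until it is complete or failed; on KeyboardInterrupt the queued
    # job remains accessible via eng.jobs.
    result = eng.run(prog, shots=1, name="ExampleJob")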
From 27f665cc719839b44a9620659c51b16a017e4e9a Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 4 Jul 2019 11:40:02 -0400 Subject: [PATCH 050/335] Refactoring various methods and tests, as well as code review feedback - Minor change in default config of APIClient to expose parameter names - Refactor _run method to simplify and combine logic for all engines - Refactor get_job_content and rename to _get_blackbird --- strawberryfields/api_client.py | 4 +- strawberryfields/engine.py | 100 ++++++++++++++++++-------------- tests/frontend/test_engine.py | 102 +++++++++++++++++---------------- 3 files changed, 112 insertions(+), 94 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 9d7819fa6..bdc726114 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -133,6 +133,8 @@ class APIClient: ENV_API_HOSTNAME_KEY = "{}API_HOSTNAME".format(ENV_KEY_PREFIX) ENV_USE_SSL_KEY = "{}USE_SSL".format(ENV_KEY_PREFIX) + DEFAULT_CONFIG = {"use_ssl": True, "hostname": DEFAULT_HOSTNAME, "authentication_token": None} + def __init__(self, **kwargs): """ Initialize the API client with various parameters. @@ -140,7 +142,7 @@ def __init__(self, **kwargs): # TODO: Load username, password, or authentication token from # configuration file - config = {"use_ssl": True, "hostname": self.DEFAULT_HOSTNAME, "authentication_token": None} + config = {k: v for k, v in self.DEFAULT_CONFIG.items()} # Try getting everything first from configuration config.update(self.get_configuration_from_config()) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 09f68b282..c548beed7 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -218,7 +218,7 @@ def _run(self, program, *, shots=1, compile_options={}, **kwargs): Result: results of the computation """ - def _normalize_sample(val): + def _broadcast_nones(val, shots): """Helper function to ensure register values have same shape, even if not measured""" if val is None and shots > 1: return [None] * shots @@ -232,14 +232,6 @@ def _normalize_sample(val): # signatures of methods in Operations to remain cleaner, since only # Measurements need to know about shots - if self.backend_name in getattr(self, "HARDWARE_BACKENDS", []): - p = program[0] - p = p.compile(self.backend_name) # TODO: does compile need to know about shots? - p.lock() - self.run_progs.append(p) - samples = self._run_program(p, **kwargs) - return Result(samples) - prev = self.run_progs[-1] if self.run_progs else None # previous program segment for p in program: if prev is None: @@ -264,11 +256,16 @@ def _normalize_sample(val): ) # TODO: shots might be relevant for compilation? 
p.lock() - self._run_program(p, **kwargs) - self.run_progs.append(p) + if self.backend_name in getattr(self, "HARDWARE_BACKENDS", []): + self.samples = self._run_program(p, **kwargs) + else: + self._run_program(p, **kwargs) + shots = kwargs.get("shots", 1) + self.samples = [ + _broadcast_nones(p.reg_refs[k].val, shots) for k in sorted(p.reg_refs) + ] - reg_refs = [p.reg_refs[k].val for k in sorted(p.reg_refs)] - self.samples = map(_normalize_sample, reg_refs) + self.run_progs.append(p) prev = p return Result(list(self.samples)) @@ -377,32 +374,21 @@ class StarshipEngine(BaseEngine): backend (str, BaseBackend): name of the backend, or a pre-constructed backend instance """ - API_DEFAULT_REFRESH_SECONDS = 1 HARDWARE_BACKENDS = ("chip0",) - def __init__(self, api_client_params=None): + def __init__(self, polling_delay_seconds=1, **kwargs): # Only chip0 backend supported initially. backend = "chip0" super().__init__(backend) - api_client_params = api_client_params or {} + api_client_params = {k: v for k, v in kwargs.items() if k in APIClient.DEFAULT_CONFIG} self.client = APIClient(**api_client_params) + self.polling_delay_seconds = polling_delay_seconds self.jobs = [] def __str__(self): return self.__class__.__name__ + "({})".format(self.backend_name) - def reset(self, backend_options=None): - """ - Reset must be called in order to submit a new job. This clears the job queue as well as - any ran Programs. - """ - if backend_options is None: - backend_options = {} - - super().reset(backend_options) - self.jobs.clear() - def _init_backend(self, *args): """ TODO: This does not do anything right now. @@ -410,9 +396,17 @@ def _init_backend(self, *args): # Do nothing for now... pass - def generate_job_content(self, name, shots, blackbird_code): + def reset(self): + """ + Reset must be called in order to submit a new job. This clears the job queue as well as + any ran Programs. + """ + super().reset(backend_options={}) + self.jobs.clear() + + def _get_blackbird(self, name, shots, program): """ - Generates a string representing the Blackbird code that will be sent to the server. + Returns a Blackbird object to be sent later to the server when creating a job. Assumes the current backend as the target. Args: @@ -423,16 +417,14 @@ def generate_job_content(self, name, shots, blackbird_code): Returns: str: A string containing the job content to be sent to the server. """ - target = self.backend_name - return "\n".join( - [ - "name {name}", - "version 1.0", - "target {target} (shots={shots})", - "", - "{blackbird_code}", - ] - ).format(name=name, target=target, shots=str(shots), blackbird_code=blackbird_code) + bb = to_blackbird(program, version="1.0") + bb._name = name + + # TODO: This is potentially not needed here + bb._target["name"] = self.backend_name + + bb._target["options"] = {"shots": shots} + return bb def _queue_job(self, job_content): """ @@ -471,14 +463,13 @@ def _run_program(self, program, **kwargs): if self.jobs: raise TypeError("A job is already queued. 
Please reset the engine and try again.") - blackbird_code = to_blackbird(program) - job_content = self.generate_job_content(blackbird_code=blackbird_code, **kwargs) + job_content = self._get_blackbird(program=program, **kwargs).serialize() job = self._queue_job(job_content) try: while not job.is_failed and not job.is_complete: job.reload() - sleep(self.API_DEFAULT_REFRESH_SECONDS) + sleep(self.polling_delay_seconds) except KeyboardInterrupt: if job.id: print("Job {} is queued in the background.".format(job.id.value)) @@ -493,9 +484,30 @@ def _run_program(self, program, **kwargs): return job.result.result.value def run(self, program, shots=1, name=None, **kwargs): + """Compile the given program and execute it by queuing a job in the Starship. + + For the :class:`Program` instance given as input, the following happens: + + * The Program instance is compiled for the target backend. + * The compiled program is sent as a job to the Starship + * The measurement results of each subsystem (if any) are stored in the :attr:`~.samples`. + * The compiled program is appended to self.run_progs. + * The queued or completed jobs are appended to self.jobs. + + Finally, the result of the computation is returned. + + Args: + program (Program, Sequence[Program]): quantum programs to run + name (str): The name of the program (an arbitrary string) + shots (int): number of times the program measurement evaluation is repeated + + The ``kwargs`` keyword arguments are passed to :meth:`_run_program`. + + Returns: + Result: results of the computation """ - Compile a given program and queue a job in the Starship. - """ + + # TODO: this is probably not needed name = name or str(uuid.uuid4()) return super()._run(program, shots=shots, name=name, **kwargs) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 1105ccc38..434dc1232 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -13,16 +13,15 @@ # limitations under the License. r"""Unit tests for engine.py""" import pytest - -pytestmark = pytest.mark.frontend +from unittest.mock import MagicMock, call import strawberryfields as sf -from strawberryfields import ops -from strawberryfields.backends.base import BaseBackend -from unittest.mock import MagicMock from strawberryfields import StarshipEngine +from strawberryfields import ops from strawberryfields.api_client import APIClient -from strawberryfields.ops import S2gate, MeasureFock, Rgate, BSgate +from strawberryfields.backends.base import BaseBackend + +pytestmark = pytest.mark.frontend @pytest.fixture @@ -47,8 +46,7 @@ def starship_engine(monkeypatch): """ mock_api_client = MagicMock() monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) - engine = StarshipEngine() - monkeypatch.setattr(engine, "API_DEFAULT_REFRESH_SECONDS", 0) + engine = StarshipEngine(polling_delay_seconds=0) return engine @@ -179,10 +177,9 @@ def test_init(self, monkeypatch): parameters are passed. 
""" mock_api_client = MagicMock() - mock_api_client_params = {"param": MagicMock()} monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) - engine = StarshipEngine(mock_api_client_params) - mock_api_client.assert_called_once_with(param=mock_api_client_params["param"]) + engine = StarshipEngine() + assert engine.client == mock_api_client() assert engine.jobs == [] def test_reset(self, starship_engine): @@ -194,22 +191,27 @@ def test_reset(self, starship_engine): starship_engine.reset() assert len(starship_engine.jobs) == 0 - def test_generate_job_content(self, starship_engine): + def test__get_blackbird(self, starship_engine, monkeypatch): """ - Tests that StarshipEngine.generate_job_content returns the correct string given name, - shots, and blackbird_code parameters. + Tests that StarshipEngine._get_blackbird returns the correct string given name, + shots, and program parameters. """ + methods = MagicMock() inputs = MagicMock() - output = starship_engine.generate_job_content( - inputs.name, inputs.shots, inputs.blackbird_code + monkeypatch.setattr("strawberryfields.engine.to_blackbird", methods.to_blackbird) + + output = starship_engine._get_blackbird(inputs.name, inputs.shots, inputs.program) + + methods.to_blackbird.assert_called_once_with(inputs.program, version="1.0") + assert output._name == inputs.name + assert len(output._target.__setitem__.call_args_list) == 2 + assert output._target.__setitem__.call_args_list[0] == call( + "name", starship_engine.backend_name + ) + assert output._target.__setitem__.call_args_list[1] == call( + "options", {"shots": inputs.shots} ) - lines = output.split("\n") - assert lines[0] == "name {}".format(inputs.name) - assert lines[1] == "version 1.0" - assert lines[2] == "target {} (shots={})".format(starship_engine.backend_name, inputs.shots) - assert lines[3] == "" - assert lines[4] == str(inputs.blackbird_code) def test_queue_job(self, starship_engine, monkeypatch): mock_job = MagicMock() @@ -228,23 +230,20 @@ def test__run_program(self, starship_engine, monkeypatch): completed job's result samples are returned. """ mock_to_blackbird = MagicMock() - mock_generate_job_content = MagicMock() + mock__get_blackbird = MagicMock() program = MagicMock() mock_job = MagicMock() mock_job.is_complete = True mock_job.is_failed = False monkeypatch.setattr("strawberryfields.engine.to_blackbird", mock_to_blackbird) - monkeypatch.setattr(starship_engine, "generate_job_content", mock_generate_job_content) + monkeypatch.setattr(starship_engine, "_get_blackbird", mock__get_blackbird) monkeypatch.setattr(starship_engine, "_queue_job", lambda job_content: mock_job) some_params = {"param": MagicMock()} result = starship_engine._run_program(program, **some_params) - mock_to_blackbird.assert_called_once_with(program) - mock_generate_job_content.assert_called_once_with( - blackbird_code=mock_to_blackbird(program), param=some_params["param"] - ) + mock__get_blackbird.assert_called_once_with(program=program, param=some_params["param"]) assert result == mock_job.result.result.value @@ -253,14 +252,14 @@ def test__run_program_fails(self, starship_engine, monkeypatch): Tests that an Exception is raised when a job has failed. 
""" mock_to_blackbird = MagicMock() - mock_generate_job_content = MagicMock() + mock__get_blackbird = MagicMock() program = MagicMock() mock_job = MagicMock() mock_job.is_complete = False mock_job.is_failed = True monkeypatch.setattr("strawberryfields.engine.to_blackbird", mock_to_blackbird) - monkeypatch.setattr(starship_engine, "generate_job_content", mock_generate_job_content) + monkeypatch.setattr(starship_engine, "_get_blackbird", mock__get_blackbird) monkeypatch.setattr(starship_engine, "_queue_job", lambda job_content: mock_job) some_params = {"param": MagicMock()} @@ -277,6 +276,7 @@ def test__run(self, starship_engine, monkeypatch): """ inputs = MagicMock() + inputs.shots = 1 outputs = MagicMock() methods = MagicMock() @@ -292,9 +292,11 @@ def test__run(self, starship_engine, monkeypatch): mock_compiled_program = inputs.program.compile(starship_engine.backend_name) mock_compiled_program.lock.assert_called_once() methods._run_program.assert_called_once_with(mock_compiled_program, shots=inputs.shots) - mock_samples = methods._run_program(mock_compiled_program, shots=inputs.shots) + assert starship_engine.samples == starship_engine._run_program( + mock_compiled_program, shots=inputs.shots + ) assert starship_engine.run_progs == [mock_compiled_program] - assert result == outputs.result(mock_samples) + assert result == outputs.result(starship_engine.samples) def test_run(self, starship_engine, monkeypatch): """ @@ -322,15 +324,17 @@ def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): # NOTE: this is currently more of an integration test, currently a WIP / under development. api_client_params = {"hostname": "localhost"} - engine = StarshipEngine(api_client_params) - monkeypatch.setattr(engine, "API_DEFAULT_REFRESH_SECONDS", 0) + engine = StarshipEngine(polling_delay_seconds=0, **api_client_params) # We don't want to actually send any requests, though we should make sure POST was called mock_api_client_post = MagicMock() mock_get = MagicMock() mock_get_response = MagicMock() mock_get_response.status_code = 200 - mock_get_response.json.return_value = {"status": "COMPLETE", "id": 1234} + + # Including "result" here is a little hacky, but it is here as this response is returned + # for both job.get() and job.result.get() + mock_get_response.json.return_value = {"status": "COMPLETE", "id": 1234, "result": {1: []}} mock_get.return_value = mock_get_response mock_post_response = MagicMock() @@ -343,20 +347,20 @@ def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): prog = sf.Program(4) with prog.context as q: - S2gate(2) | [0, 2] - S2gate(2) | [1, 3] - Rgate(3) | 0 - BSgate() | [0, 1] - Rgate(3) | 0 - Rgate(3) | 1 - Rgate(3) | 2 - BSgate() | [2, 3] - Rgate(3) | 2 - Rgate(3) | 3 - MeasureFock() | [0] - MeasureFock() | [1] - MeasureFock() | [2] - MeasureFock() | [3] + ops.S2gate(2) | [0, 2] + ops.S2gate(2) | [1, 3] + ops.Rgate(3) | 0 + ops.BSgate() | [0, 1] + ops.Rgate(3) | 0 + ops.Rgate(3) | 1 + ops.Rgate(3) | 2 + ops.BSgate() | [2, 3] + ops.Rgate(3) | 2 + ops.Rgate(3) | 3 + ops.MeasureFock() | [0] + ops.MeasureFock() | [1] + ops.MeasureFock() | [2] + ops.MeasureFock() | [3] engine.run(prog) From 6d2e934e4c42b4df02adfac0219bd149e2dd3fba Mon Sep 17 00:00:00 2001 From: Zeid Zabaneh Date: Thu, 4 Jul 2019 13:50:22 -0400 Subject: [PATCH 051/335] Apply suggestions from code review Co-Authored-By: Ville Bergholm --- strawberryfields/program_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/program_utils.py 
b/strawberryfields/program_utils.py index af431e4ba..1910a0791 100644 --- a/strawberryfields/program_utils.py +++ b/strawberryfields/program_utils.py @@ -110,7 +110,7 @@ def __init__(self, op, reg): def __str__(self): """ - Return a string containing the Blackbird syntax. + Return a string containing the command in Blackbird syntax. """ operation = str(self.op) From 0d59a743a1c8ba957189baf5a34e6e8da4067e05 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 4 Jul 2019 14:43:28 -0400 Subject: [PATCH 052/335] Add better and more specific exceptions --- strawberryfields/api_client.py | 16 ++++++++++++++++ strawberryfields/engine.py | 24 +++++++++++++++++------- tests/frontend/test_engine.py | 4 ++-- 3 files changed, 35 insertions(+), 9 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index bdc726114..af070ae41 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -117,6 +117,22 @@ class ObjectAlreadyCreatedException(TypeError): pass +class JobNotQueuedError(Exception): + """ + Raised when a job is not successfully queued for whatever reason. + """ + + pass + + +class JobExecutionError(Exception): + """ + Raised when job execution failed and a job result does not exist. + """ + + pass + + class APIClient: """ An object that allows the user to connect to the Xanadu Platform API. diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 770c2b0bc..345ea26bc 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -82,10 +82,18 @@ from .backends import load_backend from .backends.base import NotApplicableError, BaseBackend -from strawberryfields.api_client import APIClient, Job +from strawberryfields.api_client import APIClient, Job, JobNotQueuedError, JobExecutionError from strawberryfields.io import to_blackbird +class OneJobAtATimeError(Exception): + """ + Raised when a user attempts to execute more than one job on the same engine instance. + """ + + pass + + class Result: """Result of a quantum computation. @@ -412,10 +420,10 @@ def _get_blackbird(self, name, shots, program): Args: name (str): the name of the job to be created (e.g. StateTeleportation) shots (int): the number of shots - blackbird_code: the blackbird code of the job + program (Program): program to be converted to Blackbird code Returns: - str: job content to be sent to the server + blackbird.BlackbirdProgram """ bb = to_blackbird(program, version="1.0") bb._name = name @@ -461,7 +469,9 @@ def _run_program(self, program, **kwargs): TypeError: In case a job is already queued and a user is trying to submit a new job. """ if self.jobs: - raise TypeError("A job is already queued. Please reset the engine and try again.") + raise OneJobAtATimeError( + "A job is already queued. Please reset the engine and try again." + ) job_content = self._get_blackbird(program=program, **kwargs).serialize() job = self._queue_job(job_content) @@ -474,11 +484,11 @@ def _run_program(self, program, **kwargs): if job.id: print("Job {} is queued in the background.".format(job.id.value)) else: - raise Exception("Job was not sent to server. Please try again.") + raise JobNotQueuedError("Job was not sent to server. Please try again.") if job.is_failed: - # TODO: Add failure details here, and use a better exception. - raise Exception("Job execution failed. Please try again.") + # TODO: Add failure details here. Should this exception be raised elsewhere? + raise JobExecutionError("Job execution failed. 
Please try again.") elif job.is_complete: job.result.manager.get() return job.result.result.value diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 434dc1232..9e28191e7 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -18,7 +18,7 @@ import strawberryfields as sf from strawberryfields import StarshipEngine from strawberryfields import ops -from strawberryfields.api_client import APIClient +from strawberryfields.api_client import APIClient, JobExecutionError from strawberryfields.backends.base import BaseBackend pytestmark = pytest.mark.frontend @@ -264,7 +264,7 @@ def test__run_program_fails(self, starship_engine, monkeypatch): some_params = {"param": MagicMock()} - with pytest.raises(Exception): + with pytest.raises(JobExecutionError): starship_engine._run_program(program, **some_params) def test__run(self, starship_engine, monkeypatch): From 18236a882bd8ca9f28b21c46c3cfc0c5e401d20f Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 4 Jul 2019 14:51:08 -0400 Subject: [PATCH 053/335] Minor change to default config setup, added exception classes --- strawberryfields/api_client.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 9d7819fa6..af070ae41 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -117,6 +117,22 @@ class ObjectAlreadyCreatedException(TypeError): pass +class JobNotQueuedError(Exception): + """ + Raised when a job is not successfully queued for whatever reason. + """ + + pass + + +class JobExecutionError(Exception): + """ + Raised when job execution failed and a job result does not exist. + """ + + pass + + class APIClient: """ An object that allows the user to connect to the Xanadu Platform API. @@ -133,6 +149,8 @@ class APIClient: ENV_API_HOSTNAME_KEY = "{}API_HOSTNAME".format(ENV_KEY_PREFIX) ENV_USE_SSL_KEY = "{}USE_SSL".format(ENV_KEY_PREFIX) + DEFAULT_CONFIG = {"use_ssl": True, "hostname": DEFAULT_HOSTNAME, "authentication_token": None} + def __init__(self, **kwargs): """ Initialize the API client with various parameters. @@ -140,7 +158,7 @@ def __init__(self, **kwargs): # TODO: Load username, password, or authentication token from # configuration file - config = {"use_ssl": True, "hostname": self.DEFAULT_HOSTNAME, "authentication_token": None} + config = {k: v for k, v in self.DEFAULT_CONFIG.items()} # Try getting everything first from configuration config.update(self.get_configuration_from_config()) From 4715ca9c44bb8b4259ecff9382a15bb2afc72864 Mon Sep 17 00:00:00 2001 From: Zeid Zabaneh Date: Tue, 9 Jul 2019 08:51:00 -0400 Subject: [PATCH 054/335] Apply suggestions from code review (docstrings) Co-Authored-By: Josh Izaac --- strawberryfields/api_client.py | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index af070ae41..b914a9149 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -26,9 +26,9 @@ with this API via the Resource subclasses, as well as the ResourceManager wrapper around APIClient that is available for each resource. -A single APIClient instance can be used throughout one's session in the application. The application -will attempt to configure the APIClient instance using a configuration file or defaults, but the -user can choose to override various parameters of the APIClient manually. 
+A single :class:`~.APIClient` instance can be used throughout one's session in the application. The application +will attempt to configure the :class:`~.APIClient` instance using a configuration file or defaults, but the +user can choose to override various parameters of the :class:`~.APIClient` manually. A typical use looks like this: .. code-block:: python @@ -197,8 +197,8 @@ def authenticate(self, username, password): and password authentication and calls set_authorization_header. Args: - username (str): A user name. - password (str): A password. + username (str): a user name + password (str): password """ raise NotImplementedError() @@ -208,19 +208,19 @@ def set_authorization_header(self, authentication_token): with all API requests. Args: - authentication_token (str): An authentication token used to access the API. + authentication_token (str): an authentication token used to access the API """ self.HEADERS["Authorization"] = authentication_token def join_path(self, path): """ - Joins a base url with an additional path (e.g., a resource name and ID) + Joins a base url with an additional path (e.g., a resource name and ID). Args: - path (str): A path to be joined with BASE_URL. + path (str): A path to be joined with ``BASE_URL`` Returns: - str: A joined path. + str: resulting joined path """ return join_path(self.BASE_URL, path) @@ -229,7 +229,7 @@ def get(self, path): Sends a GET request to the provided path. Returns a response object. Args: - path (str): A path to send the GET request to. + path (str): path to send the GET request to Returns: requests.Response: A response object, or None if no response could be fetched from the @@ -248,8 +248,8 @@ def post(self, path, payload): path. Returns a response object. Args: - path (str): A path to send the GET request to. - payload: A JSON serializable object to be sent to the server. + path (str): path to send the GET request to + payload: JSON serializable object to be sent to the server Returns: requests.Response: A response object, or None if no response could be fetched from the @@ -295,7 +295,7 @@ def get(self, resource_id=None): object is populated with the data in the response. Args: - resource_id (int): The ID of an object to be retrieved. + resource_id (int): the ID of an object to be retrieved """ if "GET" not in self.resource.SUPPORTED_METHODS: raise MethodNotSupportedException("GET method on this resource is not supported") @@ -312,7 +312,7 @@ def create(self, **params): request to the appropriate endpoint. Args: - **params: Arbitrary parameters to be passed on to the POST request. + **params: arbitrary parameters to be passed on to the POST request """ if "POST" not in self.resource.SUPPORTED_METHODS: raise MethodNotSupportedException("POST method on this resource is not supported") @@ -330,7 +330,7 @@ def handle_response(self, response): based on the status code. Args: - response (requests.Response): A response object to be parsed. + response (requests.Response): a response object to be parsed """ if hasattr(response, "status_code"): self.http_status_code = response.status_code @@ -353,7 +353,7 @@ def handle_success_response(self, response): Handles a successful response by refreshing the instance fields. Args: - response (requests.Response): A response object to be parsed. + response (requests.Response): a response object to be parsed """ self.refresh_data(response.json()) @@ -362,7 +362,7 @@ def handle_error_response(self, response): Handles an error response that is returned by the server. 
Args: - response (requests.Response): A response object to be parsed. + response (requests.Response): a response object to be parsed """ # TODO: Improve error messaging and parse the actual error output (json). From 901ae25291e538666c9ce7dad0f988fa34510509 Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 9 Jul 2019 09:37:11 -0400 Subject: [PATCH 055/335] Minor changes and fixes - Use configuration for defaults rather than duplicating it in APIClient - Don't catch ConnectionError - Documentation fixes - Check for boolean value of AUTHENTICATION_TOKEN --- strawberryfields/api_client.py | 43 +++++++++++------------------ tests/api_client/test_api_client.py | 8 +++--- 2 files changed, 20 insertions(+), 31 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index b914a9149..04d46735d 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -26,9 +26,10 @@ with this API via the Resource subclasses, as well as the ResourceManager wrapper around APIClient that is available for each resource. -A single :class:`~.APIClient` instance can be used throughout one's session in the application. The application -will attempt to configure the :class:`~.APIClient` instance using a configuration file or defaults, but the -user can choose to override various parameters of the :class:`~.APIClient` manually. +A single :class:`~.APIClient` instance can be used throughout one's session in the application. +The application will attempt to configure the :class:`~.APIClient` instance using a configuration +file or defaults, but the user can choose to override various parameters of the :class:`~.APIClient` +manually. A typical use looks like this: .. code-block:: python @@ -53,7 +54,7 @@ ''' job.manager.create(circuit=circuit) - job.id # Returns the job ID that was generated by the server + job.id # Returns the Job's id Field for the job that was sent to the server job.reload() # Fetches the latest job data from the server job.status # Prints the status of this job job.result # Returns a JobResult object @@ -149,19 +150,11 @@ class APIClient: ENV_API_HOSTNAME_KEY = "{}API_HOSTNAME".format(ENV_KEY_PREFIX) ENV_USE_SSL_KEY = "{}USE_SSL".format(ENV_KEY_PREFIX) - DEFAULT_CONFIG = {"use_ssl": True, "hostname": DEFAULT_HOSTNAME, "authentication_token": None} - def __init__(self, **kwargs): """ Initialize the API client with various parameters. """ - # TODO: Load username, password, or authentication token from - # configuration file - - config = {k: v for k, v in self.DEFAULT_CONFIG.items()} - - # Try getting everything first from configuration - config.update(self.get_configuration_from_config()) + config = self.get_configuration_from_config() # Override any values that are explicitly passed when initializing client config.update(kwargs) @@ -181,9 +174,12 @@ def __init__(self, **kwargs): self.AUTHENTICATION_TOKEN = config["authentication_token"] self.HEADERS = {"User-Agent": self.USER_AGENT} - if self.AUTHENTICATION_TOKEN is not None: + if self.AUTHENTICATION_TOKEN: self.set_authorization_header(self.AUTHENTICATION_TOKEN) + # Configuration keys are added here for convenience + self.CONFIG_KEYS = config.keys() + def get_configuration_from_config(self): """ Retrieve configuration from environment variables or config file based on Strawberry Fields @@ -235,11 +231,8 @@ def get(self, path): requests.Response: A response object, or None if no response could be fetched from the server. 
""" - try: - response = requests.get(url=self.join_path(path), headers=self.HEADERS) - except requests.exceptions.ConnectionError as e: - response = None - warnings.warn("Could not connect to server ({})".format(e)) + # TODO: better error handling (e.g. ConnectionError) + response = requests.get(url=self.join_path(path), headers=self.HEADERS) return response def post(self, path, payload): @@ -255,13 +248,9 @@ def post(self, path, payload): requests.Response: A response object, or None if no response could be fetched from the server. """ - # TODO: catch any exceptions from dumping JSON + # TODO: catch any exceptions from dumping JSON, and handle request errors data = json.dumps(payload) - try: - response = requests.post(url=self.join_path(path), headers=self.HEADERS, data=data) - except requests.exceptions.ConnectionError as e: - response = None - warnings.warn("Could not connect to server ({})".format(e)) + response = requests.post(url=self.join_path(path), headers=self.HEADERS, data=data) return response @@ -459,11 +448,11 @@ def __init__(self, name, clean=str): self.name = name self.clean = clean - def __str__(self): + def __repr__(self): """ Return the string representation of the value. """ - return str(self.value) + return "<{} {}: {}>".format(self.name, self.__class__.__name__, str(self.value)) def __bool__(self): """ diff --git a/tests/api_client/test_api_client.py b/tests/api_client/test_api_client.py index d2c733415..c29b0bb8d 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/api_client/test_api_client.py @@ -139,9 +139,9 @@ def test_init_default_client(self): """ client = api_client.APIClient() assert client.USE_SSL is True - assert client.AUTHENTICATION_TOKEN is None + assert not client.AUTHENTICATION_TOKEN assert client.BASE_URL == "https://localhost" - assert client.HEADERS['User-Agent'] == client.USER_AGENT + assert client.HEADERS["User-Agent"] == client.USER_AGENT def test_init_default_client_no_ssl(self): """ @@ -150,9 +150,9 @@ def test_init_default_client_no_ssl(self): """ client = api_client.APIClient(use_ssl=False) assert client.USE_SSL is False - assert client.AUTHENTICATION_TOKEN is None + assert not client.AUTHENTICATION_TOKEN assert client.BASE_URL == "http://localhost" - assert client.HEADERS['User-Agent'] == client.USER_AGENT + assert client.HEADERS["User-Agent"] == client.USER_AGENT def test_init_custom_token_client(self): """ From 0ddb325bd9b689bf803c95824165cb5a2003ad54 Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 9 Jul 2019 11:11:03 -0400 Subject: [PATCH 056/335] Clean up --- strawberryfields/engine.py | 57 +++++++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 20 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index e32ba504b..49bd2360e 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -73,7 +73,6 @@ """ import abc -import uuid from collections.abc import Sequence from numpy import stack, shape from time import sleep @@ -145,7 +144,9 @@ def state(self): def __str__(self): """String representation.""" - return "Result: {} subsystems, state: {}\n samples: {}".format(len(self.samples), self.state, self.samples) + return "Result: {} subsystems, state: {}\n samples: {}".format( + len(self.samples), self.state, self.samples + ) class BaseEngine(abc.ABC): @@ -184,7 +185,8 @@ def reset(self, backend_options): * All RegRefs of previously run Programs are cleared of measured values. * List of previously run Progams is cleared. 
- Note that the reset does nothing to any Program objects in existence, beyond erasing the measured values. + Note that the reset does nothing to any Program objects in existence, beyond erasing the + measured values. Args: backend_options (Dict[str, Any]): keyword arguments for the backend, @@ -207,7 +209,7 @@ def print_applied(self, print_fn=print): print_fn (function): optional custom function to use for string printing. """ for k, r in enumerate(self.run_progs): - print_fn('Run {}:'.format(k)) + print_fn("Run {}:".format(k)) r.print(print_fn) @abc.abstractmethod @@ -279,16 +281,20 @@ def _broadcast_nones(val, shots): else: # there was a previous program segment if not p.can_follow(prev): - raise RuntimeError("Register mismatch: program {}, '{}'.".format(len(self.run_progs), p.name)) + raise RuntimeError( + "Register mismatch: program {}, '{}'.".format(len(self.run_progs), p.name) + ) # Copy the latest measured values in the RegRefs of p. - # We cannot copy from prev directly because it could be used in more than one engine. + # We cannot copy from prev directly because it could be used in more than one + # engine. for k, v in enumerate(self.samples): p.reg_refs[k].val = v # if the program hasn't been compiled for this backend, do it now if p.target != self.backend_name: - p = p.compile(self.backend_name, **compile_options) # TODO: shots might be relevant for compilation? + # TODO: shots might be relevant for compilation? + p = p.compile(self.backend_name, **compile_options) p.lock() if self.backend_name in getattr(self, "HARDWARE_BACKENDS", []): @@ -296,7 +302,9 @@ def _broadcast_nones(val, shots): else: self._run_program(p, **kwargs) shots = kwargs.get("shots", 1) - self.samples = [_broadcast_nones(p.reg_refs[k].val, shots) for k in sorted(p.reg_refs)] + self.samples = [ + _broadcast_nones(p.reg_refs[k].val, shots) for k in sorted(p.reg_refs) + ] self.run_progs.append(p) @@ -349,14 +357,21 @@ def _run_program(self, prog, **kwargs): for cmd in prog.circuit: try: # try to apply it to the backend - cmd.op.apply(cmd.reg, self.backend, **kwargs) # NOTE we could also handle storing measured vals here + # NOTE we could also handle storing measured vals here + cmd.op.apply(cmd.reg, self.backend, **kwargs) applied.append(cmd) except NotApplicableError: # command is not applicable to the current backend type - raise NotApplicableError('The operation {} cannot be used with {}.'.format(cmd.op, self.backend)) from None + raise NotApplicableError( + "The operation {} cannot be used with {}.".format(cmd.op, self.backend) + ) from None except NotImplementedError: # command not directly supported by backend API - raise NotImplementedError('The operation {} has not been implemented in {} for the arguments {}.'.format(cmd.op, self.backend, kwargs)) from None + raise NotImplementedError( + "The operation {} has not been implemented in {} for the arguments {}.".format( + cmd.op, self.backend, kwargs + ) + ) from None return applied def run(self, program, *, shots=1, compile_options={}, modes=None, state_options={}, **kwargs): @@ -368,7 +383,8 @@ def run(self, program, *, shots=1, compile_options={}, modes=None, state_options program (Program, Sequence[Program]): quantum programs to run shots (int): number of times the program measurement evaluation is repeated compile_options (Dict[str, Any]): keyword arguments for :meth:`.Program.compile` - modes (None, Sequence[int]): Modes to be returned in the ``Result.state`` :class:`.BaseState` object. 
+ modes (None, Sequence[int]): + Modes to be returned in the ``Result.state`` :class:`.BaseState` object. An empty sequence [] means no state object is returned. None returns all the modes. state_options (Dict[str, Any]): keyword arguments for :meth:`.BaseBackend.state` @@ -378,14 +394,16 @@ def run(self, program, *, shots=1, compile_options={}, modes=None, state_options Result: results of the computation """ - # session or feed_dict are needed by TF backend during simulation if program contains measurements + # session or feed_dict are needed by TF backend during simulation if program contains + # measurements kwargs.update(state_options) result = super()._run(program, shots=shots, compile_options=compile_options, **kwargs) if isinstance(modes, Sequence) and not modes: # empty sequence pass else: - result._state = self.backend.state(modes, **state_options) # tfbackend.state can use kwargs + # tfbackend.state can use kwargs + result._state = self.backend.state(modes, **state_options) return result @@ -407,7 +425,7 @@ def __init__(self, polling_delay_seconds=1, **kwargs): backend = "chip0" super().__init__(backend) - api_client_params = {k: v for k, v in kwargs.items() if k in DEFAULT_CONFIG['api'].keys()} + api_client_params = {k: v for k, v in kwargs.items() if k in DEFAULT_CONFIG["api"].keys()} self.client = APIClient(**api_client_params) self.polling_delay_seconds = polling_delay_seconds self.jobs = [] @@ -473,8 +491,9 @@ def _run_program(self, program, **kwargs): Given a compiled program, gets the blackbird circuit code and creates (or resumes) a job via the API. If the job is completed, returns the job result. - A queued job can be interrupted by a ``KeyboardInterrupt`` event, at which point if the job ID - was retrieved from the server, the job will be accessible via :meth:`~.Starship.jobs`. + A queued job can be interrupted by a ``KeyboardInterrupt`` event, at which point if the + job ID was retrieved from the server, the job will be accessible via + :meth:`~.Starship.jobs`. Args: program (strawberryfields.program.Program): program to be executed remotely @@ -511,7 +530,7 @@ def _run_program(self, program, **kwargs): job.result.manager.get() return job.result.result.value - def run(self, program, shots=1, name=None, **kwargs): + def run(self, program, shots=1, name="", **kwargs): """Compile the given program and execute it by queuing a job in the Starship. 
For the :class:`Program` instance given as input, the following happens: @@ -535,8 +554,6 @@ def run(self, program, shots=1, name=None, **kwargs): Result: results of the computation """ - # TODO: this is probably not needed - name = name or str(uuid.uuid4()) return super()._run(program, shots=shots, name=name, **kwargs) From 0314e693d70c0cbbf11713a3eb87eb2be6466814 Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 9 Jul 2019 11:17:52 -0400 Subject: [PATCH 057/335] Combine APIClient tests with frontend tests --- tests/{api_client => frontend}/test_api_client.py | 2 ++ tests/pytest.ini | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) rename tests/{api_client => frontend}/test_api_client.py (99%) diff --git a/tests/api_client/test_api_client.py b/tests/frontend/test_api_client.py similarity index 99% rename from tests/api_client/test_api_client.py rename to tests/frontend/test_api_client.py index c29b0bb8d..54b203d1c 100644 --- a/tests/api_client/test_api_client.py +++ b/tests/frontend/test_api_client.py @@ -30,6 +30,8 @@ from unittest.mock import MagicMock +pytestmark = pytest.mark.frontend + status_codes = requests.status_codes.codes diff --git a/tests/pytest.ini b/tests/pytest.ini index b23730eb2..249ced5fe 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -2,4 +2,3 @@ markers = backends(name1, name2, ...): test applies to named backends only frontend: test applies to frontend only - api_client: test applies to API Client only From 13ef3c2fa4ff69ee96d5d5f01b68d74af3e49124 Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 9 Jul 2019 13:46:38 -0400 Subject: [PATCH 058/335] Make name argument required, raise Exception with error response - ResourceManager.handle_error_response will now raises exceptions - An "errors" attribute was added to the manager to track errors --- strawberryfields/api_client.py | 34 ++++++++----------------------- strawberryfields/engine.py | 2 +- tests/frontend/test_api_client.py | 10 ++++----- tests/frontend/test_engine.py | 10 ++++----- 4 files changed, 19 insertions(+), 37 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index fed3a379f..e5b563a61 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -257,6 +257,7 @@ class ResourceManager: """ http_status_code = None + errors = None def __init__(self, resource, client=None): """ @@ -267,6 +268,7 @@ def __init__(self, resource, client=None): """ self.resource = resource self.client = client or APIClient() + self.errors = [] def join_path(self, path): """ @@ -351,32 +353,12 @@ def handle_error_response(self, response): response (requests.Response): a response object to be parsed """ - # TODO: Improve error messaging and parse the actual error output (json). 
- - if response.status_code in (400, 404, 409): - warnings.warn( - "The server did not accept the request, and returned an error " - "({}: {}).".format(response.status_code, response.text), - UserWarning, - ) - elif response.status_code == 401: - warnings.warn( - "The server did not accept the request due to an authentication error " - "({}: {}).".format(response.status_code, response.text), - UserWarning, - ) - elif response.status_code in (500, 503, 504): - warnings.warn( - "The client encountered an unexpected temporary server error " - "({}: {}).".format(response.status_code, response.text), - UserWarning, - ) - else: - warnings.warn( - "The client encountered an unexpected server error " - "({}: {}).".format(response.status_code, response.text), - UserWarning, - ) + error = {"status_code": response.status_code, "content": response.json()} + self.errors.append(error) + try: + response.raise_for_status() + except Exception as e: + raise Exception(response.text) from e def refresh_data(self, data): """ diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 49bd2360e..f05840ca0 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -530,7 +530,7 @@ def _run_program(self, program, **kwargs): job.result.manager.get() return job.result.result.value - def run(self, program, shots=1, name="", **kwargs): + def run(self, program, name, shots=1, **kwargs): """Compile the given program and execute it by queuing a job in the Starship. For the :class:`Program` instance given as input, the following happens: diff --git a/tests/frontend/test_api_client.py b/tests/frontend/test_api_client.py index 54b203d1c..87f482f91 100644 --- a/tests/frontend/test_api_client.py +++ b/tests/frontend/test_api_client.py @@ -133,7 +133,6 @@ def raise_for_status(self): raise requests.exceptions.HTTPError() -@pytest.mark.api_client class TestAPIClient: def test_init_default_client(self): """ @@ -210,7 +209,6 @@ def test_join_path(self, client): assert client.join_path("jobs") == "{client.BASE_URL}/jobs".format(client=client) -@pytest.mark.api_client class TestResourceManager: def test_init(self): """ @@ -357,7 +355,6 @@ def test_handle_refresh_data(self): field.set.assert_called_once_with(mock_data[field.name]) -@pytest.mark.api_client class TestJob: def test_create_created(self, monkeypatch): """ @@ -379,5 +376,8 @@ def test_create_bad_request(self, monkeypatch): monkeypatch.setattr(requests, "post", lambda url, headers, data: MockPOSTResponse(400)) job = Job() - job.manager.create(params={}) - assert job.manager.http_status_code == 400 + with pytest.raises(Exception): + job.manager.create(params={}) + assert len(job.manager.errors) == 1 + assert job.manager.errors[0]["status_code"] == 400 + assert job.manager.errors[0]["content"] == MockPOSTResponse(400).json() diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 9e28191e7..d41e0583e 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -303,14 +303,14 @@ def test_run(self, starship_engine, monkeypatch): Tests StarshipEngine.run. It is expected that StarshipEngine._run is called with the correct parameters. 
""" - mock_run = MagicMock() - monkeypatch.setattr("strawberryfields.engine.BaseEngine._run", mock_run) + mock__run = MagicMock() + monkeypatch.setattr("strawberryfields.engine.BaseEngine._run", mock__run) inputs = MagicMock() inputs.params = {"param": MagicMock()} - starship_engine.run(inputs.program, inputs.shots, inputs.name, **inputs.params) - mock_run.assert_called_once_with( + starship_engine.run(inputs.program, inputs.name, inputs.shots, **inputs.params) + mock__run.assert_called_once_with( inputs.program, shots=inputs.shots, name=inputs.name, param=inputs.params["param"] ) @@ -362,6 +362,6 @@ def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): ops.MeasureFock() | [2] ops.MeasureFock() | [3] - engine.run(prog) + engine.run(prog, "SomeProg") mock_api_client_post.assert_called_once() From fc5ef2b7c53b07b217666377862fdbc7e1e257f0 Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 9 Jul 2019 13:55:42 -0400 Subject: [PATCH 059/335] Adding errors and better error handling to managers, update requirements --- requirements.txt | 2 ++ setup.py | 2 ++ strawberryfields/api_client.py | 37 +++++++------------------------ tests/frontend/test_api_client.py | 10 ++++----- 4 files changed, 17 insertions(+), 34 deletions(-) diff --git a/requirements.txt b/requirements.txt index 94665f2ba..f3c9027fc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,3 +8,5 @@ python-dateutil==2.8.0 hafnian>=0.6 toml appdirs +requests==2.22.0 +urllib3==1.25.3 diff --git a/setup.py b/setup.py index cb4a19932..416c7d5ed 100644 --- a/setup.py +++ b/setup.py @@ -33,6 +33,8 @@ "hafnian>=0.6", "toml", "appdirs", + "requests>=2.22.0", + "urllib3>=1.25.3", ] # extra_requirements = [ diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 04d46735d..e5b563a61 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -177,9 +177,6 @@ def __init__(self, **kwargs): if self.AUTHENTICATION_TOKEN: self.set_authorization_header(self.AUTHENTICATION_TOKEN) - # Configuration keys are added here for convenience - self.CONFIG_KEYS = config.keys() - def get_configuration_from_config(self): """ Retrieve configuration from environment variables or config file based on Strawberry Fields @@ -260,6 +257,7 @@ class ResourceManager: """ http_status_code = None + errors = None def __init__(self, resource, client=None): """ @@ -270,6 +268,7 @@ def __init__(self, resource, client=None): """ self.resource = resource self.client = client or APIClient() + self.errors = [] def join_path(self, path): """ @@ -354,32 +353,12 @@ def handle_error_response(self, response): response (requests.Response): a response object to be parsed """ - # TODO: Improve error messaging and parse the actual error output (json). 
- - if response.status_code in (400, 404, 409): - warnings.warn( - "The server did not accept the request, and returned an error " - "({}: {}).".format(response.status_code, response.text), - UserWarning, - ) - elif response.status_code == 401: - warnings.warn( - "The server did not accept the request due to an authentication error " - "({}: {}).".format(response.status_code, response.text), - UserWarning, - ) - elif response.status_code in (500, 503, 504): - warnings.warn( - "The client encountered an unexpected temporary server error " - "({}: {}).".format(response.status_code, response.text), - UserWarning, - ) - else: - warnings.warn( - "The client encountered an unexpected server error " - "({}: {}).".format(response.status_code, response.text), - UserWarning, - ) + error = {"status_code": response.status_code, "content": response.json()} + self.errors.append(error) + try: + response.raise_for_status() + except Exception as e: + raise Exception(response.text) from e def refresh_data(self, data): """ diff --git a/tests/frontend/test_api_client.py b/tests/frontend/test_api_client.py index 54b203d1c..87f482f91 100644 --- a/tests/frontend/test_api_client.py +++ b/tests/frontend/test_api_client.py @@ -133,7 +133,6 @@ def raise_for_status(self): raise requests.exceptions.HTTPError() -@pytest.mark.api_client class TestAPIClient: def test_init_default_client(self): """ @@ -210,7 +209,6 @@ def test_join_path(self, client): assert client.join_path("jobs") == "{client.BASE_URL}/jobs".format(client=client) -@pytest.mark.api_client class TestResourceManager: def test_init(self): """ @@ -357,7 +355,6 @@ def test_handle_refresh_data(self): field.set.assert_called_once_with(mock_data[field.name]) -@pytest.mark.api_client class TestJob: def test_create_created(self, monkeypatch): """ @@ -379,5 +376,8 @@ def test_create_bad_request(self, monkeypatch): monkeypatch.setattr(requests, "post", lambda url, headers, data: MockPOSTResponse(400)) job = Job() - job.manager.create(params={}) - assert job.manager.http_status_code == 400 + with pytest.raises(Exception): + job.manager.create(params={}) + assert len(job.manager.errors) == 1 + assert job.manager.errors[0]["status_code"] == 400 + assert job.manager.errors[0]["content"] == MockPOSTResponse(400).json() From 1faf7cedd715d10cb62d759f296d66569dac9197 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Sat, 13 Jul 2019 21:49:31 +0800 Subject: [PATCH 060/335] remove name --- strawberryfields/engine.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 502f68ccc..4836c0235 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -461,13 +461,12 @@ def reset(self): super().reset(backend_options={}) self.jobs.clear() - def _get_blackbird(self, name, shots, program): + def _get_blackbird(self, shots, program): """ Returns a Blackbird object to be sent later to the server when creating a job. Assumes the current backend as the target. Args: - name (str): the name of the job to be created (e.g. 
StateTeleportation) shots (int): the number of shots program (Program): program to be converted to Blackbird code @@ -475,11 +474,10 @@ def _get_blackbird(self, name, shots, program): blackbird.BlackbirdProgram """ bb = to_blackbird(program, version="1.0") - bb._name = name # TODO: This is potentially not needed here bb._target["name"] = self.backend_name - + # TODO: set up shots pass-through once PR #130 is merged bb._target["options"] = {"shots": shots} return bb @@ -543,7 +541,7 @@ def _run_program(self, program, **kwargs): job.result.manager.get() return job.result.result.value - def run(self, program, name, shots=1, **kwargs): + def run(self, program, shots=1, **kwargs): """Compile the given program and execute it by queuing a job in the Starship. For the :class:`Program` instance given as input, the following happens: @@ -558,7 +556,6 @@ def run(self, program, name, shots=1, **kwargs): Args: program (Program, Sequence[Program]): quantum programs to run - name (str): The name of the program (an arbitrary string) shots (int): number of times the program measurement evaluation is repeated The ``kwargs`` keyword arguments are passed to :meth:`_run_program`. @@ -567,7 +564,7 @@ def run(self, program, name, shots=1, **kwargs): Result: results of the computation """ - return super()._run(program, shots=shots, name=name, **kwargs) + return super()._run(program, shots=shots, **kwargs) Engine = LocalEngine # alias for backwards compatibility From b01ee06c3687a48d1110c08290c8115de53cbe98 Mon Sep 17 00:00:00 2001 From: Nathan Killoran Date: Sat, 13 Jul 2019 11:22:02 -0400 Subject: [PATCH 061/335] fixing tests --- tests/frontend/test_engine.py | 36 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index d41e0583e..01c117928 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -201,10 +201,9 @@ def test__get_blackbird(self, starship_engine, monkeypatch): monkeypatch.setattr("strawberryfields.engine.to_blackbird", methods.to_blackbird) - output = starship_engine._get_blackbird(inputs.name, inputs.shots, inputs.program) + output = starship_engine._get_blackbird(inputs.shots, inputs.program) methods.to_blackbird.assert_called_once_with(inputs.program, version="1.0") - assert output._name == inputs.name assert len(output._target.__setitem__.call_args_list) == 2 assert output._target.__setitem__.call_args_list[0] == call( "name", starship_engine.backend_name @@ -309,9 +308,9 @@ def test_run(self, starship_engine, monkeypatch): inputs = MagicMock() inputs.params = {"param": MagicMock()} - starship_engine.run(inputs.program, inputs.name, inputs.shots, **inputs.params) + starship_engine.run(inputs.program, inputs.shots, **inputs.params) mock__run.assert_called_once_with( - inputs.program, shots=inputs.shots, name=inputs.name, param=inputs.params["param"] + inputs.program, shots=inputs.shots, param=inputs.params["param"] ) def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): @@ -347,21 +346,18 @@ def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): prog = sf.Program(4) with prog.context as q: - ops.S2gate(2) | [0, 2] - ops.S2gate(2) | [1, 3] - ops.Rgate(3) | 0 - ops.BSgate() | [0, 1] - ops.Rgate(3) | 0 - ops.Rgate(3) | 1 - ops.Rgate(3) | 2 - ops.BSgate() | [2, 3] - ops.Rgate(3) | 2 - ops.Rgate(3) | 3 - ops.MeasureFock() | [0] - ops.MeasureFock() | [1] - ops.MeasureFock() | [2] - ops.MeasureFock() | [3] - - engine.run(prog, "SomeProg") 
+ ops.S2gate(2) | (q[0], q[2]) + ops.S2gate(2) | (q[1], q[3]) + ops.Rgate(3) | q[0] + ops.BSgate() | (q[0], q[1]) + ops.Rgate(3) | q[0] + ops.BSgate(3) | (q[0], q[1]) + ops.Rgate(3) | q[2] + ops.BSgate() | (q[2], q[3]) + ops.Rgate(3) | q[2] + ops.BSgate() | (q[2], q[3]) + ops.MeasureFock() | q + + engine.run(prog) mock_api_client_post.assert_called_once() From addb0d26dfe2adcf5d6e558dc39ac0ea11492308 Mon Sep 17 00:00:00 2001 From: Nathan Killoran Date: Sat, 13 Jul 2019 15:01:04 -0400 Subject: [PATCH 062/335] passing through shots and fixing failing tests in base branch after merging from master --- strawberryfields/engine.py | 7 ++++++- tests/frontend/test_engine.py | 5 +++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index b5bcb117e..4707d1e30 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -451,6 +451,11 @@ def __init__(self, polling_delay_seconds=1, **kwargs): backend = "chip0" super().__init__(backend) + # todo: move this into backend class + class Chip0Backend(BaseBackend): + circuit_spec = "chip0" + self.backend = Chip0Backend() + api_client_params = {k: v for k, v in kwargs.items() if k in DEFAULT_CONFIG["api"].keys()} self.client = APIClient(**api_client_params) self.polling_delay_seconds = polling_delay_seconds @@ -490,7 +495,6 @@ def _get_blackbird(self, shots, program): # TODO: This is potentially not needed here bb._target["name"] = self.backend_name - # TODO: set up shots pass-through once PR #130 is merged bb._target["options"] = {"shots": shots} return bb @@ -534,6 +538,7 @@ def _run_program(self, program, **kwargs): "A job is already queued. Please reset the engine and try again." ) + kwargs.update(program.run_options) job_content = self._get_blackbird(program=program, **kwargs).serialize() job = self._queue_job(job_content) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 49cd21402..8554c1cca 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -275,7 +275,7 @@ def test__run(self, starship_engine, monkeypatch): """ inputs = MagicMock() - inputs.shots = 1 + inputs.shots = 5 outputs = MagicMock() methods = MagicMock() @@ -283,11 +283,12 @@ def test__run(self, starship_engine, monkeypatch): monkeypatch.setattr(starship_engine, "HARDWARE_BACKENDS", [str(inputs.mock_backend)]) monkeypatch.setattr(starship_engine, "_run_program", methods._run_program) monkeypatch.setattr("strawberryfields.engine.Result", outputs.result) + monkeypatch.setattr(starship_engine, "backend", inputs.mock_backend) result = starship_engine._run(inputs.program, shots=inputs.shots) assert starship_engine.backend_name in starship_engine.HARDWARE_BACKENDS - inputs.program.compile.assert_called_once_with(starship_engine.backend_name) + inputs.program.compile.assert_called_once_with(starship_engine.backend.circuit_spec) mock_compiled_program = inputs.program.compile(starship_engine.backend_name) mock_compiled_program.lock.assert_called_once() methods._run_program.assert_called_once_with(mock_compiled_program, shots=inputs.shots) From d57a5073d58e9d84e599c6093993e502455a1c4d Mon Sep 17 00:00:00 2001 From: Zeid Date: Mon, 15 Jul 2019 08:42:05 -0400 Subject: [PATCH 063/335] Add _dev to build packages so config import does not fail --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 14f8f3880..4406fff8f 100644 --- a/setup.py +++ b/setup.py @@ -56,6 +56,7 @@ "strawberryfields.backends.tfbackend", 
"strawberryfields.backends.fockbackend", "strawberryfields.backends.gaussianbackend", + "strawberryfields._dev", ], "package_data": {"strawberryfields": ["backends/data/*"]}, "include_package_data": True, From 2ed02ba246b4761f984ef546be20dbff78590111 Mon Sep 17 00:00:00 2001 From: Zeid Date: Mon, 15 Jul 2019 09:46:21 -0400 Subject: [PATCH 064/335] Revert "moving config file to temp location before release (#110)" This reverts commit 1ff525bfc13ce8379445bb6a0a658b833fedf767. --- strawberryfields/{_dev => }/configuration.py | 0 tests/frontend/test_configuration.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename strawberryfields/{_dev => }/configuration.py (100%) diff --git a/strawberryfields/_dev/configuration.py b/strawberryfields/configuration.py similarity index 100% rename from strawberryfields/_dev/configuration.py rename to strawberryfields/configuration.py diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 3582cf065..cf3eb98db 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -18,7 +18,7 @@ import toml -from strawberryfields._dev import configuration as conf +from strawberryfields import configuration as conf pytestmark = pytest.mark.frontend logging.getLogger().setLevel(1) From 7478285a8196408994e007f813d65cc0bdec1fc1 Mon Sep 17 00:00:00 2001 From: Zeid Date: Mon, 15 Jul 2019 09:47:21 -0400 Subject: [PATCH 065/335] Revert "Add _dev to build packages so config import does not fail" This reverts commit d57a5073d58e9d84e599c6093993e502455a1c4d. --- setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.py b/setup.py index 4406fff8f..14f8f3880 100644 --- a/setup.py +++ b/setup.py @@ -56,7 +56,6 @@ "strawberryfields.backends.tfbackend", "strawberryfields.backends.fockbackend", "strawberryfields.backends.gaussianbackend", - "strawberryfields._dev", ], "package_data": {"strawberryfields": ["backends/data/*"]}, "include_package_data": True, From 7301eaae8a197f600cd4313f90234386f7c15194 Mon Sep 17 00:00:00 2001 From: Zeid Date: Mon, 15 Jul 2019 10:48:50 -0400 Subject: [PATCH 066/335] Fix references to _dev --- strawberryfields/api_client.py | 2 +- strawberryfields/engine.py | 2 +- tests/frontend/test_api_client.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index e5b563a61..c94ee4962 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -83,7 +83,7 @@ import dateutil.parser import requests -from strawberryfields._dev import configuration +from strawberryfields import configuration def join_path(base_path, path): diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 4707d1e30..8d2cb4a68 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -82,7 +82,7 @@ from strawberryfields.api_client import APIClient, Job, JobNotQueuedError, JobExecutionError from strawberryfields.io import to_blackbird -from strawberryfields._dev.configuration import DEFAULT_CONFIG +from strawberryfields.configuration import DEFAULT_CONFIG class OneJobAtATimeError(Exception): diff --git a/tests/frontend/test_api_client.py b/tests/frontend/test_api_client.py index 87f482f91..06b9a8091 100644 --- a/tests/frontend/test_api_client.py +++ b/tests/frontend/test_api_client.py @@ -19,7 +19,7 @@ import pytest import json from strawberryfields import api_client -from strawberryfields._dev import configuration +from strawberryfields import configuration from 
strawberryfields.api_client import ( requests, Job, From 5be715b6fa71e5caf7347faa5dcccd5ea0277557 Mon Sep 17 00:00:00 2001 From: Zeid Date: Mon, 15 Jul 2019 11:18:59 -0400 Subject: [PATCH 067/335] Fetch field.value to get correct value of field --- strawberryfields/api_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index c94ee4962..49cf9caa6 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -504,10 +504,10 @@ def refresh_data(self): Refresh the job fields and attach a JobResult and JobCircuit object to the Job instance. """ if self.result is None: - self.result = JobResult(self.id, client=self.manager.client) + self.result = JobResult(self.id.value, client=self.manager.client) if self.circuit is None: - self.circuit = JobCircuit(self.id, client=self.manager.client) + self.circuit = JobCircuit(self.id.value, client=self.manager.client) class JobResult(Resource): From 87376f0d0384deb60fa95607cbb361a6461add24 Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 23 Jul 2019 11:00:42 -0400 Subject: [PATCH 068/335] Fix KeyError when config key is missing, remove unused variables --- strawberryfields/api_client.py | 25 ++++++++++++------------- strawberryfields/configuration.py | 2 +- tests/frontend/test_api_client.py | 3 +-- 3 files changed, 14 insertions(+), 16 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 49cf9caa6..678100c01 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -141,37 +141,36 @@ class APIClient: USER_AGENT = "strawberryfields-api-client/0.1" - ALLOWED_HOSTNAMES = ["localhost", "localhost:8080", "platform.strawberryfields.ai"] + ALLOWED_HOSTNAMES = [ + "localhost", + "localhost:8080", + "platform.strawberryfields.ai", + ] DEFAULT_HOSTNAME = "localhost" - ENV_KEY_PREFIX = "SF_API_" - ENV_AUTHENTICATION_TOKEN_KEY = "{}AUTHENTICATION_TOKEN".format(ENV_KEY_PREFIX) - ENV_API_HOSTNAME_KEY = "{}API_HOSTNAME".format(ENV_KEY_PREFIX) - ENV_USE_SSL_KEY = "{}USE_SSL".format(ENV_KEY_PREFIX) - def __init__(self, **kwargs): """ Initialize the API client with various parameters. 
""" - config = self.get_configuration_from_config() + self._config = self.get_configuration_from_config() # Override any values that are explicitly passed when initializing client - config.update(kwargs) + self._config.update(kwargs) - if config["hostname"] is None: + if self._config["hostname"] is None: raise ValueError("hostname parameter is missing") - if config["hostname"] not in self.ALLOWED_HOSTNAMES: + if self._config["hostname"] not in self.ALLOWED_HOSTNAMES: raise ValueError("hostname parameter not in allowed list") - self.USE_SSL = config["use_ssl"] + self.USE_SSL = self._config["use_ssl"] if not self.USE_SSL: warnings.warn("Connecting insecurely to API server", UserWarning) - self.HOSTNAME = config["hostname"] + self.HOSTNAME = self._config["hostname"] self.BASE_URL = "{}://{}".format("https" if self.USE_SSL else "http", self.HOSTNAME) - self.AUTHENTICATION_TOKEN = config["authentication_token"] + self.AUTHENTICATION_TOKEN = self._config["authentication_token"] self.HEADERS = {"User-Agent": self.USER_AGENT} if self.AUTHENTICATION_TOKEN: diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index de55131f0..7b890a50c 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -160,7 +160,7 @@ def update_config(self): if env in os.environ: # Update from environment variable self._config[section][key] = os.environ[env] - elif self._config_file: + elif self._config_file and key in self._config_file[section]: # Update from configuration file self._config[section][key] = self._config_file[section][key] diff --git a/tests/frontend/test_api_client.py b/tests/frontend/test_api_client.py index 06b9a8091..3527fe5b0 100644 --- a/tests/frontend/test_api_client.py +++ b/tests/frontend/test_api_client.py @@ -138,7 +138,7 @@ def test_init_default_client(self): """ Test that initializing a default client generates an APIClient with the expected params. """ - client = api_client.APIClient() + client = api_client.APIClient(hostname="localhost") assert client.USE_SSL is True assert not client.AUTHENTICATION_TOKEN assert client.BASE_URL == "https://localhost" @@ -152,7 +152,6 @@ def test_init_default_client_no_ssl(self): client = api_client.APIClient(use_ssl=False) assert client.USE_SSL is False assert not client.AUTHENTICATION_TOKEN - assert client.BASE_URL == "http://localhost" assert client.HEADERS["User-Agent"] == client.USER_AGENT def test_init_custom_token_client(self): From 8c1f7a6682011f3cbef18e1205fb1276dc8d853d Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 23 Jul 2019 14:18:27 -0400 Subject: [PATCH 069/335] Implement debug mode Debug mode stores failed requests, as well as all responses in the client object. 
--- strawberryfields/api_client.py | 58 ++++++++++++++++++++-------- strawberryfields/configuration.py | 4 +- strawberryfields/engine.py | 3 +- tests/frontend/test_api_client.py | 7 ++-- tests/frontend/test_configuration.py | 2 + 5 files changed, 51 insertions(+), 23 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 678100c01..c92f2b881 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -81,8 +81,8 @@ import warnings import dateutil.parser -import requests +import requests from strawberryfields import configuration @@ -141,11 +141,7 @@ class APIClient: USER_AGENT = "strawberryfields-api-client/0.1" - ALLOWED_HOSTNAMES = [ - "localhost", - "localhost:8080", - "platform.strawberryfields.ai", - ] + ALLOWED_HOSTNAMES = ["localhost", "localhost:8080", "platform.strawberryfields.ai"] DEFAULT_HOSTNAME = "localhost" @@ -154,6 +150,7 @@ def __init__(self, **kwargs): Initialize the API client with various parameters. """ self._config = self.get_configuration_from_config() + self._config["debug"] = self._config["debug"] in (True, "True", "true", "TRUE", 1) # Override any values that are explicitly passed when initializing client self._config.update(kwargs) @@ -172,10 +169,15 @@ def __init__(self, **kwargs): self.BASE_URL = "{}://{}".format("https" if self.USE_SSL else "http", self.HOSTNAME) self.AUTHENTICATION_TOKEN = self._config["authentication_token"] self.HEADERS = {"User-Agent": self.USER_AGENT} + self.DEBUG = self._config["debug"] if self.AUTHENTICATION_TOKEN: self.set_authorization_header(self.AUTHENTICATION_TOKEN) + if self.DEBUG: + self.errors = [] + self.responses = [] + def get_configuration_from_config(self): """ Retrieve configuration from environment variables or config file based on Strawberry Fields @@ -216,6 +218,35 @@ def join_path(self, path): """ return join_path(self.BASE_URL, path) + def request(self, method, **params): + """ + Calls ``method`` with ``params`` after applying headers. Records the request type and + parameters to ``self.errors`` if the request is not successful, and the response to + ``self.responses`` if a response is returned from the server. + + Args: + method: one of ``requests.get`` or ``requests.post`` + **params: the parameters to pass on to the method (e.g. ``url``, ``data``, etc.) + + Returns: + requests.Response: a response object, or None if no response could be fetched + """ + assert method in (requests.get, requests.post) + + params["headers"] = self.HEADERS + + try: + response = method(**params) + except Exception as e: + if self.DEBUG: + self.errors.append((method, params, e)) + raise + + if self.DEBUG: + self.responses.append(response) + + return response + def get(self, path): """ Sends a GET request to the provided path. Returns a response object. @@ -224,12 +255,9 @@ def get(self, path): path (str): path to send the GET request to Returns: - requests.Response: A response object, or None if no response could be fetched from the - server. + requests.Response: A response object, or None if no response could be fetched """ - # TODO: better error handling (e.g. 
ConnectionError) - response = requests.get(url=self.join_path(path), headers=self.HEADERS) - return response + return self.request(requests.get, url=self.join_path(path)) def post(self, path, payload): """ @@ -241,13 +269,9 @@ def post(self, path, payload): payload: JSON serializable object to be sent to the server Returns: - requests.Response: A response object, or None if no response could be fetched from the - server. + requests.Response: A response object, or None if no response could be fetched """ - # TODO: catch any exceptions from dumping JSON, and handle request errors - data = json.dumps(payload) - response = requests.post(url=self.join_path(path), headers=self.HEADERS, data=data) - return response + return self.request(requests.post, url=self.join_path(path), data=json.dumps(payload)) class ResourceManager: diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 7b890a50c..8eb7a09d8 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -95,7 +95,9 @@ log.getLogger() -DEFAULT_CONFIG = {"api": {"authentication_token": "", "hostname": "localhost", "use_ssl": True}} +DEFAULT_CONFIG = { + "api": {"authentication_token": "", "hostname": "localhost", "use_ssl": True, "debug": False} +} class ConfigurationError(Exception): diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 8d2cb4a68..78695f770 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -308,7 +308,8 @@ def _broadcast_nones(val, dim): self.run_progs.append(p) prev = p - return Result(self.samples.copy()) + if self.samples: + return Result(self.samples.copy()) class LocalEngine(BaseEngine): diff --git a/tests/frontend/test_api_client.py b/tests/frontend/test_api_client.py index 3527fe5b0..85e026993 100644 --- a/tests/frontend/test_api_client.py +++ b/tests/frontend/test_api_client.py @@ -138,10 +138,10 @@ def test_init_default_client(self): """ Test that initializing a default client generates an APIClient with the expected params. 
""" - client = api_client.APIClient(hostname="localhost") + client = api_client.APIClient(use_ssl=True, authentication_token="") assert client.USE_SSL is True assert not client.AUTHENTICATION_TOKEN - assert client.BASE_URL == "https://localhost" + assert client.BASE_URL.startswith("https://") assert client.HEADERS["User-Agent"] == client.USER_AGENT def test_init_default_client_no_ssl(self): @@ -151,8 +151,7 @@ def test_init_default_client_no_ssl(self): """ client = api_client.APIClient(use_ssl=False) assert client.USE_SSL is False - assert not client.AUTHENTICATION_TOKEN - assert client.HEADERS["User-Agent"] == client.USER_AGENT + assert client.BASE_URL.startswith("http://") def test_init_custom_token_client(self): """ diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index cf3eb98db..1f1005682 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -30,6 +30,7 @@ authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" hostname = "localhost" use_ssl = true +debug = false """ EXPECTED_CONFIG = { @@ -37,6 +38,7 @@ "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", "hostname": "localhost", "use_ssl": True, + "debug": False, } } From aef09a0c0b962a22f0df2882b281df9928ecabf9 Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 23 Jul 2019 14:30:10 -0400 Subject: [PATCH 070/335] Minor fix to documentation --- strawberryfields/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 7b890a50c..50f182a62 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -34,7 +34,7 @@ * On Linux: ``~/.config/strawberryfields`` * On Windows: ``~C:\Users\USERNAME\AppData\Local\Xanadu\strawberryfields`` - * On MacOS: ``~/Library/Preferences/strawberryfields`` + * On MacOS: ``~/Library/Application\ Support/strawberryfields`` If no configuration file is found, a warning message will be displayed in the logs, and all device parameters will need to be passed as keyword arguments when From 3a79bdd376426c32bd4f73f295f9d9267722b676 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 09:49:28 -0400 Subject: [PATCH 071/335] Add starship helper command This command helps execute .xbb files from the command line, as well as to automatically reconfigure the api parameters and write them to a config file, if needed. --- starship | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100755 starship diff --git a/starship b/starship new file mode 100755 index 000000000..b15a4dd96 --- /dev/null +++ b/starship @@ -0,0 +1,77 @@ +#!/usr/bin/env python + +import os +import sys +import argparse +import pdb + +from strawberryfields.engine import StarshipEngine +from strawberryfields.io import load +from strawberryfields import configuration + +DEFAULT_HOSTNAME = "platform.strawberryfields.ai" +PROMPTS = { + "hostname": "Please enter the hostname of the server to connect to: [{}] ", + "authentication_token": "Please enter the authentication token to use when connecting: [{}] ", + "save": "Would you like to save these settings to a local cofiguration file in the current " + "directory? 
[Y/n] ", +} + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Run Strawberry Fields code on StarshipEngine") + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument("--input", "-i", help="The XBB file to run") + parser.add_argument( + "--output", + "-o", + help="Where to output the result of the program. Outputs to stdout by default", + ) + parser.add_argument( + "--debug", action="store_true", help="Returns a pdb shell after executing the program" + ) + group.add_argument( + "--reconfigure", + action="store_true", + help="An interactive tool to reconfigure the API connection before executing the program", + ) + + args = parser.parse_args() + + if args.reconfigure: + config = configuration.Configuration() + + hostname = ( + input(PROMPTS["hostname"].format(config.api["hostname"])) or config.api["hostname"] + ) + authentication_token = ( + input(PROMPTS["authentication_token"].format(config.api["authentication_token"])) + or config.api["hostname"] + ) + save = input(PROMPTS["save"]).upper() == "Y" + + if not save: + sys.stdout.write("Not writing configuration to file...") + else: + if not os.path.isfile("config.toml"): + sys.stdout.write("Writing configuration file to current working directory...\n") + else: + sys.stdout.write("Updating configuration in current working directory...\n") + + config.api["hostname"] = hostname + config.api["authentication_token"] = authentication_token + config.save("config.toml") + sys.exit() + + eng = StarshipEngine() + program = load(args.input) + eng.run(program) + + if args.output_path: + with open(args.output_path, "w") as file: + file.write(str(eng.samples)) + file.close() + else: + sys.stdout.write(str(eng.samples)) + + if args.debug: + pdb.set_trace() From 63cf6b9c28534f1f729f1b2c484b4bf570fa0b99 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 09:53:59 -0400 Subject: [PATCH 072/335] Minor style changes --- starship | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/starship b/starship index b15a4dd96..b9d0251ac 100755 --- a/starship +++ b/starship @@ -18,21 +18,21 @@ PROMPTS = { } if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Run Strawberry Fields code on StarshipEngine") + parser = argparse.ArgumentParser(description="run Strawberry Fields code on StarshipEngine") group = parser.add_mutually_exclusive_group(required=True) - group.add_argument("--input", "-i", help="The XBB file to run") + group.add_argument("--input", "-i", help="the XBB file to run") parser.add_argument( "--output", "-o", - help="Where to output the result of the program. 
Outputs to stdout by default", + help="where to output the result of the program - outputs to stdout by default", ) parser.add_argument( - "--debug", action="store_true", help="Returns a pdb shell after executing the program" + "--debug", action="store_true", help="returns a pdb shell after executing the program" ) group.add_argument( "--reconfigure", action="store_true", - help="An interactive tool to reconfigure the API connection before executing the program", + help="an interactive tool to reconfigure the API connection before executing the program", ) args = parser.parse_args() From 609375b1a72eb62ff425836a2840943300cfa63e Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 10:43:35 -0400 Subject: [PATCH 073/335] Remove unnecessary variable --- starship | 1 - 1 file changed, 1 deletion(-) diff --git a/starship b/starship index b9d0251ac..1c65afb8c 100755 --- a/starship +++ b/starship @@ -9,7 +9,6 @@ from strawberryfields.engine import StarshipEngine from strawberryfields.io import load from strawberryfields import configuration -DEFAULT_HOSTNAME = "platform.strawberryfields.ai" PROMPTS = { "hostname": "Please enter the hostname of the server to connect to: [{}] ", "authentication_token": "Please enter the authentication token to use when connecting: [{}] ", From f253fa225cb8a1647073bd2ee375e17984309c84 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 10:53:07 -0400 Subject: [PATCH 074/335] Add scripts to setup.py --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 14f8f3880..3e82619d4 100644 --- a/setup.py +++ b/setup.py @@ -67,6 +67,7 @@ "command_options": { "build_sphinx": {"version": ("setup.py", version), "release": ("setup.py", version)} }, + "scripts": ["starship"], } classifiers = [ From c6d1e83246471e9166b0a040e6b14f1c7de15e84 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 10:59:31 -0400 Subject: [PATCH 075/335] Use Result object instead of engine when getting samples --- starship | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/starship b/starship index 1c65afb8c..693cedec5 100755 --- a/starship +++ b/starship @@ -63,14 +63,14 @@ if __name__ == "__main__": eng = StarshipEngine() program = load(args.input) - eng.run(program) + result = eng.run(program) if args.output_path: with open(args.output_path, "w") as file: - file.write(str(eng.samples)) + file.write(str(result.samples)) file.close() else: - sys.stdout.write(str(eng.samples)) + sys.stdout.write(str(result.samples)) if args.debug: pdb.set_trace() From 6fdeb65e278781211865442c30c5bb13b6ce2c9a Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 11:01:02 -0400 Subject: [PATCH 076/335] Remove redundand file.close() --- starship | 1 - 1 file changed, 1 deletion(-) diff --git a/starship b/starship index 693cedec5..91070fe16 100755 --- a/starship +++ b/starship @@ -68,7 +68,6 @@ if __name__ == "__main__": if args.output_path: with open(args.output_path, "w") as file: file.write(str(result.samples)) - file.close() else: sys.stdout.write(str(result.samples)) From 0e6e69645483942c420a643e3c791638a16eb9e0 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 11:35:48 -0400 Subject: [PATCH 077/335] Add copyright notice --- starship | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/starship b/starship index 91070fe16..a041ef4f6 100755 --- a/starship +++ b/starship @@ -1,5 +1,20 @@ #!/usr/bin/env python +# Copyright 2019 Xanadu Quantum Technologies Inc. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + import os import sys import argparse From f79a02afa1da388772b37a87123ff142dfd17fa8 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 13:02:57 -0400 Subject: [PATCH 078/335] Bug fix --- starship | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/starship b/starship index a041ef4f6..8be49d4a9 100755 --- a/starship +++ b/starship @@ -59,7 +59,7 @@ if __name__ == "__main__": ) authentication_token = ( input(PROMPTS["authentication_token"].format(config.api["authentication_token"])) - or config.api["hostname"] + or config.api["authentication_token"] ) save = input(PROMPTS["save"]).upper() == "Y" @@ -80,7 +80,7 @@ if __name__ == "__main__": program = load(args.input) result = eng.run(program) - if args.output_path: + if hasattr(args, "output_path"): with open(args.output_path, "w") as file: file.write(str(result.samples)) else: From f9547690d532d69c260a76bb0b4ae4b9b09e1303 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 13:26:40 -0400 Subject: [PATCH 079/335] Reset engine after job finishes; do not append to run_progs if using HW --- strawberryfields/engine.py | 7 ++++++- tests/frontend/test_engine.py | 1 - 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 8d2cb4a68..9622a8369 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -304,8 +304,8 @@ def _broadcast_nones(val, dim): self.samples = [ _broadcast_nones(p.reg_refs[k].val, kwargs["shots"]) for k in sorted(p.reg_refs) ] + self.run_progs.append(p) - self.run_progs.append(p) prev = p return Result(self.samples.copy()) @@ -550,8 +550,13 @@ def _run_program(self, program, **kwargs): if job.id: print("Job {} is queued in the background.".format(job.id.value)) else: + self.reset() raise JobNotQueuedError("Job was not sent to server. Please try again.") + # Job either failed or is complete - in either case, clear the job queue so that the engine is + # ready for future jobs. + self.reset() + if job.is_failed: # TODO: Add failure details here. Should this exception be raised elsewhere? raise JobExecutionError("Job execution failed. 
Please try again.") diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 8554c1cca..7227e6b91 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -295,7 +295,6 @@ def test__run(self, starship_engine, monkeypatch): assert starship_engine.samples == starship_engine._run_program( mock_compiled_program, shots=inputs.shots ) - assert starship_engine.run_progs == [mock_compiled_program] assert result == outputs.result(starship_engine.samples) def test_run(self, starship_engine, monkeypatch): From 7606fb0ba580105fda5c59b3f1d90d455a8a453f Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 13:29:19 -0400 Subject: [PATCH 080/335] Update test --- tests/frontend/test_engine.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 7227e6b91..4de8476c4 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -295,6 +295,7 @@ def test__run(self, starship_engine, monkeypatch): assert starship_engine.samples == starship_engine._run_program( mock_compiled_program, shots=inputs.shots ) + assert starship_engine.run_progs == [] assert result == outputs.result(starship_engine.samples) def test_run(self, starship_engine, monkeypatch): From 86e540b92a66f1291ee1fb026e180366985db12b Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 14:19:53 -0400 Subject: [PATCH 081/335] Better parsing of boolean environment variables --- strawberryfields/api_client.py | 1 - strawberryfields/configuration.py | 19 ++++++++++++++++++- tests/frontend/test_configuration.py | 19 +++++++++++++++++++ 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index c92f2b881..03e20b11a 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -150,7 +150,6 @@ def __init__(self, **kwargs): Initialize the API client with various parameters. 
""" self._config = self.get_configuration_from_config() - self._config["debug"] = self._config["debug"] in (True, "True", "true", "TRUE", 1) # Override any values that are explicitly passed when initializing client self._config.update(kwargs) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 8eb7a09d8..54be3eae7 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -99,6 +99,23 @@ "api": {"authentication_token": "", "hostname": "localhost", "use_ssl": True, "debug": False} } +BOOLEAN_KEYS = ("debug", "use_ssl") + + +def parse_environment_variable(key, value): + trues = (True, "true", "True", "TRUE", "1", 1) + falses = (False, "false", "False", "FALSE", "0", 0) + + if key in BOOLEAN_KEYS: + if value in trues: + return True + elif value in falses: + return False + else: + raise ValueError("Boolean could not be parsed") + else: + return value + class ConfigurationError(Exception): """Exception used for configuration errors""" @@ -161,7 +178,7 @@ def update_config(self): if env in os.environ: # Update from environment variable - self._config[section][key] = os.environ[env] + self._config[section][key] = parse_environment_variable(env, os.environ[env]) elif self._config_file and key in self._config_file[section]: # Update from configuration file self._config[section][key] = self._config_file[section][key] diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 1f1005682..257718285 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -18,6 +18,8 @@ import toml +from unittest.mock import MagicMock + from strawberryfields import configuration as conf pytestmark = pytest.mark.frontend @@ -136,3 +138,20 @@ def test_env_vars_take_precedence(self, tmpdir): config = conf.Configuration(str(filename)) assert config.api["hostname"] == host + + def test_parse_environment_variable(self, monkeypatch): + monkeypatch.setattr(conf, "BOOLEAN_KEYS", ("some_boolean",)) + assert conf.parse_environment_variable("some_boolean", "true") is True + assert conf.parse_environment_variable("some_boolean", "True") is True + assert conf.parse_environment_variable("some_boolean", "TRUE") is True + assert conf.parse_environment_variable("some_boolean", "1") is True + assert conf.parse_environment_variable("some_boolean", 1) is True + + assert conf.parse_environment_variable("some_boolean", "false") is False + assert conf.parse_environment_variable("some_boolean", "False") is False + assert conf.parse_environment_variable("some_boolean", "FALSE") is False + assert conf.parse_environment_variable("some_boolean", "0") is False + assert conf.parse_environment_variable("some_boolean", 0) is False + + something_else = MagicMock() + assert conf.parse_environment_variable("not_a_boolean", something_else) == something_else From 3ed66b4c42d310e97cd8d98d13378c8f489f5e0d Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 14:25:28 -0400 Subject: [PATCH 082/335] Code review feedback --- strawberryfields/api_client.py | 4 +++- strawberryfields/engine.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 03e20b11a..d717a96b5 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -230,7 +230,9 @@ def request(self, method, **params): Returns: requests.Response: a response object, or None if no response could be fetched """ - assert method in (requests.get, 
requests.post) + supported_methods = (requests.get, requests.post) + if method not in supported_methods: + raise TypeError("Unexpected or unsupported method provided") params["headers"] = self.HEADERS diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 78695f770..f13001aa7 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -308,7 +308,7 @@ def _broadcast_nones(val, dim): self.run_progs.append(p) prev = p - if self.samples: + if self.samples is not None: return Result(self.samples.copy()) From 9c4e4c7db99b085258b36586aa883e48f789bf48 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 15:45:44 -0400 Subject: [PATCH 083/335] Update test program --- tests/frontend/test_engine.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 8554c1cca..4aa1f9f96 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -346,17 +346,24 @@ def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): monkeypatch.setattr(APIClient, "get", mock_get) prog = sf.Program(4) + + sqz0 = 1.0 + sqz1 = 1.0 + phi0 = 0.574 + phi1 = 1.33 + pi = 3.14 + with prog.context as q: - ops.S2gate(2) | (q[0], q[2]) - ops.S2gate(2) | (q[1], q[3]) - ops.Rgate(3) | q[0] - ops.BSgate() | (q[0], q[1]) - ops.Rgate(3) | q[0] - ops.BSgate(3) | (q[0], q[1]) - ops.Rgate(3) | q[2] - ops.BSgate() | (q[2], q[3]) - ops.Rgate(3) | q[2] - ops.BSgate() | (q[2], q[3]) + ops.S2gate(sqz0, 0.0) | (q[0], q[2]) + ops.S2gate(sqz1, 0.0) | (q[1], q[3]) + ops.Rgate(phi0) | q[0] + ops.BSgate(pi / 4, pi / 2) | (q[0], q[1]) + ops.Rgate(phi1) | q[0] + ops.BSgate(pi / 4, pi / 2) | (q[0], q[1]) + ops.Rgate(phi0) | q[2] + ops.BSgate(pi / 4, pi / 2) | (q[2], q[3]) + ops.Rgate(phi1) | q[2] + ops.BSgate(pi / 4, pi / 2) | (q[2], q[3]) ops.MeasureFock() | q engine.run(prog) From 6d6828d403c2faf07860cbb5356299be76c8b705 Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 24 Jul 2019 16:33:25 -0400 Subject: [PATCH 084/335] Tutorial --- doc/tutorials/starship.rst | 99 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 doc/tutorials/starship.rst diff --git a/doc/tutorials/starship.rst b/doc/tutorials/starship.rst new file mode 100644 index 000000000..c2b934491 --- /dev/null +++ b/doc/tutorials/starship.rst @@ -0,0 +1,99 @@ +.. _starship: + +Running Jobs with StarshipEngine +################################ + +.. sectionauthor:: Zeid Zabaneh + +In this section, we provide a tutorial of the **StarshipEngine**, an engine used to connect to the Strawberry Fields platform and execute jobs remotely (e.g. on a quantum chip). + +Configuring StarshipEngine +========================== + +Before using StarshipEngine, you need to configure the hostname and authentication token that will provide you access to the API. The easiest way is to create a configuration file named ``config.toml`` in your working directory. A typical file looks like this: + +.. code-block:: console + + [api] + hostname = "platform.example.com" + authentication_token = "ElUFm3O6m6q1DXPmpi5g4hWEhYHXFxBc" + +You can generate this file interactively by using the ``starship`` command as follows, answering the questions in the prompts. + +.. 
code-block:: console + + (strawberryfields)$ starship --reconfigure + Please enter the hostname of the server to connect to: [platform.example.com] + Please enter the authentication token to use when connecting: [] ElUFm3O6m6q1DXPmpi5g4hWEhYHXFxBc + Would you like to save these settings to a local cofiguration file in the current directory? [Y/n] y + Writing configuration file to current working directory... + + +To test connectivity, you can use the following command: + +.. code-block:: console + + (strawberryfields)$ starship --hello + You have successfully authenticated to the platform! + + +Executing your first program +============================ +The easiest way to execute a program using StarshipEngine is to create an ``xbb`` file and place it in your current working directory. Check the :ref:`blackbird tutorial ` for how to create this file. + +For this example, we will use the following file and save it to ``test.xbb`` in our current working directory: + +.. code-block:: + + name template_2x2_chip0 + version 1.0 + target chip0 (shots = 50) + + float sqz0 = 1.0 + float sqz1 = 1.0 + float phi0 = 0.574 + float phi1 = 1.33 + + # for n spatial degrees, first n signal modes, then n idler modes, phase zero + S2gate(sqz0, 0.0) | [0, 2] + S2gate(sqz1, 0.0) | [1, 3] + + # standard 2x2 interferometer for the signal modes (the lower ones in frequency) + Rgate(phi0) | [0] + BSgate(pi/4, pi/2) | [0, 1] + Rgate(phi1) | [0] + BSgate(pi/4, pi/2) | [0, 1] + + #duplicate the interferometer for the idler modes (the higher ones in frequency) + Rgate(phi0) | [2] + BSgate(pi/4, pi/2) | [2, 3] + Rgate(phi1) | [2] + BSgate(pi/4, pi/2) | [2, 3] + + # Measurement in Fock basis + MeasureFock() | [0, 1, 2, 3] + +After you have created your ``xbb`` file, you can execute it using the command line, or using a python shell. + +Executing your xbb file using Python +==================================== +To execute this file from using Python, you can use a code block like this: + +.. code-block:: python3 + + from strawberryfields import StarshipEngine + from strawberryfields.io import load + + prog = load("chip0.xbb") + result = eng.run(prog) + print(result.samples) + +Executing your xbb file from the command line +============================================= +To execute this file from the command line, use the ``starship`` command as follows: + +.. code-block:: console + + (strawberryfields)$ starship --input test.xbb --output out + +After executing the above command, the result will be stored in ``out`` in the current working directory. You can also omit the ``--output`` parameter to print the result to the screen. From a599a33ecca49eafea424fc8dcc37ff8fa4dbd64 Mon Sep 17 00:00:00 2001 From: Zeid Zabaneh Date: Thu, 25 Jul 2019 08:31:56 -0400 Subject: [PATCH 085/335] Apply suggestions from code review (copy changes) Co-Authored-By: Nathan Killoran Co-Authored-By: Josh Izaac --- doc/tutorials/starship.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/tutorials/starship.rst b/doc/tutorials/starship.rst index c2b934491..1b77f811d 100644 --- a/doc/tutorials/starship.rst +++ b/doc/tutorials/starship.rst @@ -5,7 +5,7 @@ Running Jobs with StarshipEngine .. sectionauthor:: Zeid Zabaneh -In this section, we provide a tutorial of the **StarshipEngine**, an engine used to connect to the Strawberry Fields platform and execute jobs remotely (e.g. on a quantum chip). 
+In this section, we provide a tutorial of the **StarshipEngine**, an engine used to connect to the Xanadu cloud platform and execute jobs remotely (e.g., on a quantum chip). Configuring StarshipEngine ========================== @@ -39,11 +39,11 @@ To test connectivity, you can use the following command: Executing your first program ============================ -The easiest way to execute a program using StarshipEngine is to create an ``xbb`` file and place it in your current working directory. Check the :ref:`blackbird tutorial ` for how to create this file. +The easiest way to execute a program using StarshipEngine is to create a blackbird script (an ``xbb`` file) and place it in your current working directory. Check the :ref:`blackbird tutorial ` for how to create this file. For this example, we will use the following file and save it to ``test.xbb`` in our current working directory: -.. code-block:: +.. code-block:: python name template_2x2_chip0 version 1.0 @@ -77,7 +77,7 @@ After you have created your ``xbb`` file, you can execute it using the command l Executing your xbb file using Python ==================================== -To execute this file from using Python, you can use a code block like this: +To execute this file using Python, you can use a code block like this: .. code-block:: python3 From 6e3a1b79cb072ea0f829df5d02428e34d965781c Mon Sep 17 00:00:00 2001 From: Zeid Zabaneh Date: Thu, 25 Jul 2019 08:33:30 -0400 Subject: [PATCH 086/335] Apply suggestions from code review (copy changes) Co-Authored-By: Nathan Killoran --- starship | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/starship b/starship index 8be49d4a9..f9031d502 100755 --- a/starship +++ b/starship @@ -34,7 +34,7 @@ PROMPTS = { if __name__ == "__main__": parser = argparse.ArgumentParser(description="run Strawberry Fields code on StarshipEngine") group = parser.add_mutually_exclusive_group(required=True) - group.add_argument("--input", "-i", help="the XBB file to run") + group.add_argument("--input", "-i", help="the xbb file to run") parser.add_argument( "--output", "-o", From ead593b5e8c6ec8735680266f24cfa626baa35af Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 25 Jul 2019 08:41:07 -0400 Subject: [PATCH 087/335] Minor copy change --- starship | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/starship b/starship index 8be49d4a9..9031c4a7f 100755 --- a/starship +++ b/starship @@ -32,7 +32,7 @@ PROMPTS = { } if __name__ == "__main__": - parser = argparse.ArgumentParser(description="run Strawberry Fields code on StarshipEngine") + parser = argparse.ArgumentParser(description="run a blackbird script on StarshipEngine") group = parser.add_mutually_exclusive_group(required=True) group.add_argument("--input", "-i", help="the XBB file to run") parser.add_argument( From fcc51b53204efc8b4317f8c9460cd14da53dee44 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 25 Jul 2019 08:46:53 -0400 Subject: [PATCH 088/335] Minor copy change --- doc/tutorials/starship.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/tutorials/starship.rst b/doc/tutorials/starship.rst index 1b77f811d..12ff54390 100644 --- a/doc/tutorials/starship.rst +++ b/doc/tutorials/starship.rst @@ -22,7 +22,7 @@ You can generate this file interactively by using the ``starship`` command as fo .. 
code-block:: console - (strawberryfields)$ starship --reconfigure + starship --reconfigure Please enter the hostname of the server to connect to: [platform.example.com] Please enter the authentication token to use when connecting: [] ElUFm3O6m6q1DXPmpi5g4hWEhYHXFxBc Would you like to save these settings to a local cofiguration file in the current directory? [Y/n] y @@ -33,7 +33,7 @@ To test connectivity, you can use the following command: .. code-block:: console - (strawberryfields)$ starship --hello + starship --hello You have successfully authenticated to the platform! @@ -94,6 +94,6 @@ To execute this file from the command line, use the ``starship`` command as foll .. code-block:: console - (strawberryfields)$ starship --input test.xbb --output out + starship --input test.xbb --output out.txt After executing the above command, the result will be stored in ``out`` in the current working directory. You can also omit the ``--output`` parameter to print the result to the screen. From cd31e2a5228e33db5b33ae259c71cc224e9b4012 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 25 Jul 2019 09:07:55 -0400 Subject: [PATCH 089/335] Add missing line --- doc/tutorials/starship.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/tutorials/starship.rst b/doc/tutorials/starship.rst index 12ff54390..950bff389 100644 --- a/doc/tutorials/starship.rst +++ b/doc/tutorials/starship.rst @@ -84,6 +84,7 @@ To execute this file using Python, you can use a code block like this: from strawberryfields import StarshipEngine from strawberryfields.io import load + eng = StarshipEngine() prog = load("chip0.xbb") result = eng.run(prog) print(result.samples) From d78732a82e5de2bd22c4f6b5f029a97e01609a86 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 25 Jul 2019 09:08:37 -0400 Subject: [PATCH 090/335] Minor copy change --- doc/tutorials/starship.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/tutorials/starship.rst b/doc/tutorials/starship.rst index 950bff389..b51e6e385 100644 --- a/doc/tutorials/starship.rst +++ b/doc/tutorials/starship.rst @@ -97,4 +97,4 @@ To execute this file from the command line, use the ``starship`` command as foll starship --input test.xbb --output out.txt -After executing the above command, the result will be stored in ``out`` in the current working directory. You can also omit the ``--output`` parameter to print the result to the screen. +After executing the above command, the result will be stored in ``out.txt`` in the current working directory. You can also omit the ``--output`` parameter to print the result to the screen. 
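As an aside (illustrative only, not taken from the surrounding patches): when the same
Blackbird script is run from Python rather than through the starship command, the samples
can be written to a file much like the CLI does with --output. A minimal sketch, reusing
the tutorial's test.xbb and out.txt names and the imports shown in the tutorial:

    from strawberryfields import StarshipEngine
    from strawberryfields.io import load

    eng = StarshipEngine()
    result = eng.run(load("test.xbb"))

    # mirror what the starship script does for --output: write the textual
    # representation of the samples into the working directory
    if result is not None and result.samples is not None:
        with open("out.txt", "w") as f:
            f.write(str(result.samples))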
From 98580012df6a2c363d889914e5a8a18e1d6384fb Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 25 Jul 2019 09:09:35 -0400 Subject: [PATCH 091/335] Minor copy change --- doc/tutorials/starship.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/tutorials/starship.rst b/doc/tutorials/starship.rst index b51e6e385..e47e263f0 100644 --- a/doc/tutorials/starship.rst +++ b/doc/tutorials/starship.rst @@ -85,7 +85,7 @@ To execute this file using Python, you can use a code block like this: from strawberryfields.io import load eng = StarshipEngine() - prog = load("chip0.xbb") + prog = load("test.xbb") result = eng.run(prog) print(result.samples) From 1d9b4073fab51465026f3f4ce0289bd437a20a40 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 25 Jul 2019 09:25:11 -0400 Subject: [PATCH 092/335] Add test for testing limited configuration file --- tests/frontend/test_configuration.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index cf3eb98db..b6f166cbf 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -32,6 +32,12 @@ use_ssl = true """ +TEST_FILE_ONE_VALUE = """\ +[api] +# Options for the Strawberry Fields Cloud API +authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" +""" + EXPECTED_CONFIG = { "api": { "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", @@ -134,3 +140,18 @@ def test_env_vars_take_precedence(self, tmpdir): config = conf.Configuration(str(filename)) assert config.api["hostname"] == host + + def test_update_config_with_limited_config_file(self, tmpdir, monkeypatch): + """ + This test asserts that the given a config file that only provides a single + value, the rest of the configuration values are filled in using defaults. 
+ """ + filename = tmpdir.join("config.toml") + + with open(filename, "w") as f: + f.write(TEST_FILE_ONE_VALUE) + + config = conf.Configuration(str(filename)) + assert config.api["hostname"] == conf.DEFAULT_CONFIG["api"]["hostname"] + assert config.api["use_ssl"] == conf.DEFAULT_CONFIG["api"]["use_ssl"] + assert config.api["authentication_token"] == "071cdcce-9241-4965-93af-4a4dbc739135" From 62833b7b33cdd1e1ec4bf61c97445ebb96912723 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 25 Jul 2019 10:56:13 -0400 Subject: [PATCH 093/335] Add hello command, minor bug fix --- starship | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/starship b/starship index 08b1fe350..855da7867 100755 --- a/starship +++ b/starship @@ -21,6 +21,7 @@ import argparse import pdb from strawberryfields.engine import StarshipEngine +from strawberryfields.api_client import APIClient from strawberryfields.io import load from strawberryfields import configuration @@ -49,6 +50,12 @@ if __name__ == "__main__": help="an interactive tool to reconfigure the API connection before executing the program", ) + group.add_argument( + "--hello", + action="store_true", + help="test the API connection", + ) + args = parser.parse_args() if args.reconfigure: @@ -75,16 +82,29 @@ if __name__ == "__main__": config.api["authentication_token"] = authentication_token config.save("config.toml") sys.exit() + elif args.hello: + client = APIClient() + response = client.get("") + if response.status_code == 403: + sys.stderr.write( + "Could not authenticate with token {} - please try again\n".format( + client.AUTHENTICATION_TOKEN)) + sys.exit(1) + else: + # Hack at the moment, but should check status code 200 against test endpoint + sys.stdout.write("You have successfully authenticated to the platform!\n") + sys.exit() eng = StarshipEngine() program = load(args.input) result = eng.run(program) - if hasattr(args, "output_path"): - with open(args.output_path, "w") as file: - file.write(str(result.samples)) - else: - sys.stdout.write(str(result.samples)) + if result.samples is not None: + if hasattr(args, "output_path"): + with open(args.output_path, "w") as file: + file.write(str(result.samples)) + else: + sys.stdout.write(str(result.samples)) if args.debug: pdb.set_trace() From 45842feb2d5480c167f96c4f903f6e8b88745709 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 25 Jul 2019 11:14:48 -0400 Subject: [PATCH 094/335] Better checks --- starship | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/starship b/starship index 855da7867..8aefc943f 100755 --- a/starship +++ b/starship @@ -49,7 +49,6 @@ if __name__ == "__main__": action="store_true", help="an interactive tool to reconfigure the API connection before executing the program", ) - group.add_argument( "--hello", action="store_true", @@ -84,11 +83,18 @@ if __name__ == "__main__": sys.exit() elif args.hello: client = APIClient() - response = client.get("") - if response.status_code == 403: - sys.stderr.write( - "Could not authenticate with token {} - please try again\n".format( - client.AUTHENTICATION_TOKEN)) + try: + response = client.get("") + except Exception as e: + sys.stderr.write("Could not connect to server:\n{}\n".format(e)) + sys.exit(1) + if response.status_code in (401, 403): + if not client.AUTHENTICATION_TOKEN: + sys.stderr.write("Could not authenticate -- no token provided\n") + else: + sys.stderr.write( + "Could not authenticate with token {} - please try again\n".format( + client.AUTHENTICATION_TOKEN)) 
sys.exit(1) else: # Hack at the moment, but should check status code 200 against test endpoint From 1a8c7942c76696ac90860aedaf2ef00c931f33fb Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 25 Jul 2019 13:16:10 -0400 Subject: [PATCH 095/335] Test debug mode --- tests/frontend/test_api_client.py | 33 +++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/tests/frontend/test_api_client.py b/tests/frontend/test_api_client.py index 85e026993..28ad07a80 100644 --- a/tests/frontend/test_api_client.py +++ b/tests/frontend/test_api_client.py @@ -352,6 +352,39 @@ def test_handle_refresh_data(self): for field in mock_resource.fields: field.set.assert_called_once_with(mock_data[field.name]) + def test_debug_mode(self, monkeypatch): + """ + Tests that the client object keeps track of responses and errors when debug mode is enabled. + """ + class MockException(Exception): + """ + A mock exception to ensure that the exception raised is the expected one. + """ + pass + + def mock_raise(exception): + raise exception + + mock_get_response = MockGETResponse(200) + + monkeypatch.setattr(requests, "get", lambda url, headers: mock_get_response) + monkeypatch.setattr(requests, "post", lambda url, headers, data: mock_raise(MockException)) + + client = api_client.APIClient(debug=True) + + assert client.DEBUG is True + assert client.errors == [] + assert client.responses == [] + + client.get("") + assert len(client.responses) == 1 + assert client.responses[0] == mock_get_response + + with pytest.raises(MockException): + client.post("", {}) + + assert len(client.errors) == 1 + class TestJob: def test_create_created(self, monkeypatch): From a54790fed0840024f3ce3fbdb8ff9e41e5162e9a Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 25 Jul 2019 14:08:21 -0400 Subject: [PATCH 096/335] Add toml to doc requirements --- doc/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/requirements.txt b/doc/requirements.txt index 7df0ffe5b..16f78d48e 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1,3 +1,4 @@ sphinxcontrib-bibtex ipykernel nbsphinx +toml From 36a52fd354258c8323722351b680bd703b76f503 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 25 Jul 2019 14:13:41 -0400 Subject: [PATCH 097/335] Add appdirs to doc requirements --- doc/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/requirements.txt b/doc/requirements.txt index 16f78d48e..81cee25f5 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -2,3 +2,4 @@ sphinxcontrib-bibtex ipykernel nbsphinx toml +appdirs From 75a3132eef679187e51b8cdca03776bd6f3a7545 Mon Sep 17 00:00:00 2001 From: Zeid Date: Thu, 25 Jul 2019 14:52:18 -0400 Subject: [PATCH 098/335] Fix bug when result is None (e.g. 
if job is queued in the background) --- starship | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/starship b/starship index 8aefc943f..c2b79110d 100755 --- a/starship +++ b/starship @@ -105,7 +105,7 @@ if __name__ == "__main__": program = load(args.input) result = eng.run(program) - if result.samples is not None: + if result and result.samples is not None: if hasattr(args, "output_path"): with open(args.output_path, "w") as file: file.write(str(result.samples)) From 6ee671669dc69238d9b7b0f67bef64fde39240ca Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 30 Jul 2019 22:48:12 +0800 Subject: [PATCH 099/335] Tutorial interferometer (#151) * Fixing documentation formatting and compilation issues, updating tutorial with compilation * Apply suggestions from code review Co-Authored-By: Tom Bromley <49409390+trbromley@users.noreply.github.com> * update first codeblock --- doc/index.rst | 2 + doc/tutorials/starship.rst | 149 ++++++++++++++++++++++++++++----- strawberryfields/api_client.py | 2 +- strawberryfields/engine.py | 2 +- 4 files changed, 132 insertions(+), 23 deletions(-) diff --git a/doc/index.rst b/doc/index.rst index c1d74e00e..4f228be30 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -134,6 +134,7 @@ Strawberry Fields is **free** and **open source**, released under the Apache Lic Boson sampling & the permanent Gaussian boson sampling & the hafnian Optimization & machine learning + tutorials/starship.rst .. toctree:: :maxdepth: 2 @@ -161,3 +162,4 @@ Strawberry Fields is **free** and **open source**, released under the Apache Lic code/backend.gaussian code/backend.fock code/backend.tf + code/api_client.rst diff --git a/doc/tutorials/starship.rst b/doc/tutorials/starship.rst index e47e263f0..25c1587f0 100644 --- a/doc/tutorials/starship.rst +++ b/doc/tutorials/starship.rst @@ -5,12 +5,15 @@ Running Jobs with StarshipEngine .. sectionauthor:: Zeid Zabaneh -In this section, we provide a tutorial of the **StarshipEngine**, an engine used to connect to the Xanadu cloud platform and execute jobs remotely (e.g., on a quantum chip). +In this section, we provide a tutorial of the **StarshipEngine**, an engine used to connect to the Xanadu +cloud platform and execute jobs remotely (e.g., on a quantum chip). Configuring StarshipEngine ========================== -Before using StarshipEngine, you need to configure the hostname and authentication token that will provide you access to the API. The easiest way is to create a configuration file named ``config.toml`` in your working directory. A typical file looks like this: +Before using StarshipEngine, you need to configure the hostname and authentication token that will provide +you access to the API. The easiest way is to create a configuration file named ``config.toml`` in your +working directory. A typical file looks like this: .. code-block:: console @@ -37,46 +40,67 @@ To test connectivity, you can use the following command: You have successfully authenticated to the platform! +.. _first_program: + Executing your first program ============================ -The easiest way to execute a program using StarshipEngine is to create a blackbird script (an ``xbb`` file) and place it in your current working directory. Check the :ref:`blackbird tutorial ` for how to create this file. 
-For this example, we will use the following file and save it to ``test.xbb`` in our current working directory: +The easiest way to execute a program using StarshipEngine is to create a Blackbird script (an ``xbb`` file) +and place it in your current working directory. Check the :ref:`Blackbird tutorial ` for how to create this file. + +For this example, consider the following Blackbird script, which represents a quantum program that matches +exactly the gate layout of the `chip0` photonic hardware device. We will save the following file as ``test.xbb`` +in our current working directory: .. code-block:: python - name template_2x2_chip0 - version 1.0 - target chip0 (shots = 50) + name template_2x2_chip0 # Name of the program + version 1.0 # Blackbird version number + target chip0 (shots = 50) # This program will run on chip0 for 50 shots - float sqz0 = 1.0 - float sqz1 = 1.0 + # Define the interferometer phase values float phi0 = 0.574 float phi1 = 1.33 - # for n spatial degrees, first n signal modes, then n idler modes, phase zero - S2gate(sqz0, 0.0) | [0, 2] - S2gate(sqz1, 0.0) | [1, 3] + # final local phases + float local_phase_0 = -0.543 + float local_phase_1 = 2.43 + float local_phase_2 = 0.11 + float local_phase_3 = -3.21 + + # Initial states are two-mode squeezed states + S2gate(1.0, 0.0) | [0, 2] + S2gate(1.0, 0.0) | [1, 3] - # standard 2x2 interferometer for the signal modes (the lower ones in frequency) + # A standard two-mode interferometer is applied + # to the first pair of modes Rgate(phi0) | [0] BSgate(pi/4, pi/2) | [0, 1] Rgate(phi1) | [0] BSgate(pi/4, pi/2) | [0, 1] - #duplicate the interferometer for the idler modes (the higher ones in frequency) + # The 2x2 interferometer above is duplicated + # for the second pair of modes Rgate(phi0) | [2] BSgate(pi/4, pi/2) | [2, 3] Rgate(phi1) | [2] BSgate(pi/4, pi/2) | [2, 3] - # Measurement in Fock basis + # final local phases + Rgate(local_phase_0) | 0 + Rgate(local_phase_1) | 1 + Rgate(local_phase_2) | 2 + Rgate(local_phase_3) | 3 + + # Perform a photon number counting measurement MeasureFock() | [0, 1, 2, 3] -After you have created your ``xbb`` file, you can execute it using the command line, or using a python shell. +After you have created your Blackbird script, you can execute it using the command line, or using a Python shell. + + +Executing your Blackbird script using Python +-------------------------------------------- -Executing your xbb file using Python -==================================== To execute this file using Python, you can use a code block like this: .. code-block:: python3 @@ -89,12 +113,95 @@ To execute this file using Python, you can use a code block like this: result = eng.run(prog) print(result.samples) -Executing your xbb file from the command line -============================================= + +Executing your Blackbird script from the command line +----------------------------------------------------- + To execute this file from the command line, use the ``starship`` command as follows: .. code-block:: console starship --input test.xbb --output out.txt -After executing the above command, the result will be stored in ``out.txt`` in the current working directory. You can also omit the ``--output`` parameter to print the result to the screen. +After executing the above command, the result will be stored in ``out.txt`` in the current working directory. +You can also omit the ``--output`` parameter to print the result to the screen. 
+ + +Program compilation +=================== + +In addition to using the program template above, which directly matches the physical +layout of the hardware device, you can apply any two-mode interferometer to the pairs of modes. +The interferometer can be composed of any combination +of beamsplitters (:class:`~.ops.BSgate`), rotations/phase shifts (:class:`~.ops.Rgate`). +Furthermore, you can use the :class:`~.ops.Interferometer` command to directly pass a +unitary matrix to be decomposed and compiled to match the device architecture. + +For example, consider the following Blackbird script: + + +.. code-block:: python + + name compilation_example # Name of the program + version 1.0 # Blackbird version number + target chip0 (shots=50) # This program will run on chip0 for 50 shots + + # Define a unitary matrix + complex array U[2, 2] = + -0.1955885-0.16833594j, 0.77074506+0.58254631j + -0.03596574+0.96546083j, 0.00676031+0.2579654j + + # Initial states are two-mode squeezed states, + # applied to alternating pairs of modes. + S2gate(1.0, 0.0) | [0, 2] + S2gate(1.0, 0.0) | [1, 3] + + # Apply the unitary matrix above to + # the first pair of modes, as well + # as a beamsplitter + Interferometer(U) | [0, 1] + BSgate(0.543, -0.123) | [0, 1] + + # Duplicate the above unitary for + # the second pair of modes + Interferometer(U) | [2, 3] + BSgate(0.543, -0.123) | [2, 3] + + # Perform a PNR measurement in the Fock basis + MeasureFock() | [0, 1, 2, 3] + + +.. note:: You may use :func:`~.random_interferometer` to generate arbitrary random unitaries. + +This program will execute following the same steps as above; ``StarshipEngine`` will automatically +compile the program to match the layout of the chip described in :ref:`first_program`. + +You may wish to view the compiled program; this can be easily done in Python using +the :meth:`~.Program.compile` method: + + +>>> from strawberryfields import StarshipEngine +>>> from strawberryfields.io import load +>>> prog = load("test.xbb") +>>> prog = prog.compile("chip0") +>>> prog.print() +S2gate(1, 0) | (q[0], q[2]) +S2gate(1, 0) | (q[1], q[3]) +Rgate(0.9355) | (q[0]) +BSgate(0.7854, 1.571) | (q[0], q[1]) +Rgate(4.886) | (q[0]) +BSgate(0.7854, 1.571) | (q[0], q[1]) +Rgate(-0.3742) | (q[0]) +Rgate(-0.05099) | (q[1]) +Rgate(0.9355) | (q[2]) +BSgate(0.7854, 1.571) | (q[2], q[3]) +Rgate(4.886) | (q[2]) +BSgate(0.7854, 1.571) | (q[2], q[3]) +Rgate(-0.3742) | (q[2]) +Rgate(-0.05099) | (q[3]) +MeasureFock | (q[0], q[1], q[2], q[3]) + +and even saved as a new Blackbird script using the :func:`~io.save` function: + +>>> from strawberryfields.io import save +>>> save("test_compiled.xbb", prog) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index d717a96b5..edbffdccb 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -14,7 +14,7 @@ """ APIClient library -================ +================= **Module name:** :mod:`strawberryfields.api_client` diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index dd7a7484a..9004d7b43 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -572,7 +572,7 @@ def run(self, program, shots=1, **kwargs): * The Program instance is compiled for the target backend. * The compiled program is sent as a job to the Starship - * The measurement results of each subsystem (if any) are stored in the :attr:`~.samples`. + * The measurement results of each subsystem (if any) are stored in the :attr:`~.BaseEngine.samples`. 
* The compiled program is appended to self.run_progs. * The queued or completed jobs are appended to self.jobs. From 8b8ca161e3fdec98a597e9c847f653d4f15ac52a Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 30 Jul 2019 16:18:45 -0400 Subject: [PATCH 100/335] Check correct status code --- starship | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/starship b/starship index c2b79110d..60b347e70 100755 --- a/starship +++ b/starship @@ -88,7 +88,11 @@ if __name__ == "__main__": except Exception as e: sys.stderr.write("Could not connect to server:\n{}\n".format(e)) sys.exit(1) - if response.status_code in (401, 403): + + if response.status_code == 200: + sys.stdout.write("You have successfully authenticated to the platform!\n") + sys.exit() + elif response.status_code in (401, 403): if not client.AUTHENTICATION_TOKEN: sys.stderr.write("Could not authenticate -- no token provided\n") else: @@ -97,9 +101,9 @@ if __name__ == "__main__": client.AUTHENTICATION_TOKEN)) sys.exit(1) else: - # Hack at the moment, but should check status code 200 against test endpoint - sys.stdout.write("You have successfully authenticated to the platform!\n") - sys.exit() + sys.stderr.write("Could not connect to server: {}, {}\n".format( + str(response.status_code), response.content)) + sys.exit(1) eng = StarshipEngine() program = load(args.input) From f6a3809db2ba3ae0ad630c04942325a1c3947b0e Mon Sep 17 00:00:00 2001 From: Zeid Date: Wed, 31 Jul 2019 15:10:24 -0400 Subject: [PATCH 101/335] Show error response from server when a job fails --- strawberryfields/api_client.py | 6 ++++-- strawberryfields/engine.py | 12 ++++++++---- tests/frontend/test_api_client.py | 3 ++- tests/frontend/test_engine.py | 3 ++- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index edbffdccb..a49bcd880 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -280,7 +280,8 @@ class ResourceManager: This class handles all interactions with APIClient by the resource. 
""" - http_status_code = None + http_response_data = None + http_response_status_code = None errors = None def __init__(self, resource, client=None): @@ -345,7 +346,8 @@ def handle_response(self, response): response (requests.Response): a response object to be parsed """ if hasattr(response, "status_code"): - self.http_status_code = response.status_code + self.http_response_data = response.json() + self.http_response_status_code = response.status_code if response.status_code in (200, 201): self.handle_success_response(response) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 9004d7b43..53926fc77 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -322,6 +322,7 @@ class LocalEngine(BaseEngine): backend (str, BaseBackend): short name of the backend, or a pre-constructed backend instance backend_options (None, Dict[str, Any]): keyword arguments to be passed to the backend """ + def __init__(self, backend, *, backend_options=None): backend_options = backend_options or {} super().__init__(backend, backend_options) @@ -415,11 +416,13 @@ def run(self, program, *, compile_options=None, run_options=None): temp_run_options.update(run_options or {}) temp_run_options.setdefault("shots", 1) - temp_run_options.setdefault('modes', None) + temp_run_options.setdefault("modes", None) # avoid unexpected keys being sent to Operations eng_run_keys = ["eval", "session", "feed_dict", "shots"] - eng_run_options = {key: temp_run_options[key] for key in temp_run_options.keys() & eng_run_keys} + eng_run_options = { + key: temp_run_options[key] for key in temp_run_options.keys() & eng_run_keys + } result = super()._run(program, compile_options=compile_options, **eng_run_options) @@ -455,6 +458,7 @@ def __init__(self, polling_delay_seconds=1, **kwargs): # todo: move this into backend class class Chip0Backend(BaseBackend): circuit_spec = "chip0" + self.backend = Chip0Backend() api_client_params = {k: v for k, v in kwargs.items() if k in DEFAULT_CONFIG["api"].keys()} @@ -559,8 +563,8 @@ def _run_program(self, program, **kwargs): self.reset() if job.is_failed: - # TODO: Add failure details here. Should this exception be raised elsewhere? - raise JobExecutionError("Job execution failed. 
Please try again.") + message = str(job.manager.http_response_data["meta"]) + raise JobExecutionError(message) elif job.is_complete: job.result.manager.get() return job.result.result.value diff --git a/tests/frontend/test_api_client.py b/tests/frontend/test_api_client.py index 28ad07a80..edd1e9602 100644 --- a/tests/frontend/test_api_client.py +++ b/tests/frontend/test_api_client.py @@ -325,7 +325,8 @@ def test_handle_response(self, monkeypatch): monkeypatch.setattr(manager, "handle_error_response", mock_handle_error_response) manager.handle_response(mock_response) - assert manager.http_status_code == mock_response.status_code + assert manager.http_response_data == mock_response.json() + assert manager.http_response_status_code == mock_response.status_code mock_handle_error_response.assert_called_once_with(mock_response) mock_response.status_code = 200 diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 31a2c06b1..c1c17db45 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -263,8 +263,9 @@ def test__run_program_fails(self, starship_engine, monkeypatch): some_params = {"param": MagicMock()} - with pytest.raises(JobExecutionError): + with pytest.raises(JobExecutionError) as e: starship_engine._run_program(program, **some_params) + assert e.value.args[0] == str(mock_job.manager.http_response_data['meta']) def test__run(self, starship_engine, monkeypatch): """ From a93cdb53897e771d7a12f9c6c2c0544c637277dc Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 2 Aug 2019 15:28:45 +0800 Subject: [PATCH 102/335] target to property, and convert run options (#156) --- strawberryfields/io.py | 4 ++++ strawberryfields/program.py | 16 ++++++++++++++-- tests/frontend/test_io.py | 10 ++++++++++ 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/strawberryfields/io.py b/strawberryfields/io.py index 740b956c4..75eb55702 100644 --- a/strawberryfields/io.py +++ b/strawberryfields/io.py @@ -56,6 +56,10 @@ def to_blackbird(prog, version="1.0"): # set the target bb._target["name"] = prog.target + # set the run options + if prog.run_options: + bb._target["options"] = prog.run_options + # fill in the quantum circuit for cmd in prog.circuit: op = {"kwargs": {}, "args": []} diff --git a/strawberryfields/program.py b/strawberryfields/program.py index e64422168..25476f336 100644 --- a/strawberryfields/program.py +++ b/strawberryfields/program.py @@ -188,7 +188,7 @@ def __init__(self, num_subsystems, name=None): #: bool: if True, no more Commands can be appended to the Program self.locked = False #: str, None: for compiled Programs, the short name of the target CircuitSpecs template, otherwise None - self.target = None + self._target = None #: Program, None: for compiled Programs, this is the original, otherwise None self.source = None @@ -523,7 +523,7 @@ def compile(self, target, **kwargs): # create the compiled Program compiled = self._linked_copy() compiled.circuit = seq - compiled.target = target + compiled._target = target # get run options of compiled program # for the moment, shots is the only supported run option. @@ -573,3 +573,15 @@ def draw_circuit(self, tex_dir='./circuit_tex', write_to_file=True): document = drawer.compile_document(tex_dir=tex_dir) return [document, tex] + + @property + def target(self): + """The target specification the program has been compiled against. + + If the program has not been compiled, this will return ``None``. 
+ + Returns: + str or None: the short name of the target CircuitSpecs template if + compiled, otherwise None + """ + return self._target diff --git a/tests/frontend/test_io.py b/tests/frontend/test_io.py index e8fcc68b2..2766bb522 100644 --- a/tests/frontend/test_io.py +++ b/tests/frontend/test_io.py @@ -109,6 +109,16 @@ def test_metadata(self): assert bb.version == "1.0" assert bb.target["name"] == "gaussian" + def test_metadata_run_options(self): + """Test run options correctly converts""" + prog = Program(4, name="test_program") + bb = io.to_blackbird(prog.compile("gaussian", shots=1024)) + + assert bb.name == "test_program" + assert bb.version == "1.0" + assert bb.target["name"] == "gaussian" + assert bb.target["options"] == {"shots": 1024} + def test_gate_noarg(self): """Test gate with no argument converts""" # create a test program From 1ec5310be9bf9c455b8778d04c8b4efe859a44a2 Mon Sep 17 00:00:00 2001 From: Zeid Date: Tue, 29 Oct 2019 14:10:15 -0400 Subject: [PATCH 103/335] Add io.loads function to load from string --- strawberryfields/io.py | 15 +++++++++++++-- tests/frontend/test_io.py | 4 ++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/strawberryfields/io.py b/strawberryfields/io.py index 740b956c4..315ac68d8 100644 --- a/strawberryfields/io.py +++ b/strawberryfields/io.py @@ -190,6 +190,18 @@ def save(f, prog): fid.close() +def loads(s): + """Load a quantum program from a string. + + Args: + s (str): string containing the Blackbird circuit + Returns: + prog (Program): Strawberry Fields program + + """ + bb = blackbird.loads(s) + return to_program(bb) + def load(f): """Load a quantum program from a Blackbird .xbb file. @@ -225,5 +237,4 @@ def load(f): fid.close() # load blackbird program - bb = blackbird.loads(bb_str) - return to_program(bb) + return loads(bb_str) diff --git a/tests/frontend/test_io.py b/tests/frontend/test_io.py index e8fcc68b2..5c95ac62f 100644 --- a/tests/frontend/test_io.py +++ b/tests/frontend/test_io.py @@ -648,3 +648,7 @@ def test_load_file_object(self, prog, tmpdir): # check loaded program is the same as expected self.assert_programs_equal(res, prog) + + def test_loads(self, prog): + """Test loading a program from a string""" + self.assert_programs_equal(io.loads(test_prog_not_compiled), prog) From 9d7e3e926c14768f0ba330aa23fea84b8a7e9d9a Mon Sep 17 00:00:00 2001 From: Zeid Zabaneh Date: Fri, 29 Nov 2019 14:52:38 -0500 Subject: [PATCH 104/335] Simulon support (#237) * Add custom port option for configuration * Improved backend support; program compilation * Fix tests * Minor change * Add copyright notice to new file * Minor changes * Minor changes * Factor out backend init functionality * Minor change --- starship | 14 ++++-- strawberryfields/api_client.py | 2 + strawberryfields/backends/__init__.py | 6 ++- .../backends/chip0backend/__init__.py | 15 +++++++ .../backends/chip0backend/backend.py | 27 ++++++++++++ strawberryfields/configuration.py | 7 ++- strawberryfields/engine.py | 43 +++++++++---------- strawberryfields/program.py | 8 ++-- tests/frontend/test_configuration.py | 1 + tests/frontend/test_engine.py | 9 ++-- 10 files changed, 95 insertions(+), 37 deletions(-) create mode 100644 strawberryfields/backends/chip0backend/__init__.py create mode 100644 strawberryfields/backends/chip0backend/backend.py diff --git a/starship b/starship index 60b347e70..3070131ba 100755 --- a/starship +++ b/starship @@ -27,9 +27,11 @@ from strawberryfields import configuration PROMPTS = { "hostname": "Please enter the hostname of the server to 
connect to: [{}] ", + "port": "Please enter the port number to connect with: [{}] ", + "use_ssl": "Should the client attempt to connect over SSL? [{}] ", "authentication_token": "Please enter the authentication token to use when connecting: [{}] ", "save": "Would you like to save these settings to a local cofiguration file in the current " - "directory? [Y/n] ", + "directory? [{}] ", } if __name__ == "__main__": @@ -67,10 +69,12 @@ if __name__ == "__main__": input(PROMPTS["authentication_token"].format(config.api["authentication_token"])) or config.api["authentication_token"] ) - save = input(PROMPTS["save"]).upper() == "Y" + port = input(PROMPTS["port"].format(config.api["port"])) or config.api["port"] + use_ssl = input(PROMPTS["use_ssl"].format("y" if config.api["use_ssl"] else "n")).upper() == "Y" + save = input(PROMPTS["save"].format("y" if config.api["use_ssl"] else "n")).upper() == "Y" if not save: - sys.stdout.write("Not writing configuration to file...") + sys.stdout.write("Not writing configuration to file...\n") else: if not os.path.isfile("config.toml"): sys.stdout.write("Writing configuration file to current working directory...\n") @@ -79,6 +83,8 @@ if __name__ == "__main__": config.api["hostname"] = hostname config.api["authentication_token"] = authentication_token + config.api["port"] = port + config.api["use_ssl"] = use_ssl config.save("config.toml") sys.exit() elif args.hello: @@ -105,8 +111,8 @@ if __name__ == "__main__": str(response.status_code), response.content)) sys.exit(1) - eng = StarshipEngine() program = load(args.input) + eng = StarshipEngine(program.target) result = eng.run(program) if result and result.samples is not None: diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index a49bcd880..61dc044d6 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -166,6 +166,8 @@ def __init__(self, **kwargs): self.HOSTNAME = self._config["hostname"] self.BASE_URL = "{}://{}".format("https" if self.USE_SSL else "http", self.HOSTNAME) + if self._config["port"] != 443: + self.BASE_URL = "{}:{}".format(self.BASE_URL, self._config["port"]) self.AUTHENTICATION_TOKEN = self._config["authentication_token"] self.HEADERS = {"User-Agent": self.USER_AGENT} self.DEBUG = self._config["debug"] diff --git a/strawberryfields/backends/__init__.py b/strawberryfields/backends/__init__.py index f3282b094..3fd33d69d 100644 --- a/strawberryfields/backends/__init__.py +++ b/strawberryfields/backends/__init__.py @@ -16,6 +16,7 @@ from .base import BaseBackend, BaseFock, BaseGaussian, ModeMap from .gaussianbackend import GaussianBackend from .fockbackend import FockBackend +from .chip0backend import Chip0Backend __all__ = [ "BaseBackend", @@ -24,9 +25,12 @@ "FockBackend", "GaussianBackend", "TFBackend", + "Chip0Backend", ] -supported_backends = {b.short_name: b for b in (BaseBackend, GaussianBackend, FockBackend)} +supported_backends = { + b.short_name: b for b in (BaseBackend, GaussianBackend, FockBackend, Chip0Backend) +} def load_backend(name): diff --git a/strawberryfields/backends/chip0backend/__init__.py b/strawberryfields/backends/chip0backend/__init__.py new file mode 100644 index 000000000..4d27a60a8 --- /dev/null +++ b/strawberryfields/backends/chip0backend/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2019 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .backend import Chip0Backend diff --git a/strawberryfields/backends/chip0backend/backend.py b/strawberryfields/backends/chip0backend/backend.py new file mode 100644 index 000000000..c6a29fd4d --- /dev/null +++ b/strawberryfields/backends/chip0backend/backend.py @@ -0,0 +1,27 @@ +# Copyright 2019 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Chip0 backend interface +======================= + +""" + + +from strawberryfields.backends.base import BaseBackend + + +class Chip0Backend(BaseBackend): + short_name = "chip0" + circuit_spec = "chip0" diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 14c89f576..de7649c07 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -96,7 +96,12 @@ DEFAULT_CONFIG = { - "api": {"authentication_token": "", "hostname": "localhost", "use_ssl": True, "debug": False} + "api": { + "authentication_token": "", + "hostname": "localhost", + "use_ssl": True, + "port": 443, + "debug": False} } BOOLEAN_KEYS = ("debug", "use_ssl") diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index cdf10691b..84b87adde 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -156,6 +156,8 @@ class BaseEngine(abc.ABC): backend_options (Dict[str, Any]): keyword arguments for the backend """ + REMOTE = False + def __init__(self, backend, backend_options=None): if backend_options is None: backend_options = {} @@ -169,6 +171,15 @@ def __init__(self, backend, backend_options=None): #: List[List[Number]]: latest measurement results, shape == (modes, shots) self.samples = None + if isinstance(backend, str): + self.backend_name = backend + self.backend = load_backend(backend) + elif isinstance(backend, BaseBackend): + self.backend_name = backend.short_name + self.backend = backend + else: + raise TypeError("backend must be a string or a BaseBackend instance.") + @abc.abstractmethod def __str__(self): """String representation.""" @@ -298,13 +309,13 @@ def _broadcast_nones(val, dim): p = p.compile(target, **compile_options) p.lock() - if self.backend_name in getattr(self, "HARDWARE_BACKENDS", []): + if self.REMOTE: self.samples = self._run_program(p, **kwargs) else: self._run_program(p, **kwargs) shots = kwargs.get("shots", 1) self.samples = [ - _broadcast_nones(p.reg_refs[k].val, kwargs["shots"]) for k in sorted(p.reg_refs) + _broadcast_nones(p.reg_refs[k].val, shots) for k in sorted(p.reg_refs) ] self.run_progs.append(p) @@ -329,16 +340,6 @@ def __init__(self, backend, *, backend_options=None): backend_options = backend_options or {} 
super().__init__(backend, backend_options) - if isinstance(backend, str): - self.backend_name = backend - #: BaseBackend: backend for executing the quantum circuit - self.backend = load_backend(backend) - elif isinstance(backend, BaseBackend): - self.backend_name = backend.short_name - self.backend = backend - else: - raise TypeError("backend must be a string or a BaseBackend instance.") - def __str__(self): return self.__class__.__name__ + "({})".format(self.backend_name) @@ -447,21 +448,16 @@ class StarshipEngine(BaseEngine): Args: backend (str, BaseBackend): name of the backend, or a pre-constructed backend instance + polling_delay_seconds (int): the number of seconds to wait between queries when polling for + job results """ - HARDWARE_BACKENDS = ("chip0",) + # This engine will execute jobs remotely. + REMOTE = True - def __init__(self, polling_delay_seconds=1, **kwargs): - # Only chip0 backend supported initially. - backend = "chip0" + def __init__(self, backend, polling_delay_seconds=1, **kwargs): super().__init__(backend) - # todo: move this into backend class - class Chip0Backend(BaseBackend): - circuit_spec = "chip0" - - self.backend = Chip0Backend() - api_client_params = {k: v for k, v in kwargs.items() if k in DEFAULT_CONFIG["api"].keys()} self.client = APIClient(**api_client_params) self.polling_delay_seconds = polling_delay_seconds @@ -501,7 +497,7 @@ def _get_blackbird(self, shots, program): # TODO: This is potentially not needed here bb._target["name"] = self.backend_name - bb._target["options"] = {"shots": shots} + bb._target["options"] = {"shots": shots, **program.backend_options} return bb def _queue_job(self, job_content): @@ -518,6 +514,7 @@ def _queue_job(self, job_content): job = Job(client=self.client) job.manager.create(circuit=job_content) self.jobs.append(job) + print("Job {} is sent to server.".format(job.id.value)) return job def _run_program(self, program, **kwargs): diff --git a/strawberryfields/program.py b/strawberryfields/program.py index 54e1720bb..08cb95628 100644 --- a/strawberryfields/program.py +++ b/strawberryfields/program.py @@ -347,7 +347,7 @@ def _delete_subsystems(self, refs): for r in refs: # mark the RegRef as deleted r.active = False - #self.reg_refs[r.ind].active = False + # self.reg_refs[r.ind].active = False # NOTE: deleted indices are *not* removed from self.unused_indices def lock(self): @@ -533,8 +533,11 @@ def compile(self, target, **kwargs): if "shots" in kwargs: compiled.run_options["shots"] = kwargs["shots"] - return compiled + compiled.backend_options = {} + if "cutoff_dim" in kwargs: + compiled.backend_options["cutoff_dim"] = kwargs["cutoff_dim"] + return compiled def optimize(self): """Simplify and optimize the program. @@ -553,7 +556,6 @@ def optimize(self): opt.circuit = pu.optimize_circuit(self.circuit) return opt - def draw_circuit(self, tex_dir='./circuit_tex', write_to_file=True): r"""Draw the circuit using the Qcircuit :math:`\LaTeX` package. 
diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 71dcfcf63..847b96731 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -47,6 +47,7 @@ "hostname": "localhost", "use_ssl": True, "debug": False, + "port": 443, } } diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index b4a630a3d..4c1f14b81 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -46,7 +46,7 @@ def starship_engine(monkeypatch): """ mock_api_client = MagicMock() monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) - engine = StarshipEngine(polling_delay_seconds=0) + engine = StarshipEngine("chip0", polling_delay_seconds=0) return engine @@ -178,9 +178,10 @@ def test_init(self, monkeypatch): """ mock_api_client = MagicMock() monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) - engine = StarshipEngine() + engine = StarshipEngine("chip0") assert engine.client == mock_api_client() assert engine.jobs == [] + assert engine.REMOTE == True def test_reset(self, starship_engine): """ @@ -282,7 +283,6 @@ def test__run(self, starship_engine, monkeypatch): methods = MagicMock() monkeypatch.setattr(starship_engine, "backend_name", str(inputs.mock_backend)) - monkeypatch.setattr(starship_engine, "HARDWARE_BACKENDS", [str(inputs.mock_backend)]) monkeypatch.setattr(starship_engine, "_run_program", methods._run_program) monkeypatch.setattr("strawberryfields.engine.Result", outputs.result) monkeypatch.setattr(starship_engine, "backend", inputs.mock_backend) @@ -290,7 +290,6 @@ def test__run(self, starship_engine, monkeypatch): result = starship_engine._run(inputs.program, shots=inputs.shots, args={}, compile_options={}) - assert starship_engine.backend_name in starship_engine.HARDWARE_BACKENDS inputs.program.compile.assert_called_once_with(starship_engine.backend.circuit_spec) mock_compiled_program = inputs.program.compile(starship_engine.backend_name) mock_compiled_program.lock.assert_called_once() @@ -328,7 +327,7 @@ def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): # NOTE: this is currently more of an integration test, currently a WIP / under development. 
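        # Editor's note (assumption based on the fixture parameters visible here): the engine
        # below is pointed at a local hostname with polling_delay_seconds=0, so the test can
        # drive the job lifecycle without sleeping between status polls.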
api_client_params = {"hostname": "localhost"} - engine = StarshipEngine(polling_delay_seconds=0, **api_client_params) + engine = StarshipEngine("chip0", polling_delay_seconds=0, **api_client_params) # We don't want to actually send any requests, though we should make sure POST was called mock_api_client_post = MagicMock() From 461c9908af865531d36b0934610fc088d082b15e Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Sun, 1 Dec 2019 19:04:43 -0500 Subject: [PATCH 105/335] Creates a Chip2 circuit spec (#249) * added chip2 spec * Apply suggestions from code review Co-Authored-By: Nathan Killoran * Apply suggestions from code review * suggested changes * added tests * suggested changes --- doc/requirements.txt | 1 - strawberryfields/circuitspecs/__init__.py | 3 +- strawberryfields/circuitspecs/chip0.py | 2 +- strawberryfields/circuitspecs/chip2.py | 280 ++++++++++++++ tests/frontend/test_circuitspecs_chip2.py | 449 ++++++++++++++++++++++ 5 files changed, 732 insertions(+), 3 deletions(-) create mode 100644 strawberryfields/circuitspecs/chip2.py create mode 100644 tests/frontend/test_circuitspecs_chip2.py diff --git a/doc/requirements.txt b/doc/requirements.txt index 177b35222..b684212a3 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -9,7 +9,6 @@ scipy>=1.0.0 sklearn sphinx-autodoc-typehints sphinx-gallery -sphinxcontrib-bibtex sphinxcontrib-bibtex==0.4.2 tensorflow-tensorboard>=0.1.8 tensorflow==1.3 diff --git a/strawberryfields/circuitspecs/__init__.py b/strawberryfields/circuitspecs/__init__.py index 0f44791b7..a95757c64 100644 --- a/strawberryfields/circuitspecs/__init__.py +++ b/strawberryfields/circuitspecs/__init__.py @@ -83,13 +83,14 @@ """ from .circuit_specs import CircuitSpecs from .chip0 import Chip0Specs +from .chip2 import Chip2Specs from .fock import FockSpecs from .gaussian import GaussianSpecs from .gbs import GBSSpecs from .tensorflow import TFSpecs -specs = (Chip0Specs, FockSpecs, GaussianSpecs, GBSSpecs, TFSpecs) +specs = (Chip0Specs, Chip2Specs, FockSpecs, GaussianSpecs, GBSSpecs, TFSpecs) circuit_db = {c.short_name: c for c in specs} """dict[str, ~strawberryfields.circuitspecs.CircuitSpecs]: Map from circuit family short name to the corresponding class.""" diff --git a/strawberryfields/circuitspecs/chip0.py b/strawberryfields/circuitspecs/chip0.py index e080fee49..16f971c8b 100644 --- a/strawberryfields/circuitspecs/chip0.py +++ b/strawberryfields/circuitspecs/chip0.py @@ -33,7 +33,7 @@ class Chip0Specs(CircuitSpecs): modes = 4 remote = True local = True - interactive = True + interactive = False primitives = {"S2gate", "MeasureFock", "Rgate", "BSgate"} decompositions = { diff --git a/strawberryfields/circuitspecs/chip2.py b/strawberryfields/circuitspecs/chip2.py new file mode 100644 index 000000000..76b1cd918 --- /dev/null +++ b/strawberryfields/circuitspecs/chip2.py @@ -0,0 +1,280 @@ +# Copyright 2019 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Circuit class specification for the chip0 class of circuits.""" +import textwrap + +import numpy as np +from numpy.linalg import multi_dot +from scipy.linalg import block_diag + +from strawberryfields.program_utils import CircuitError, Command, group_operations +from strawberryfields.parameters import par_evaluate +import strawberryfields.ops as ops + +from .circuit_specs import CircuitSpecs +from .gbs import GBSSpecs + + +class Chip2Specs(CircuitSpecs): + """Circuit specifications for the chip2 class of circuits.""" + + short_name = "chip2" + modes = 8 + remote = True + local = True + interactive = False + + sq_amplitude = 1 + + primitives = {"S2gate", "MeasureFock", "Rgate", "BSgate"} + decompositions = { + "Interferometer": {"mesh": "rectangular_symmetric", "drop_identity": False}, + "BipartiteGraphEmbed": {"mesh": "rectangular_symmetric", "drop_identity": False}, + "MZgate": {}, + } + + circuit = textwrap.dedent( + """\ + name template_4x2_chip2 + version 1.0 + target chip2 (shots=1) + + # for n spatial degrees, first n signal modes, then n idler modes, all phases zero + S2gate({squeezing_amplitude_0}, 0.0) | [0, 4] + S2gate({squeezing_amplitude_1}, 0.0) | [1, 5] + S2gate({squeezing_amplitude_2}, 0.0) | [2, 6] + S2gate({squeezing_amplitude_3}, 0.0) | [3, 7] + + # standard 4x4 interferometer for the signal modes (the lower ones in frequency) + # even phase indices correspond to external Mach-Zehnder interferometer phases + # odd phase indices correspond to internal Mach-Zehnder interferometer phases + # MZI_0 + Rgate({phase_0}) | [0] + BSgate(pi/4, pi/2) | [0, 1] + Rgate({phase_1}) | [0] + BSgate(pi/4, pi/2) | [0, 1] + # MZI_1 + Rgate({phase_2}) | [2] + BSgate(pi/4, pi/2) | [2, 3] + Rgate({phase_3}) | [2] + BSgate(pi/4, pi/2) | [2, 3] + # MZI_2 + Rgate({phase_4}) | [1] + BSgate(pi/4, pi/2) | [1, 2] + Rgate({phase_5}) | [1] + BSgate(pi/4, pi/2) | [1, 2] + # MZI_3 + Rgate({phase_6}) | [0] + BSgate(pi/4, pi/2) | [0, 1] + Rgate({phase_7}) | [0] + BSgate(pi/4, pi/2) | [0, 1] + # MZI_4 + Rgate({phase_8}) | [2] + BSgate(pi/4, pi/2) | [2, 3] + Rgate({phase_9}) | [2] + BSgate(pi/4, pi/2) | [2, 3] + # MZI_5 + Rgate({phase_10}) | [1] + BSgate(pi/4, pi/2) | [1, 2] + Rgate({phase_11}) | [1] + BSgate(pi/4, pi/2) | [1, 2] + + # duplicate the interferometer for the idler modes (the higher ones in frequency) + # MZI_0 + Rgate({phase_0}) | [4] + BSgate(pi/4, pi/2) | [4, 5] + Rgate({phase_1}) | [4] + BSgate(pi/4, pi/2) | [4, 5] + # MZI_1 + Rgate({phase_2}) | [6] + BSgate(pi/4, pi/2) | [6, 7] + Rgate({phase_3}) | [6] + BSgate(pi/4, pi/2) | [6, 7] + # MZI_2 + Rgate({phase_4}) | [5] + BSgate(pi/4, pi/2) | [5, 6] + Rgate({phase_5}) | [5] + BSgate(pi/4, pi/2) | [5, 6] + # MZI_3 + Rgate({phase_6}) | [4] + BSgate(pi/4, pi/2) | [4, 5] + Rgate({phase_7}) | [4] + BSgate(pi/4, pi/2) | [4, 5] + # MZI_4 + Rgate({phase_8}) | [6] + BSgate(pi/4, pi/2) | [6, 7] + Rgate({phase_9}) | [6] + BSgate(pi/4, pi/2) | [6, 7] + # MZI_5 + Rgate({phase_10}) | [5] + BSgate(pi/4, pi/2) | [5, 6] + Rgate({phase_11}) | [5] + BSgate(pi/4, pi/2) | [5, 6] + + # add final dummy phases to allow mapping any unitary to this template (these do not + # affect the photon number measurement) + Rgate({final_phase_0}) | [0] + Rgate({final_phase_1}) | [1] + Rgate({final_phase_2}) | [2] + Rgate({final_phase_3}) | [3] + Rgate({final_phase_4}) | [4] + Rgate({final_phase_5}) | [5] + Rgate({final_phase_6}) | [6] + Rgate({final_phase_7}) | [7] + + # measurement in Fock basis + MeasureFock() | [0, 1, 2, 3, 4, 5, 6, 7] + """ + ) + + def compile(self, seq, 
registers): + """Try to arrange a quantum circuit into a form suitable for Chip2. + + Args: + seq (Sequence[Command]): quantum circuit to modify + registers (Sequence[RegRefs]): quantum registers + Returns: + List[Command]: modified circuit + Raises: + CircuitError: the circuit does not correspond to Chip2 + """ + # pylint: disable=too-many-statements,too-many-branches + + # first do general GBS compilation to make sure + # Fock measurements are correct + # --------------------------------------------- + seq = GBSSpecs().compile(seq, registers) + A, B, C = group_operations(seq, lambda x: isinstance(x, ops.MeasureFock)) + + if len(B[0].reg) != self.modes: + raise CircuitError("All modes must be measured.") + + # Check circuit begins with two mode squeezers + # -------------------------------------------- + A, B, C = group_operations(seq, lambda x: isinstance(x, ops.S2gate)) + + regrefs = set() + + if B: + # get set of circuit registers as a tuple for each S2gate + regrefs = {(cmd.reg[0].ind, cmd.reg[1].ind) for cmd in B} + + # the set of allowed mode-tuples the S2gates must have + allowed_modes = set(zip(range(0, 4), range(4, 8))) + + if not regrefs.issubset(allowed_modes): + raise CircuitError("S2gates do not appear on the correct modes.") + + # ensure provided S2gates all have the allowed squeezing values + allowed_sq_value = {(0.0, 0.0), (self.sq_amplitude, 0.0)} + sq_params = {(float(cmd.op.p[0]), float(cmd.op.p[1])) for cmd in B} + + if not sq_params.issubset(allowed_sq_value): + wrong_params = sq_params - allowed_sq_value + raise CircuitError( + "Incorrect squeezing value(s) (r, phi)={}. Allowed squeezing " + "value(s) are (r, phi)={}.".format(wrong_params, allowed_sq_value) + ) + + # determine which modes do not have input S2gates specified + missing = allowed_modes - regrefs + + for i, j in missing: + # insert S2gates with 0 squeezing + seq.insert(0, Command(ops.S2gate(0, 0), [registers[i], registers[j]])) + + # Check if matches the circuit template + # -------------------------------------------- + # This will avoid superfluous unitary compilation. + try: + seq = super().compile(seq, registers) + except CircuitError: + # failed topology check. Continue to more general + # compilation below. + pass + else: + return seq + + # Compile the unitary: combine and then decompose all unitaries + # ------------------------------------------------------------- + A, B, C = group_operations(seq, lambda x: isinstance(x, (ops.Rgate, ops.BSgate))) + + # begin unitary lists for mode [0, 1, 2, 3] and modes [4, 5, 6, 7] with + # two identity matrices. This is because multi_dot requires + # at least two matrices in the list. 
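+        # Note: gates are prepended to these lists (via insert(0, U) below), since a
+        # circuit that applies U1 followed by U2 has overall unitary U2 @ U1; the most
+        # recently applied gate must therefore appear leftmost in the multi_dot product.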
+ U_list0 = [np.identity(self.modes // 2, dtype=np.complex128)] * 2 + U_list4 = [np.identity(self.modes // 2, dtype=np.complex128)] * 2 + + if not B: + # no interferometer was applied + A, B, C = group_operations(seq, lambda x: isinstance(x, ops.S2gate)) + A = B # move the S2gates to A + else: + for cmd in B: + # calculate the unitary matrix representing each + # rotation gate and each beamsplitter + modes = [i.ind for i in cmd.reg] + params = par_evaluate(cmd.op.p) + U = np.identity(self.modes // 2, dtype=np.complex128) + + if isinstance(cmd.op, ops.Rgate): + m = modes[0] + U[m % 4, m % 4] = np.exp(1j * params[0]) + + elif isinstance(cmd.op, ops.BSgate): + m, n = modes + + t = np.cos(params[0]) + r = np.exp(1j * params[1]) * np.sin(params[0]) + + U[m % 4, m % 4] = t + U[m % 4, n % 4] = -np.conj(r) + U[n % 4, m % 4] = r + U[n % 4, n % 4] = t + + if set(modes).issubset({0, 1, 2, 3}): + U_list0.insert(0, U) + elif set(modes).issubset({4, 5, 6, 7}): + U_list4.insert(0, U) + else: + raise CircuitError( + "Unitary must be applied separately to modes [0, 1, 2, 3] and modes [4, 5, 6, 7]." + ) + + # multiply all unitaries together, to get the final + # unitary representation on modes [0, 1] and [2, 3]. + U0 = multi_dot(U_list0) + U4 = multi_dot(U_list4) + + # check unitaries are equal + if not np.allclose(U0, U4): + raise CircuitError( + "Interferometer on modes [0, 1, 2, 3] must be identical to interferometer on modes [4, 5, 6, 7]." + ) + + U = block_diag(U0, U4) + + # replace B with an interferometer + B = [ + Command(ops.Interferometer(U0), registers[:4]), + Command(ops.Interferometer(U4), registers[4:]), + ] + + # decompose the interferometer, using Mach-Zehnder interferometers + B = self.decompose(B) + + # Do a final circuit topology check + # --------------------------------- + seq = super().compile(A + B + C, registers) + return seq diff --git a/tests/frontend/test_circuitspecs_chip2.py b/tests/frontend/test_circuitspecs_chip2.py new file mode 100644 index 000000000..858d3b7ea --- /dev/null +++ b/tests/frontend/test_circuitspecs_chip2.py @@ -0,0 +1,449 @@ +# Copyright 2019 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +r"""Unit tests for the CircuitSpec class""" +import textwrap + +import pytest +import numpy as np +import networkx as nx + +import blackbird + +import strawberryfields as sf +import strawberryfields.ops as ops + +from strawberryfields.parameters import par_evaluate +from strawberryfields.program_utils import CircuitError, list_to_DAG +from strawberryfields.io import to_program +from strawberryfields.utils import random_interferometer +from strawberryfields.circuitspecs.chip2 import Chip2Specs, CircuitSpecs + + +pytestmark = pytest.mark.frontend + +np.random.seed(42) + +SQ_AMPLITUDE = 1 +"""float: the allowed squeezing amplitude""" + + +def program_equivalence(prog1, prog2, compare_params=True, atol=1e-6, rtol=0): + r"""Checks if two programs are equivalent. 
+ + This function converts the program lists into directed acyclic graphs, + and runs the NetworkX `is_isomorphic` graph function in order + to determine if the two programs are equivalent. + + Note: when checking for parameter equality between two parameters + :math:`a` and :math:`b`, we use the following formula: + + .. math:: |a - b| \leq (\texttt{atol} + \texttt{rtol}\times|b|) + + Args: + prog1 (strawberryfields.program.Program): quantum program + prog2 (strawberryfields.program.Program): quantum program + compare_params (bool): Set to ``False`` to turn of comparing + program parameters; equivalency will only take into + account the operation order. + atol (float): the absolute tolerance parameter for checking + quantum operation parameter equality + rtol (float): the relative tolerance parameter for checking + quantum operation parameter equality + + Returns: + bool: returns ``True`` if two quantum programs are equivalent + """ + DAG1 = list_to_DAG(prog1.circuit) + DAG2 = list_to_DAG(prog2.circuit) + + circuit = [] + for G in [DAG1, DAG2]: + # relabel the DAG nodes to integers + circuit.append(nx.convert_node_labels_to_integers(G)) + + # add node attributes to store the operation name and parameters + name_mapping = {i: n.op.__class__.__name__ for i, n in enumerate(G.nodes())} + parameter_mapping = {i: par_evaluate(n.op.p) for i, n in enumerate(G.nodes())} + + # CXgate and BSgate are not symmetric wrt to permuting the order of the two + # modes it acts on; i.e., the order of the wires matter + wire_mapping = {} + for i, n in enumerate(G.nodes()): + if n.op.__class__.__name__ == "CXgate": + if np.allclose(n.op.p[0], 0): + # if the CXgate parameter is 0, wire order doesn't matter + wire_mapping[i] = 0 + else: + # if the CXgate parameter is not 0, order matters + wire_mapping[i] = [j.ind for j in n.reg] + + elif n.op.__class__.__name__ == "BSgate": + if np.allclose([j % np.pi for j in par_evaluate(n.op.p)], [np.pi/4, np.pi/2]): + # if the beamsplitter is *symmetric*, then the order of the + # wires does not matter. + wire_mapping[i] = 0 + else: + # beamsplitter is not symmetric, order matters + wire_mapping[i] = [j.ind for j in n.reg] + + else: + # not a CXgate or a BSgate, order of wires doesn't matter + wire_mapping[i] = 0 + + # TODO: at the moment, we do not check for whether an empty + # wire will match an operation with trivial parameters. + # Maybe we can do this in future, but this is a subgraph + # isomorphism problem and much harder. 
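+
+        # Attach the operation name, parameters and wire ordering collected above as
+        # node attributes, so that node_match below compares operations rather than
+        # bare graph structure when checking isomorphism.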
+ + nx.set_node_attributes(circuit[-1], name_mapping, name="name") + nx.set_node_attributes(circuit[-1], parameter_mapping, name="p") + nx.set_node_attributes(circuit[-1], wire_mapping, name="w") + + def node_match(n1, n2): + """Returns True if both nodes have the same name and + same parameters, within a certain tolerance""" + name_match = n1["name"] == n2["name"] + p_match = np.allclose(n1["p"], n2["p"], atol=atol, rtol=rtol) + wire_match = n1["w"] == n2["w"] + + if compare_params: + return name_match and p_match and wire_match + + return name_match and wire_match + + # check if circuits are equivalent + return nx.is_isomorphic(circuit[0], circuit[1], node_match) + + +class DummyCircuit(CircuitSpecs): + """Dummy circuit used to instantiate + the abstract base class""" + + modes = 8 + remote = False + local = True + interactive = True + primitives = {"S2gate", "MeasureFock", "Rgate", "BSgate"} + decompositions = {"Interferometer": {}, "MZgate": {}} + + +class TestChip2Compilation: + """Tests for compilation using the Chip2 circuit specification""" + + def test_exact_template(self, tol): + """Test compilation works for the exact circuit""" + bb = blackbird.loads(Chip2Specs.circuit) + bb = bb( + squeezing_amplitude_0=SQ_AMPLITUDE, + squeezing_amplitude_1=SQ_AMPLITUDE, + squeezing_amplitude_2=SQ_AMPLITUDE, + squeezing_amplitude_3=SQ_AMPLITUDE, + phase_0=0, + phase_1=1, + phase_2=2, + phase_3=3, + phase_4=4, + phase_5=5, + phase_6=6, + phase_7=7, + phase_8=8, + phase_9=9, + phase_10=10, + phase_11=11, + final_phase_0=1.24, + final_phase_1=-0.54, + final_phase_2=4.12, + final_phase_3=0, + final_phase_4=1.24, + final_phase_5=-0.54, + final_phase_6=4.12, + final_phase_7=0, + ) + + expected = to_program(bb) + res = expected.compile("chip2") + + assert program_equivalence(res, expected, atol=tol) + + def test_not_all_modes_measured(self): + """Test exceptions raised if not all modes are measured""" + prog = sf.Program(8) + U = random_interferometer(4) + + with prog.context as q: + ops.S2gate(SQ_AMPLITUDE) | (q[0], q[4]) + ops.S2gate(SQ_AMPLITUDE) | (q[1], q[5]) + ops.S2gate(SQ_AMPLITUDE) | (q[2], q[6]) + ops.S2gate(SQ_AMPLITUDE) | (q[3], q[7]) + ops.Interferometer(U) | (q[0], q[1], q[2], q[3]) + ops.Interferometer(U) | (q[4], q[5], q[6], q[7]) + ops.MeasureFock() | (q[0], q[1]) + + with pytest.raises(CircuitError, match="All modes must be measured"): + res = prog.compile("chip2") + + def test_no_s2gates(self, tol): + """Test identity S2gates are inserted when no S2gates + are provided.""" + prog = sf.Program(8) + U = random_interferometer(4) + + with prog.context as q: + ops.Interferometer(U) | (q[0], q[1], q[2], q[3]) + ops.Interferometer(U) | (q[4], q[5], q[6], q[7]) + ops.MeasureFock() | q + + expected = sf.Program(8) + + with expected.context as q: + ops.S2gate(0) | (q[0], q[4]) + ops.S2gate(0) | (q[1], q[5]) + ops.S2gate(0) | (q[2], q[6]) + ops.S2gate(0) | (q[3], q[7]) + ops.Interferometer(U) | (q[0], q[1], q[2], q[3]) + ops.Interferometer(U) | (q[4], q[5], q[6], q[7]) + ops.MeasureFock() | q + + res = prog.compile("chip2") + expected = expected.compile("chip2") + assert program_equivalence(res, expected, atol=tol) + + def test_missing_s2gates(self, tol): + """Test identity S2gates are inserted when some (but not all) + S2gates are included.""" + prog = sf.Program(8) + U = random_interferometer(4) + + with prog.context as q: + ops.S2gate(SQ_AMPLITUDE) | (q[1], q[5]) + ops.S2gate(SQ_AMPLITUDE) | (q[3], q[7]) + ops.Interferometer(U) | (q[0], q[1], q[2], q[3]) + ops.Interferometer(U) | (q[4], 
q[5], q[6], q[7]) + ops.MeasureFock() | q + + expected = sf.Program(8) + + with expected.context as q: + ops.S2gate(0) | (q[0], q[4]) + ops.S2gate(SQ_AMPLITUDE) | (q[1], q[5]) + ops.S2gate(0) | (q[2], q[6]) + ops.S2gate(SQ_AMPLITUDE) | (q[3], q[7]) + ops.Interferometer(U) | (q[0], q[1], q[2], q[3]) + ops.Interferometer(U) | (q[4], q[5], q[6], q[7]) + ops.MeasureFock() | q + + res = prog.compile("chip2") + expected = expected.compile("chip2") + assert program_equivalence(res, expected, atol=tol) + + def test_incorrect_s2gate_modes(self): + """Test exceptions raised if S2gates do not appear on correct modes""" + prog = sf.Program(8) + U = random_interferometer(4) + + with prog.context as q: + ops.S2gate(SQ_AMPLITUDE) | (q[0], q[1]) + ops.S2gate(SQ_AMPLITUDE) | (q[2], q[3]) + ops.S2gate(SQ_AMPLITUDE) | (q[4], q[5]) + ops.S2gate(SQ_AMPLITUDE) | (q[7], q[6]) + ops.Interferometer(U) | (q[0], q[1], q[2], q[3]) + ops.Interferometer(U) | (q[4], q[5], q[6], q[7]) + ops.MeasureFock() | q + + with pytest.raises(CircuitError, match="S2gates do not appear on the correct modes"): + res = prog.compile("chip2") + + def test_incorrect_s2gate_params(self): + """Test exceptions raised if S2gates have illegal parameters""" + prog = sf.Program(8) + U = random_interferometer(4) + + with prog.context as q: + ops.S2gate(SQ_AMPLITUDE) | (q[0], q[4]) + ops.S2gate(0) | (q[1], q[5]) + ops.S2gate(SQ_AMPLITUDE) | (q[2], q[6]) + ops.S2gate(SQ_AMPLITUDE+0.1) | (q[3], q[7]) + ops.Interferometer(U) | (q[0], q[1], q[2], q[3]) + ops.Interferometer(U) | (q[4], q[5], q[6], q[7]) + ops.MeasureFock() | q + + with pytest.raises(CircuitError, match=r"Incorrect squeezing value\(s\) \(r, phi\)={\(1.1, 0.0\)}"): + res = prog.compile("chip2") + + def test_s2gate_repeated_modes(self): + """Test exceptions raised if S2gates are repeated""" + prog = sf.Program(8) + U = random_interferometer(4) + + with prog.context as q: + ops.S2gate(SQ_AMPLITUDE) | (q[0], q[4]) + ops.S2gate(SQ_AMPLITUDE) | (q[0], q[4]) + ops.Interferometer(U) | (q[0], q[1], q[2], q[3]) + ops.Interferometer(U) | (q[4], q[5], q[6], q[7]) + ops.MeasureFock() | q + + with pytest.raises(CircuitError, match="incompatible topology."): + res = prog.compile("chip2") + + def test_no_unitary(self, tol): + """Test compilation works with no unitary provided""" + prog = sf.Program(8) + + with prog.context as q: + ops.S2gate(SQ_AMPLITUDE) | (q[0], q[4]) + ops.S2gate(SQ_AMPLITUDE) | (q[1], q[5]) + ops.S2gate(SQ_AMPLITUDE) | (q[2], q[6]) + ops.S2gate(SQ_AMPLITUDE) | (q[3], q[7]) + ops.MeasureFock() | q + + res = prog.compile("chip2") + expected = sf.Program(8) + + with expected.context as q: + ops.S2gate(SQ_AMPLITUDE, 0) | (q[0], q[4]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[1], q[5]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[2], q[6]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[3], q[7]) + + # corresponds to an identity on modes [0, 1, 2, 3] + ops.Rgate(0) | (q[0]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) + ops.Rgate(0) | (q[0]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) + ops.Rgate(0) | (q[2]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[2], q[3]) + ops.Rgate(0) | (q[2]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[2], q[3]) + ops.Rgate(np.pi) | (q[1]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[1], q[2]) + ops.Rgate(np.pi) | (q[1]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[1], q[2]) + ops.Rgate(0) | (q[0]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) + ops.Rgate(0) | (q[0]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) + ops.Rgate(0) | (q[2]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[2], q[3]) + 
ops.Rgate(0) | (q[2]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[2], q[3]) + ops.Rgate(0) | (q[1]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[1], q[2]) + ops.Rgate(np.pi) | (q[1]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[1], q[2]) + ops.Rgate(np.pi) | (q[0]) + ops.Rgate(0) | (q[1]) + ops.Rgate(np.pi) | (q[2]) + ops.Rgate(-np.pi) | (q[3]) + + # corresponds to an identity on modes [4, 5, 6, 7] + ops.Rgate(0) | (q[4]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[4], q[5]) + ops.Rgate(0) | (q[4]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[4], q[5]) + ops.Rgate(0) | (q[6]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[6], q[7]) + ops.Rgate(0) | (q[6]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[6], q[7]) + ops.Rgate(np.pi) | (q[5]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[5], q[6]) + ops.Rgate(np.pi) | (q[5]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[5], q[6]) + ops.Rgate(0) | (q[4]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[4], q[5]) + ops.Rgate(0) | (q[4]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[4], q[5]) + ops.Rgate(0) | (q[6]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[6], q[7]) + ops.Rgate(0) | (q[6]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[6], q[7]) + ops.Rgate(0) | (q[5]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[5], q[6]) + ops.Rgate(np.pi) | (q[5]) + ops.BSgate(np.pi / 4, np.pi / 2) | (q[5], q[6]) + ops.Rgate(np.pi) | (q[4]) + ops.Rgate(0) | (q[5]) + ops.Rgate(np.pi) | (q[6]) + ops.Rgate(-np.pi) | (q[7]) + + ops.MeasureFock() | q + + assert program_equivalence(res, expected, atol=tol) + + def test_interferometers(self, tol): + """Test that the compilation correctly decomposes the interferometer using + the rectangular_symmetric mesh""" + prog = sf.Program(8) + U = random_interferometer(4) + + with prog.context as q: + ops.S2gate(SQ_AMPLITUDE, 0) | (q[0], q[4]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[1], q[5]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[2], q[6]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[3], q[7]) + ops.Interferometer(U) | (q[0], q[1], q[2], q[3]) + ops.Interferometer(U) | (q[4], q[5], q[6], q[7]) + ops.MeasureFock() | q + + res = prog.compile("chip2") + + expected = sf.Program(8) + + with expected.context as q: + ops.S2gate(SQ_AMPLITUDE, 0) | (q[0], q[4]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[1], q[5]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[2], q[6]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[3], q[7]) + ops.Interferometer(U, mesh="rectangular_symmetric", drop_identity=False) | (q[0], q[1], q[2], q[3]) + ops.Interferometer(U, mesh="rectangular_symmetric", drop_identity=False) | (q[4], q[5], q[6], q[7]) + ops.MeasureFock() | q + + expected = expected.compile(DummyCircuit()) + + assert program_equivalence(res, expected, atol=tol) + + def test_unitaries_do_not_match(self): + """Test exception raised if the unitary applied to modes [0, 1, 2, 3] is + different to the unitary applied to modes [4, 5, 6, 7]""" + prog = sf.Program(8) + U = random_interferometer(4) + + with prog.context as q: + ops.S2gate(SQ_AMPLITUDE, 0) | (q[0], q[4]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[1], q[5]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[2], q[6]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[3], q[7]) + ops.Interferometer(U) | (q[0], q[1], q[2], q[3]) + ops.Interferometer(U) | (q[4], q[5], q[6], q[7]) + ops.BSgate() | (q[2], q[3]) + ops.MeasureFock() | q + + with pytest.raises(CircuitError, match="must be identical to interferometer"): + res = prog.compile("chip2") + + def test_unitary_too_large(self): + """Test exception raised if the unitary is applied to more + than just modes [0, 1, 2, 3] and [4, 5, 6, 7].""" + prog = sf.Program(8) + U = random_interferometer(8) + + with 
prog.context as q: + ops.S2gate(SQ_AMPLITUDE, 0) | (q[0], q[4]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[1], q[5]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[2], q[6]) + ops.S2gate(SQ_AMPLITUDE, 0) | (q[3], q[7]) + ops.Interferometer(U) | q + ops.MeasureFock() | q + + with pytest.raises(CircuitError, match="must be applied separately"): + res = prog.compile("chip2") From da6cef8d999e9c0570808e527482700bc17eb3fd Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Mon, 2 Dec 2019 14:38:21 -0500 Subject: [PATCH 106/335] Add virtual backends (#257) --- strawberryfields/backends/__init__.py | 27 +++++++++++++------ .../backends/chip0backend/__init__.py | 15 ----------- .../backends/chip0backend/backend.py | 27 ------------------- 3 files changed, 19 insertions(+), 50 deletions(-) delete mode 100644 strawberryfields/backends/chip0backend/__init__.py delete mode 100644 strawberryfields/backends/chip0backend/backend.py diff --git a/strawberryfields/backends/__init__.py b/strawberryfields/backends/__init__.py index 3fd33d69d..b0d1b2534 100644 --- a/strawberryfields/backends/__init__.py +++ b/strawberryfields/backends/__init__.py @@ -16,7 +16,7 @@ from .base import BaseBackend, BaseFock, BaseGaussian, ModeMap from .gaussianbackend import GaussianBackend from .fockbackend import FockBackend -from .chip0backend import Chip0Backend + __all__ = [ "BaseBackend", @@ -24,18 +24,20 @@ "BaseGaussian", "FockBackend", "GaussianBackend", - "TFBackend", - "Chip0Backend", + "TFBackend" ] -supported_backends = { - b.short_name: b for b in (BaseBackend, GaussianBackend, FockBackend, Chip0Backend) + +virtual_backends = ["chip0", "chip2"] + +local_backends = { + b.short_name: b for b in (BaseBackend, GaussianBackend, FockBackend) } def load_backend(name): """Loads the specified backend by mapping a string - to the backend type, via the ``supported_backends`` + to the backend type, via the ``local_backends`` dictionary. Note that this function is used by the frontend only, and should not be user-facing. """ @@ -46,8 +48,17 @@ def load_backend(name): return TFBackend() - if name in supported_backends: - backend = supported_backends[name]() + if name in virtual_backends: + # Backend is a remote device/simulator, that has a + # defined circuit spec, but no local backend class. + # By convention, the short name and corresponding + # circuit spec are the same. + backend_attrs = {"short_name": name, "circuit_spec": name} + backend_class = type(name, (BaseBackend,), backend_attrs) + return backend_class() + + if name in local_backends: + backend = local_backends[name]() return backend raise ValueError("Backend '{}' is not supported.".format(name)) diff --git a/strawberryfields/backends/chip0backend/__init__.py b/strawberryfields/backends/chip0backend/__init__.py deleted file mode 100644 index 4d27a60a8..000000000 --- a/strawberryfields/backends/chip0backend/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2019 Xanadu Quantum Technologies Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from .backend import Chip0Backend diff --git a/strawberryfields/backends/chip0backend/backend.py b/strawberryfields/backends/chip0backend/backend.py deleted file mode 100644 index c6a29fd4d..000000000 --- a/strawberryfields/backends/chip0backend/backend.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2019 Xanadu Quantum Technologies Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Chip0 backend interface -======================= - -""" - - -from strawberryfields.backends.base import BaseBackend - - -class Chip0Backend(BaseBackend): - short_name = "chip0" - circuit_spec = "chip0" From e82bfce167608ea120d7f72370803d88051320c1 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Mon, 2 Dec 2019 22:14:16 -0500 Subject: [PATCH 107/335] Chip2spec now defined in terms of MZgates (#258) --- strawberryfields/circuitspecs/chip2.py | 84 ++++++---------------- tests/frontend/test_circuitspecs_chip2.py | 87 ++++++++++------------- 2 files changed, 57 insertions(+), 114 deletions(-) diff --git a/strawberryfields/circuitspecs/chip2.py b/strawberryfields/circuitspecs/chip2.py index 76b1cd918..226a947b6 100644 --- a/strawberryfields/circuitspecs/chip2.py +++ b/strawberryfields/circuitspecs/chip2.py @@ -18,6 +18,7 @@ from numpy.linalg import multi_dot from scipy.linalg import block_diag +from strawberryfields.decompositions import mach_zehnder from strawberryfields.program_utils import CircuitError, Command, group_operations from strawberryfields.parameters import par_evaluate import strawberryfields.ops as ops @@ -37,11 +38,10 @@ class Chip2Specs(CircuitSpecs): sq_amplitude = 1 - primitives = {"S2gate", "MeasureFock", "Rgate", "BSgate"} + primitives = {"S2gate", "MeasureFock", "Rgate", "BSgate", "MZgate"} decompositions = { "Interferometer": {"mesh": "rectangular_symmetric", "drop_identity": False}, - "BipartiteGraphEmbed": {"mesh": "rectangular_symmetric", "drop_identity": False}, - "MZgate": {}, + "BipartiteGraphEmbed": {"mesh": "rectangular_symmetric", "drop_identity": False} } circuit = textwrap.dedent( @@ -59,68 +59,20 @@ class Chip2Specs(CircuitSpecs): # standard 4x4 interferometer for the signal modes (the lower ones in frequency) # even phase indices correspond to external Mach-Zehnder interferometer phases # odd phase indices correspond to internal Mach-Zehnder interferometer phases - # MZI_0 - Rgate({phase_0}) | [0] - BSgate(pi/4, pi/2) | [0, 1] - Rgate({phase_1}) | [0] - BSgate(pi/4, pi/2) | [0, 1] - # MZI_1 - Rgate({phase_2}) | [2] - BSgate(pi/4, pi/2) | [2, 3] - Rgate({phase_3}) | [2] - BSgate(pi/4, pi/2) | [2, 3] - # MZI_2 - Rgate({phase_4}) | [1] - BSgate(pi/4, pi/2) | [1, 2] - Rgate({phase_5}) | [1] - BSgate(pi/4, pi/2) | [1, 2] - # MZI_3 - Rgate({phase_6}) | [0] - BSgate(pi/4, pi/2) | [0, 1] - Rgate({phase_7}) | [0] - BSgate(pi/4, pi/2) | [0, 1] - # MZI_4 - Rgate({phase_8}) | [2] - BSgate(pi/4, pi/2) | [2, 3] - Rgate({phase_9}) | [2] - BSgate(pi/4, pi/2) | [2, 3] - # MZI_5 - Rgate({phase_10}) | [1] - BSgate(pi/4, pi/2) | [1, 2] - Rgate({phase_11}) | [1] - BSgate(pi/4, pi/2) | [1, 2] + 
MZgate({phase_0}, {phase_1}) | [0, 1] + MZgate({phase_2}, {phase_3}) | [2, 3] + MZgate({phase_4}, {phase_5}) | [1, 2] + MZgate({phase_6}, {phase_7}) | [0, 1] + MZgate({phase_8}, {phase_9}) | [2, 3] + MZgate({phase_10}, {phase_11}) | [1, 2] # duplicate the interferometer for the idler modes (the higher ones in frequency) - # MZI_0 - Rgate({phase_0}) | [4] - BSgate(pi/4, pi/2) | [4, 5] - Rgate({phase_1}) | [4] - BSgate(pi/4, pi/2) | [4, 5] - # MZI_1 - Rgate({phase_2}) | [6] - BSgate(pi/4, pi/2) | [6, 7] - Rgate({phase_3}) | [6] - BSgate(pi/4, pi/2) | [6, 7] - # MZI_2 - Rgate({phase_4}) | [5] - BSgate(pi/4, pi/2) | [5, 6] - Rgate({phase_5}) | [5] - BSgate(pi/4, pi/2) | [5, 6] - # MZI_3 - Rgate({phase_6}) | [4] - BSgate(pi/4, pi/2) | [4, 5] - Rgate({phase_7}) | [4] - BSgate(pi/4, pi/2) | [4, 5] - # MZI_4 - Rgate({phase_8}) | [6] - BSgate(pi/4, pi/2) | [6, 7] - Rgate({phase_9}) | [6] - BSgate(pi/4, pi/2) | [6, 7] - # MZI_5 - Rgate({phase_10}) | [5] - BSgate(pi/4, pi/2) | [5, 6] - Rgate({phase_11}) | [5] - BSgate(pi/4, pi/2) | [5, 6] + MZgate({phase_0}, {phase_1}) | [4, 5] + MZgate({phase_2}, {phase_3}) | [6, 7] + MZgate({phase_4}, {phase_5}) | [5, 6] + MZgate({phase_6}, {phase_7}) | [4, 5] + MZgate({phase_8}, {phase_9}) | [6, 7] + MZgate({phase_10}, {phase_11}) | [5, 6] # add final dummy phases to allow mapping any unitary to this template (these do not # affect the photon number measurement) @@ -208,7 +160,7 @@ def compile(self, seq, registers): # Compile the unitary: combine and then decompose all unitaries # ------------------------------------------------------------- - A, B, C = group_operations(seq, lambda x: isinstance(x, (ops.Rgate, ops.BSgate))) + A, B, C = group_operations(seq, lambda x: isinstance(x, (ops.Rgate, ops.BSgate, ops.MZgate))) # begin unitary lists for mode [0, 1, 2, 3] and modes [4, 5, 6, 7] with # two identity matrices. 
This is because multi_dot requires @@ -232,6 +184,10 @@ def compile(self, seq, registers): m = modes[0] U[m % 4, m % 4] = np.exp(1j * params[0]) + elif isinstance(cmd.op, ops.MZgate): + m, n = modes + U = mach_zehnder(m % 4, n % 4, params[0], params[1], self.modes // 2) + elif isinstance(cmd.op, ops.BSgate): m, n = modes diff --git a/tests/frontend/test_circuitspecs_chip2.py b/tests/frontend/test_circuitspecs_chip2.py index 858d3b7ea..48c54d5e7 100644 --- a/tests/frontend/test_circuitspecs_chip2.py +++ b/tests/frontend/test_circuitspecs_chip2.py @@ -134,8 +134,8 @@ class DummyCircuit(CircuitSpecs): remote = False local = True interactive = True - primitives = {"S2gate", "MeasureFock", "Rgate", "BSgate"} - decompositions = {"Interferometer": {}, "MZgate": {}} + primitives = {"S2gate", "MeasureFock", "Rgate", "BSgate", "MZgate"} + decompositions = {"Interferometer": {}} class TestChip2Compilation: @@ -296,6 +296,29 @@ def test_s2gate_repeated_modes(self): with pytest.raises(CircuitError, match="incompatible topology."): res = prog.compile("chip2") + def test_mzgate(self): + """Test that combinations of MZgates, Rgates, and BSgates + correctly compile.""" + prog = sf.Program(8) + U = random_interferometer(4) + + def unitary(q): + ops.MZgate(0.5, 0.1) | (q[0], q[1]) + ops.BSgate(0.1, 0.2) | (q[1], q[2]) + ops.Rgate(0.4) | q[0] + + with prog.context as q: + ops.S2gate(SQ_AMPLITUDE) | (q[0], q[4]) + ops.S2gate(SQ_AMPLITUDE) | (q[1], q[5]) + ops.S2gate(SQ_AMPLITUDE) | (q[2], q[6]) + ops.S2gate(SQ_AMPLITUDE) | (q[3], q[7]) + + unitary(q[:4]) + unitary(q[4:]) + ops.MeasureFock() | q + + res = prog.compile("chip2") + def test_no_unitary(self, tol): """Test compilation works with no unitary provided""" prog = sf.Program(8) @@ -317,60 +340,24 @@ def test_no_unitary(self, tol): ops.S2gate(SQ_AMPLITUDE, 0) | (q[3], q[7]) # corresponds to an identity on modes [0, 1, 2, 3] - ops.Rgate(0) | (q[0]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) - ops.Rgate(0) | (q[0]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) - ops.Rgate(0) | (q[2]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[2], q[3]) - ops.Rgate(0) | (q[2]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[2], q[3]) - ops.Rgate(np.pi) | (q[1]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[1], q[2]) - ops.Rgate(np.pi) | (q[1]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[1], q[2]) - ops.Rgate(0) | (q[0]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) - ops.Rgate(0) | (q[0]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) - ops.Rgate(0) | (q[2]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[2], q[3]) - ops.Rgate(0) | (q[2]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[2], q[3]) - ops.Rgate(0) | (q[1]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[1], q[2]) - ops.Rgate(np.pi) | (q[1]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[1], q[2]) + ops.MZgate(0, 0) | (q[0], q[1]) + ops.MZgate(0, 0) | (q[2], q[3]) + ops.MZgate(np.pi, np.pi) | (q[1], q[2]) + ops.MZgate(0, 0) | (q[0], q[1]) + ops.MZgate(0, 0) | (q[2], q[3]) + ops.MZgate(0, np.pi) | (q[1], q[2]) ops.Rgate(np.pi) | (q[0]) ops.Rgate(0) | (q[1]) ops.Rgate(np.pi) | (q[2]) ops.Rgate(-np.pi) | (q[3]) # corresponds to an identity on modes [4, 5, 6, 7] - ops.Rgate(0) | (q[4]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[4], q[5]) - ops.Rgate(0) | (q[4]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[4], q[5]) - ops.Rgate(0) | (q[6]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[6], q[7]) - ops.Rgate(0) | (q[6]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[6], q[7]) - ops.Rgate(np.pi) | (q[5]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[5], q[6]) - ops.Rgate(np.pi) | 
(q[5]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[5], q[6]) - ops.Rgate(0) | (q[4]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[4], q[5]) - ops.Rgate(0) | (q[4]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[4], q[5]) - ops.Rgate(0) | (q[6]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[6], q[7]) - ops.Rgate(0) | (q[6]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[6], q[7]) - ops.Rgate(0) | (q[5]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[5], q[6]) - ops.Rgate(np.pi) | (q[5]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[5], q[6]) + ops.MZgate(0, 0) | (q[4], q[5]) + ops.MZgate(0, 0) | (q[6], q[7]) + ops.MZgate(np.pi, np.pi) | (q[5], q[6]) + ops.MZgate(0, 0) | (q[4], q[5]) + ops.MZgate(0, 0) | (q[6], q[7]) + ops.MZgate(0, np.pi) | (q[5], q[6]) ops.Rgate(np.pi) | (q[4]) ops.Rgate(0) | (q[5]) ops.Rgate(np.pi) | (q[6]) From 80cd60bfc4174900b3469cafcf096eb07ae7b171 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 3 Dec 2019 20:12:29 -0500 Subject: [PATCH 108/335] adjust tolerance --- strawberryfields/circuitspecs/chip2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/circuitspecs/chip2.py b/strawberryfields/circuitspecs/chip2.py index 226a947b6..b960d7e88 100644 --- a/strawberryfields/circuitspecs/chip2.py +++ b/strawberryfields/circuitspecs/chip2.py @@ -130,7 +130,7 @@ def compile(self, seq, registers): # ensure provided S2gates all have the allowed squeezing values allowed_sq_value = {(0.0, 0.0), (self.sq_amplitude, 0.0)} - sq_params = {(float(cmd.op.p[0]), float(cmd.op.p[1])) for cmd in B} + sq_params = {(float(np.round(cmd.op.p[0], 3)), float(cmd.op.p[1])) for cmd in B} if not sq_params.issubset(allowed_sq_value): wrong_params = sq_params - allowed_sq_value From b142f057800f3e61e7ae3dec52094ee7c22b980b Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 4 Dec 2019 10:26:42 -0500 Subject: [PATCH 109/335] get starship engine working --- starship | 1 + 1 file changed, 1 insertion(+) diff --git a/starship b/starship index 3070131ba..9266d0e57 100755 --- a/starship +++ b/starship @@ -89,6 +89,7 @@ if __name__ == "__main__": sys.exit() elif args.hello: client = APIClient() + client.BASE_URL = "https://platform.strawberryfields.ai/healthz" try: response = client.get("") except Exception as e: From 6b43b50e8e6b93fe0483086036a5f1695473747c Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Wed, 4 Dec 2019 12:35:31 -0500 Subject: [PATCH 110/335] fix output --- starship | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/starship b/starship index 9266d0e57..bffc2948a 100755 --- a/starship +++ b/starship @@ -117,11 +117,11 @@ if __name__ == "__main__": result = eng.run(program) if result and result.samples is not None: - if hasattr(args, "output_path"): - with open(args.output_path, "w") as file: - file.write(str(result.samples)) + if hasattr(args, "output"): + with open(args.output, "w") as file: + file.write(str(result.samples.T)) else: - sys.stdout.write(str(result.samples)) + sys.stdout.write(str(result.samples.T)) if args.debug: pdb.set_trace() From 530143b1e50abce1c5d0e6c4efaf5dccc95d7d46 Mon Sep 17 00:00:00 2001 From: Jeremy Swinarton Date: Tue, 17 Dec 2019 16:01:33 -0500 Subject: [PATCH 111/335] Remove outdated hafnian dependency (#264) Removes the outdated dependency on the hafnian library. This library was renamed to thewalrus, so the project effectively depends on two versions of the same library. 
--- setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.py b/setup.py index f5573e9c4..2dee20203 100644 --- a/setup.py +++ b/setup.py @@ -27,7 +27,6 @@ "networkx>=2.0", "quantum-blackbird>=0.2.0", "python-dateutil>=2.8.0", - "hafnian>=0.6", "thewalrus>=0.7", "toml", "appdirs", From 781359b8384ce02dde5f2ca4025a7af6ce7cebfc Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Mon, 30 Dec 2019 15:22:12 -0500 Subject: [PATCH 112/335] fixes docs --- doc/code/sf_engine.rst | 2 +- doc/introduction/starship.rst | 4 ++-- strawberryfields/api_client.py | 18 ------------------ 3 files changed, 3 insertions(+), 21 deletions(-) diff --git a/doc/code/sf_engine.rst b/doc/code/sf_engine.rst index 29ef4a887..ffb0644d6 100644 --- a/doc/code/sf_engine.rst +++ b/doc/code/sf_engine.rst @@ -14,4 +14,4 @@ sf.engine .. automodapi:: strawberryfields.engine :no-heading: :include-all-objects: - :skip: Engine, Sequence, stack, shape, load_backend, NotApplicableError, BaseBackend + :skip: Engine, Sequence, stack, shape, load_backend, NotApplicableError, BaseBackend, sleep, APIClient, Job, JobNotQueuedError,JobExecutionError,to_blackbird,DEFAULT_CONFIG diff --git a/doc/introduction/starship.rst b/doc/introduction/starship.rst index 25c1587f0..0ce081094 100644 --- a/doc/introduction/starship.rst +++ b/doc/introduction/starship.rst @@ -1,7 +1,7 @@ .. _starship: -Running Jobs with StarshipEngine -################################ +StarshipEngine +############## .. sectionauthor:: Zeid Zabaneh diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py index 61dc044d6..8066c8d16 100644 --- a/strawberryfields/api_client.py +++ b/strawberryfields/api_client.py @@ -13,14 +13,6 @@ # limitations under the License. """ -APIClient library -================= - -**Module name:** :mod:`strawberryfields.api_client` - -.. currentmodule:: strawberryfields.api_client - - This module provides a thin client that communicates with the Xanadu Platform API over the HTTP protocol, based on the requests module. It also provides helper classes to facilitate interacting with this API via the Resource subclasses, as well as the ResourceManager wrapper around APIClient @@ -63,16 +55,6 @@ job.result.reload() # Reloads the JobResult object from the API job.manager.get(1536) # Fetches job 1536 from the server and updates the instance - -Classes -------- - -.. autosummary:: - APIClient - Resource - ResourceManager - Field - Job """ From 9ce3b7c6f1ead40289fd6ad46df58113e746f890 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Mon, 30 Dec 2019 15:52:29 -0500 Subject: [PATCH 113/335] fix doc merging bugs --- doc/code/sf_configuration.rst | 14 + doc/index.rst | 3 +- doc/introduction/configuration.rst | 47 +++ doc/introduction/starship.rst | 539 ++++++++++++++++++++++------- doc/introduction/tutorials.rst | 19 + strawberryfields/configuration.py | 71 ---- 6 files changed, 503 insertions(+), 190 deletions(-) create mode 100644 doc/code/sf_configuration.rst create mode 100644 doc/introduction/configuration.rst diff --git a/doc/code/sf_configuration.rst b/doc/code/sf_configuration.rst new file mode 100644 index 000000000..0dad2eb49 --- /dev/null +++ b/doc/code/sf_configuration.rst @@ -0,0 +1,14 @@ +sf.configuration +================ + +.. currentmodule:: strawberryfields.configuration + +.. warning:: + + Unless you are a Strawberry Fields developer, you likely do not need + to access this module directly. + +.. 
automodapi:: strawberryfields.configuration + :no-heading: + :skip: user_config_dir + :no-inheritance-diagram: diff --git a/doc/index.rst b/doc/index.rst index 5e1616f88..f3218857a 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -153,8 +153,8 @@ Strawberry Fields is **free** and **open source**, released under the Apache Lic introduction/circuits introduction/ops introduction/states - introduction/starship introduction/tutorials + introduction/configuration zreferences .. toctree:: @@ -178,6 +178,7 @@ Strawberry Fields is **free** and **open source**, released under the Apache Lic code/sf_backends code/sf_circuitspecs code/sf_circuitdrawer + code/sf_configuration code/sf_decompositions code/sf_engine code/sf_io diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst new file mode 100644 index 000000000..e49c893b6 --- /dev/null +++ b/doc/introduction/configuration.rst @@ -0,0 +1,47 @@ +Configuration +============= + +On first import, Strawberry Fields attempts to load the configuration file ``config.toml``, by +scanning the following three directories in order of preference: + +1. The current directory +2. The path stored in the environment variable ``SF_CONF`` +3. The default user configuration directory: + + * On Linux: ``~/.config/strawberryfields`` + * On Windows: ``~C:\Users\USERNAME\AppData\Local\Xanadu\strawberryfields`` + * On MacOS: ``~/Library/Application\ Support/strawberryfields`` + +If no configuration file is found, a warning message will be displayed in the logs, +and all device parameters will need to be passed as keyword arguments when +loading the device. + +The user can access the initialized configuration via `strawberryfields.config`, view the +loaded configuration filepath, print the configurations options, access and modify +them via keys, and save/load new configuration files. + +Configuration files +------------------- + +The configuration file ``config.toml`` uses the `TOML standard `_, +and has the following format: + +.. code-block:: toml + + [api] + # Options for the Strawberry Fields Cloud API + authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" + hostname = "localhost" + use_ssl = true + +Summary of options +------------------ + +SF_API_USE_SSL: + Whether to use SSL or not when connecting to the API. True or False. +SF_API_HOSTNAME: + The hostname of the server to connect to. Defaults to localhost. Must be one of the allowed + hosts. +SF_API_AUTHENTICATION_TOKEN: + The authentication token to use when connecting to the API. Will be sent with every request in + the header. diff --git a/doc/introduction/starship.rst b/doc/introduction/starship.rst index 0ce081094..940991e32 100644 --- a/doc/introduction/starship.rst +++ b/doc/introduction/starship.rst @@ -9,199 +9,502 @@ In this section, we provide a tutorial of the **StarshipEngine**, an engine used cloud platform and execute jobs remotely (e.g., on a quantum chip). Configuring StarshipEngine -========================== +-------------------------- Before using StarshipEngine, you need to configure the hostname and authentication token that will provide you access to the API. The easiest way is to create a configuration file named ``config.toml`` in your working directory. A typical file looks like this: -.. code-block:: console +.. 
code-block:: toml - [api] - hostname = "platform.example.com" - authentication_token = "ElUFm3O6m6q1DXPmpi5g4hWEhYHXFxBc" + [api] + hostname = "platform.strawberryfields.ai" + authentication_token = "ElUFm3O6m6q1DXPmpi5g4hWEhYHXFxBc" You can generate this file interactively by using the ``starship`` command as follows, answering the questions in the prompts. -.. code-block:: console +.. code-block:: text - starship --reconfigure - Please enter the hostname of the server to connect to: [platform.example.com] - Please enter the authentication token to use when connecting: [] ElUFm3O6m6q1DXPmpi5g4hWEhYHXFxBc - Would you like to save these settings to a local cofiguration file in the current directory? [Y/n] y - Writing configuration file to current working directory... + $ starship --reconfigure + Please enter the hostname of the server to connect to: [localhost] platform.strawberryfields.ai + Please enter the authentication token to use when connecting: [] ElUFm3O6m6q1DXPmpi5g4hWEhYHXFxBc + Would you like to save these settings to a local cofiguration file in the current directory? [Y/n] y + Writing configuration file to current working directory... To test connectivity, you can use the following command: -.. code-block:: console +.. code-block:: text - starship --hello - You have successfully authenticated to the platform! + $ starship --hello + You have successfully authenticated to the platform! .. _first_program: -Executing your first program -============================ +Submitting a Blackbird script +----------------------------- The easiest way to execute a program using StarshipEngine is to create a Blackbird script (an ``xbb`` file) -and place it in your current working directory. Check the :ref:`Blackbird tutorial ` for how to create this file. +and place it in your current working directory. For this example, consider the following Blackbird script, which represents a quantum program that matches -exactly the gate layout of the `chip0` photonic hardware device. We will save the following file as ``test.xbb`` -in our current working directory: +exactly the gate layout of the ``chip2`` photonic hardware device. We will save the following +file as ``test.xbb`` in our current working directory: .. 
code-block:: python - name template_2x2_chip0 # Name of the program - version 1.0 # Blackbird version number - target chip0 (shots = 50) # This program will run on chip0 for 50 shots - - # Define the interferometer phase values - float phi0 = 0.574 - float phi1 = 1.33 - - # final local phases - float local_phase_0 = -0.543 - float local_phase_1 = 2.43 - float local_phase_2 = 0.11 - float local_phase_3 = -3.21 - - # Initial states are two-mode squeezed states - S2gate(1.0, 0.0) | [0, 2] - S2gate(1.0, 0.0) | [1, 3] - - # A standard two-mode interferometer is applied - # to the first pair of modes - Rgate(phi0) | [0] - BSgate(pi/4, pi/2) | [0, 1] - Rgate(phi1) | [0] - BSgate(pi/4, pi/2) | [0, 1] - - # The 2x2 interferometer above is duplicated - # for the second pair of modes - Rgate(phi0) | [2] - BSgate(pi/4, pi/2) | [2, 3] - Rgate(phi1) | [2] - BSgate(pi/4, pi/2) | [2, 3] - - # final local phases - Rgate(local_phase_0) | 0 - Rgate(local_phase_1) | 1 - Rgate(local_phase_2) | 2 - Rgate(local_phase_3) | 3 - - # Perform a photon number counting measurement - MeasureFock() | [0, 1, 2, 3] + name template_4x2_chip0 # Name of the program + version 1.0 # Blackbird version number + target chip2 (shots = 100) # This program will run on chip2 for 50 shots + + # define the squeezing amplitude + float r = 1.0 + + # Define the interferometer phase values + float phi0 = 0.574 + float phi1 = 1.33 + float phi2 = 0.654 + float phi3 = -2.3 + float phi4 = 0.065 + float phi5 = 0.654 + float phi6 = 1.23 + float phi7 = -1.63 + float phi8 = 0.065 + float phi9 = 0.654 + float phi10 = 1.23 + float phi11 = -1.63 + + # Initial states are two-mode squeezed states + S2gate(r, 0.0) | [0, 4] + S2gate(r, 0.0) | [1, 5] + S2gate(r, 0.0) | [2, 6] + S2gate(r, 0.0) | [3, 7] + + # A standard four-mode interferometer is applied + # to the signal modes (the ones lower in frequency) + MZgate(phi0, phi1) | [0, 1] + MZgate(phi2, phi3) | [2, 3] + MZgate(phi4, phi5) | [1, 2] + MZgate(phi6, phi7) | [0, 1] + MZgate(phi8, phi9) | [2, 3] + MZgate(phi10, phi11) | [1, 2] + + # final local phases + Rgate(0.765) | [0] + Rgate(-0.123) | [1] + Rgate(0.654) | [2] + Rgate(-0.651) | [3] + + # The 4x4 interferometer above is duplicated + # for the idler modes (the ones higher in frequency) + MZgate(phi0, phi1) | [4, 5] + MZgate(phi2, phi3) | [6, 7] + MZgate(phi4, phi5) | [5, 6] + MZgate(phi6, phi7) | [4, 5] + MZgate(phi8, phi9) | [6, 7] + MZgate(phi10, phi11) | [5, 6] + + # final local phases + Rgate(0.765) | [4] + Rgate(-0.123) | [5] + Rgate(0.654) | [6] + Rgate(-0.651) | [7] + + # Perform a photon number counting measurement + MeasureFock() | [0, 1, 2, 3, 4, 5, 6, 7] After you have created your Blackbird script, you can execute it using the command line, or using a Python shell. Executing your Blackbird script using Python --------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To execute this file using Python, you can use a code block like this: .. 
code-block:: python3 - from strawberryfields import StarshipEngine - from strawberryfields.io import load + from strawberryfields import StarshipEngine + from strawberryfields.io import load - eng = StarshipEngine() - prog = load("test.xbb") - result = eng.run(prog) - print(result.samples) + eng = StarshipEngine("chip2") + prog = load("test.xbb") + result = eng.run(prog) + print(result.samples) Executing your Blackbird script from the command line ------------------------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To execute this file from the command line, use the ``starship`` command as follows: .. code-block:: console - starship --input test.xbb --output out.txt + starship --input test.xbb --output out.txt After executing the above command, the result will be stored in ``out.txt`` in the current working directory. You can also omit the ``--output`` parameter to print the result to the screen. +Submitting via Strawberry Fields +-------------------------------- + +In this section, we will use Strawberry Fields to submit a simple +circuit to the chip. + +.. code-block:: python3 + + import numpy as np + + import strawberryfields as sf + from strawberryfields import ops + from strawberryfields import StarshipEngine + from strawberryfields.utils import random_interferometer + +We choose a random 4x4 interferometer + +>>> U = random_interferometer(4) +>>> print(U) +array([[-0.13879438-0.47517904j,-0.29303954-0.47264099j,-0.43951987+0.12977568j, -0.03496718-0.48418713j], +[ 0.06065372-0.11292765j, 0.54733962+0.1215551j, -0.50721513+0.56195975j, -0.15923161+0.26606674j], +[ 0.42212573-0.53182417j, -0.2642572 +0.50625182j, 0.19448705+0.28321781j, 0.30281396-0.05582391j], +[ 0.43097587-0.30288974j, 0.07419772-0.21155126j, 0.28335618-0.13633175j, -0.75113453+0.09580304j]]) + +Next we create the program + +.. code-block:: python3 + + prog = sf.Program(8) + + with prog.context as q: + # Initial squeezed states + # Allowed values are r=1.0 or r=0.0 + ops.S2gate(1.0) | (q[0], q[4]) + ops.S2gate(1.0) | (q[1], q[5]) + ops.S2gate(1.0) | (q[3], q[7]) + + # Interferometer on the signal modes (0-3) + ops.Interferometer(U) | (q[0], q[1], q[2], q[3]) + ops.BSgate(0.543, 0.123) | (q[2], q[0]) + ops.Rgate(0.453) | q[1] + ops.MZgate(0.65, -0.54) | (q[2], q[3]) + + # *Same* interferometer on the idler modes (4-7) + ops.Interferometer(U) | (q[4], q[5], q[6], q[7]) + ops.BSgate(0.543, 0.123) | (q[6], q[4]) + ops.Rgate(0.453) | q[5] + ops.MZgate(0.65, -0.54) | (q[6], q[7]) + + ops.MeasureFock() | q + +We create the engine. The engine is in charge of compiling and executing +programs on the remote device. + +>>> eng = StarshipEngine("chip2") + +We run the engine by calling ``eng.run``, and pass it the program we +want to run. + +>>> results = eng.run(prog, shots=20) +Job e6ead866-04c9-4d48-ba28-680e8639fc41 is sent to server. 
+>>> results.samples.T +array([[0, 0, 1, 0, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 2], + [0, 0, 0, 0, 0, 1, 0, 0], + [1, 0, 0, 0, 0, 0, 3, 0], + [3, 0, 0, 0, 2, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 1, 0], + [0, 1, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [1, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 1], + [1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 1, 1, 0, 2, 1, 2], + [2, 0, 1, 0, 1, 0, 0, 0]]) +>>> np.mean(results.samples.T, axis=0) +array([0.4 , 0.1 , 0.15, 0.05, 0.3 , 0.3 , 0.45, 0.35]) + + +We can convert the samples into counts using the following function: + +.. code-block:: python3 + + from collections import Counter + + def count(samples): + bitstrings = [tuple(i) for i in samples] + return {k:v for k, v in Counter(bitstrings).items()} + +>>> samples = np.array([[0, 2],[1, 0],[0, 1],[0, 0],[0, 0],[2, 0],[0, 1],[0, 1]]) +>>> counts = count(samples) +>>> print(counts) +{(0, 2): 1, (1, 0): 1, (0, 1): 3, (0, 0): 2, (2, 0): 1} +>>> counts[(0, 0)] +2 + +.. _compilation: + Program compilation -=================== +------------------- In addition to using the program template above, which directly matches the physical -layout of the hardware device, you can apply any two-mode interferometer to the pairs of modes. -The interferometer can be composed of any combination -of beamsplitters (:class:`~.ops.BSgate`), rotations/phase shifts (:class:`~.ops.Rgate`). -Furthermore, you can use the :class:`~.ops.Interferometer` command to directly pass a -unitary matrix to be decomposed and compiled to match the device architecture. +layout of the hardware device, you can apply any four-mode interferometer to the pairs of modes. -For example, consider the following Blackbird script: +Primitive gates supported by chip2 include any combination of: + +* `General beamsplitters `_ (``ops.BSgate``), + +* `Mach-Zehnder interfomerters `_ (``ops.MZgate``), or +* `rotations/phase shifts `_ (``ops.Rgate``). + +Furthermore, several automatic decompositions are supported: + +* You can use the :class:`~.ops.Interferometer` command to directly pass a + unitary matrix to be decomposed and compiled to match the device architecture. + This performs a rectangular decomposition using Mach-Zehnder interferometers. + +* You can use :class:`~.ops.BipartiteGraphEmbed` to embed a bipartite graph on + the GBS chip. Note, however, that the decomposed squeezing values depends on the graph + structure, so only bipartite graphs that result in equal squeezing on all + modes can currently be executed on chip2. + +For example, consider the following Blackbird script: .. 
code-block:: python - name compilation_example # Name of the program - version 1.0 # Blackbird version number - target chip0 (shots=50) # This program will run on chip0 for 50 shots + name compilation_example # Name of the program + version 1.0 # Blackbird version number + target chip2 (shots=100) # This program will run on chip0 for 50 shots - # Define a unitary matrix - complex array U[2, 2] = - -0.1955885-0.16833594j, 0.77074506+0.58254631j - -0.03596574+0.96546083j, 0.00676031+0.2579654j + # Define a unitary matrix + complex array U[4, 4] = + 0.09980516-0.78971535j, 0.53374613+0.07984545j, -0.21161788+0.10047649j, -0.01337026-0.14167555j + -0.12759979-0.00425289j, 0.14089156+0.40091225j, 0.31942372-0.21453252j, -0.79775306+0.13657774j + -0.18224807+0.30281836j, 0.26930442-0.04644871j, -0.46045639-0.55359506j, -0.0737605-0.52580999j + 0.19903677-0.43076659j, -0.50320649-0.44750373j, -0.01617065-0.52755812j, -0.19729219+0.06200712j - # Initial states are two-mode squeezed states, - # applied to alternating pairs of modes. - S2gate(1.0, 0.0) | [0, 2] - S2gate(1.0, 0.0) | [1, 3] + # Initial states are two-mode squeezed states + S2gate(1.0, 0.0) | [0, 4] + S2gate(1.0, 0.0) | [1, 5] + S2gate(1.0, 0.0) | [2, 6] + S2gate(1.0, 0.0) | [3, 7] - # Apply the unitary matrix above to - # the first pair of modes, as well - # as a beamsplitter - Interferometer(U) | [0, 1] - BSgate(0.543, -0.123) | [0, 1] + # Apply the unitary matrix above to + # the first pair of modes, as well + # as a beamsplitter + Interferometer(U) | [0, 1, 2, 3] + BSgate(0.543, -0.123) | [0, 1] - # Duplicate the above unitary for - # the second pair of modes - Interferometer(U) | [2, 3] - BSgate(0.543, -0.123) | [2, 3] + # Duplicate the above unitary for + # the second pair of modes + Interferometer(U) | [4, 5, 6, 7] + BSgate(0.543, -0.123) | [4, 5] - # Perform a PNR measurement in the Fock basis - MeasureFock() | [0, 1, 2, 3] + # Perform a PNR measurement in the Fock basis + MeasureFock() | [0, 1, 2, 3, 4, 5, 6, 7] -.. note:: You may use :func:`~.random_interferometer` to generate arbitrary random unitaries. +**Note:** You may use ``random_interferometer`` to generate arbitrary random unitaries. This program will execute following the same steps as above; ``StarshipEngine`` will automatically -compile the program to match the layout of the chip described in :ref:`first_program`. +compile the program to match the layout of the chip. 
You may wish to view the compiled program; this can be easily done in Python using -the :meth:`~.Program.compile` method: +the ``Program.compile`` method: >>> from strawberryfields import StarshipEngine >>> from strawberryfields.io import load >>> prog = load("test.xbb") ->>> prog = prog.compile("chip0") +>>> prog = prog.compile("chip2") >>> prog.print() -S2gate(1, 0) | (q[0], q[2]) -S2gate(1, 0) | (q[1], q[3]) -Rgate(0.9355) | (q[0]) -BSgate(0.7854, 1.571) | (q[0], q[1]) -Rgate(4.886) | (q[0]) -BSgate(0.7854, 1.571) | (q[0], q[1]) -Rgate(-0.3742) | (q[0]) -Rgate(-0.05099) | (q[1]) -Rgate(0.9355) | (q[2]) -BSgate(0.7854, 1.571) | (q[2], q[3]) -Rgate(4.886) | (q[2]) -BSgate(0.7854, 1.571) | (q[2], q[3]) -Rgate(-0.3742) | (q[2]) -Rgate(-0.05099) | (q[3]) -MeasureFock | (q[0], q[1], q[2], q[3]) - -and even saved as a new Blackbird script using the :func:`~io.save` function: +S2gate(1, 0) | (q[0], q[4]) +S2gate(1, 0) | (q[3], q[7]) +S2gate(1, 0) | (q[2], q[6]) +MZgate(1.573, 4.368) | (q[2], q[3]) +MZgate(1.573, 4.368) | (q[6], q[7]) +S2gate(1, 0) | (q[1], q[5]) +MZgate(1.228, 5.006) | (q[0], q[1]) +MZgate(4.414, 3.859) | (q[1], q[2]) +MZgate(2.98, 3.316) | (q[2], q[3]) +Rgate(-0.7501) | (q[3]) +MZgate(5.397, 5.494) | (q[0], q[1]) +MZgate(5.152, 4.891) | (q[1], q[2]) +Rgate(2.544) | (q[2]) +MZgate(1.228, 5.006) | (q[4], q[5]) +MZgate(4.414, 3.859) | (q[5], q[6]) +MZgate(2.98, 3.316) | (q[6], q[7]) +Rgate(-0.7501) | (q[7]) +MZgate(5.397, 5.494) | (q[4], q[5]) +MZgate(5.152, 4.891) | (q[5], q[6]) +Rgate(2.544) | (q[6]) +Rgate(-1.173) | (q[1]) +Rgate(1.902) | (q[4]) +Rgate(1.902) | (q[0]) +Rgate(-1.173) | (q[5]) +MeasureFock | (q[0], q[1], q[2], q[3], q[4], q[5], q[6], q[7]) + +and even saved as a new Blackbird script using the ``io.save`` function: >>> from strawberryfields.io import save >>> save("test_compiled.xbb", prog) + + +Tips and tricks +--------------- + +.. code-block:: python3 + + from strawberryfields.utils import operation + +We can define an operation to make it easier to apply the same unitary +to both signal and idler modes. + +.. code-block:: python3 + + @operation(4) + def unitary(q): + ops.Interferometer(U) | q + ops.BSgate(0.543, 0.123) | (q[2], q[0]) + + prog = sf.Program(8) + + with prog.context as q: + ops.S2gate(1.0) | (q[0], q[4]) + ops.S2gate(1.0) | (q[1], q[5]) + ops.S2gate(1.0) | (q[2], q[6]) + ops.S2gate(1.0) | (q[3], q[7]) + + unitary() | q[:4] + unitary() | q[4:] + + +Embedding bipartite graphs +-------------------------- + +We can embed bipartite graphs, with the restriction that the singular +values form the set :math:`\{0, d\}` for some real value :math:`d`. + +The matrix :math:`B` represents the edges between the two sets of +vertices in the graph, and :math:`A` is the full adjacency matrix +:math:`A = \begin{bmatrix}0 & B\\ B^T & 0\end{bmatrix}`. Here, we will +consider a complete bipartite graph, since we know that the singular +values are of the form :math:`\{d, 0\}`. + +.. 
code-block:: python3 + + B = np.ones([4, 4]) + A = np.block([[0*B, B], [B.T, 0*B]]) + + prog = sf.Program(8) + + # the following mean photon number per mode + # quantity is set to ensure that the singular values + # are scaled such that all squeezers have value 1 + m = 0.345274461385554870545 + + with prog.context as q: + ops.BipartiteGraphEmbed(A, mean_photon_per_mode=m) | q + ops.MeasureFock() | q + + +>>> prog.compile("chip2").print() +S2gate(1, 0) | (q[0], q[4]) +S2gate(0, 0) | (q[3], q[7]) +S2gate(0, 0) | (q[2], q[6]) +MZgate(3.598, 5.444) | (q[2], q[3]) +MZgate(3.598, 5.444) | (q[6], q[7]) +S2gate(0, 0) | (q[1], q[5]) +MZgate(0, 5.236) | (q[0], q[1]) +MZgate(4.886, 5.496) | (q[1], q[2]) +MZgate(0.7106, 4.492) | (q[2], q[3]) +Rgate(0.9284) | (q[3]) +MZgate(2.922, 3.142) | (q[0], q[1]) +MZgate(4.528, 3.734) | (q[1], q[2]) +Rgate(-2.51) | (q[2]) +MZgate(0, 5.236) | (q[4], q[5]) +MZgate(4.886, 5.496) | (q[5], q[6]) +MZgate(0.7106, 4.492) | (q[6], q[7]) +Rgate(0.9284) | (q[7]) +MZgate(2.922, 3.142) | (q[4], q[5]) +MZgate(4.528, 3.734) | (q[5], q[6]) +Rgate(-2.51) | (q[6]) +Rgate(-2.51) | (q[1]) +Rgate(-0.8273) | (q[4]) +Rgate(-0.8273) | (q[0]) +Rgate(-2.51) | (q[5]) +MeasureFock | (q[0], q[1], q[2], q[3], q[4], q[5], q[6], q[7]) + +The squeezing values required to embed this bipartite graph are given by +the following relation: + +>>> from thewalrus.quantum import find_scaling_adjacency_matrix +>>> c = find_scaling_adjacency_matrix(A, 2*4*0.345274461385554870545) +>>> set(np.arctanh(np.linalg.svd(c*A)[1])) +{0.0, 1.0000000000000002} + +Note that the above squeezing values must be of the form :math:`\{0,1\}` +to be embedded on the chip. Consider a bipartite graph where this is not +the case: + +>>> B = np.array([[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 1, 1], [1, 0, 1, 0]]) +>>> A = np.block([[0*B, B], [B.T, 0*B]]) +>>> c = find_scaling_adjacency_matrix(A, 2*4*1) +>>> set(np.arctanh(np.linalg.svd(c*A)[1])) +{0.0, +3.2937343775007984e-32, +0.17674864137317442, +0.17674864137317453, +0.8180954232791708, +0.8180954232791715, +1.3361892276414615} + +The program will fail to compile for chip2: + +.. code-block:: python3 + + prog = sf.Program(8) + + with prog.context as q: + ops.BipartiteGraphEmbed(A, mean_photon_per_mode=1) | q + ops.MeasureFock() | q + + prog.compile("chip2").print() + +.. code-block:: bash + + --------------------------------------------------------------------------- + + CircuitError Traceback (most recent call last) + + in + 5 ops.MeasureFock() | q + 6 + ----> 7 prog.compile("chip2").print() + + + ~/Dropbox/Work/Xanadu/sf_cloud/strawberryfields/program.py in compile(self, target, **kwargs) + 522 # does the circuit spec have its own compilation method? + 523 if db.compile is not None: + --> 524 seq = db.compile(seq, self.register) + 525 + 526 # create the compiled Program + + + ~/Dropbox/Work/Xanadu/sf_cloud/strawberryfields/circuitspecs/chip2.py in compile(self, seq, registers) + 137 raise CircuitError( + 138 "Incorrect squeezing value(s) (r, phi)={}. Allowed squeezing " + --> 139 "value(s) are (r, phi)={}.".format(wrong_params, allowed_sq_value) + 140 ) + 141 + + + CircuitError: Incorrect squeezing value(s) (r, phi)={(1.336, 0.0), (0.177, 0.0), (0.818, 0.0)}. Allowed squeezing value(s) are (r, phi)={(1, 0.0), (0.0, 0.0)}. diff --git a/doc/introduction/tutorials.rst b/doc/introduction/tutorials.rst index c3c292309..e9ad51849 100644 --- a/doc/introduction/tutorials.rst +++ b/doc/introduction/tutorials.rst @@ -133,3 +133,22 @@ Algorithms

+ +Demo +---- + +.. toctree:: + :hidden: + :maxdepth: 1 + + /introduction/starship + +.. customgalleryitem:: + :tooltip: StarshipEngine + :description: :doc:`/introduction/starship` + :figure: /gallery/gate_visualisation/GateVisualisation.gif + +.. raw:: html + +
+
diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index de7649c07..2a66eea4b 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -12,79 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. r""" -Configuration -============= - -**Module name:** :mod:`strawberryfields.configuration` - -.. currentmodule:: strawberryfields.configuration - This module contains the :class:`Configuration` class, which is used to load, store, save, and modify configuration options for Strawberry Fields. - -Behaviour --------- - -On first import, Strawberry Fields attempts to load the configuration file `config.toml`, by -scanning the following three directories in order of preference: - -1. The current directory -2. The path stored in the environment variable ``SF_CONF`` -3. The default user configuration directory: - - * On Linux: ``~/.config/strawberryfields`` - * On Windows: ``~C:\Users\USERNAME\AppData\Local\Xanadu\strawberryfields`` - * On MacOS: ``~/Library/Application\ Support/strawberryfields`` - -If no configuration file is found, a warning message will be displayed in the logs, -and all device parameters will need to be passed as keyword arguments when -loading the device. - -The user can access the initialized configuration via `strawberryfields.config`, view the -loaded configuration filepath, print the configurations options, access and modify -them via keys, and save/load new configuration files. - -Configuration files -------------------- - -The configuration file `config.toml` uses the `TOML standard `_, -and has the following format: - -.. code-block:: toml - - [api] - # Options for the Strawberry Fields Cloud API - authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" - hostname = "localhost" - use_ssl = true - -Summary of options ------------------- - -SF_API_USE_SSL: - Whether to use SSL or not when connecting to the API. True or False. -SF_API_HOSTNAME: - The hostname of the server to connect to. Defaults to localhost. Must be one of the allowed - hosts. -SF_API_AUTHENTICATION_TOKEN: - The authentication token to use when connecting to the API. Will be sent with every request in - the header. - -Summary of methods ------------------- - -.. currentmodule:: strawberryfields.configuration.Configuration - -.. autosummary:: - path - load - save - -Code details -~~~~~~~~~~~~ - -.. currentmodule:: strawberryfields.configuration - """ import os import logging as log From 7e67e688d78f473f4c5b9cb0f328d39a81d2b4c7 Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 18 Feb 2020 13:55:27 -0500 Subject: [PATCH 114/335] First go with having function in the configuration.py --- strawberryfields/configuration.py | 72 +++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 2a66eea4b..cf1c5cc9d 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -55,6 +55,78 @@ class ConfigurationError(Exception): """Exception used for configuration errors""" +_user_config_dir = user_config_dir("strawberryfields", "Xanadu") +_env_config_dir = os.environ.get("SF_CONF", "") + +# This function will be used by the Connection object +def read_config(name="config.toml", **kwargs): + _name = name + + # Search the current directory, the directory under environment + # variable SF_CONF, and default user config directory, in that order. 
+ directories = [os.getcwd(), _env_config_dir, _user_config_dir] + for directory in directories: + _filepath = os.path.join(directory, _name) + try: + config = load_config_file.load(_filepath) + except FileNotFoundError: + log.info("No Strawberry Fields configuration file found.") + config = False + + if config: + self.update_config() + else: + log.info("No Strawberry Fields configuration file found.") + # TODO: add logic for parsing from environmental variables + +# This function will be user-facing +# calling on the save_config function +def write_config_file(name="config.toml", path=_user_config_dir, **kwargs): + + # TODO: create a config object similar to DEFAULT_CONFIG + save_config_file(path, config) + +def update_config(_config): + """Updates the configuration from either a loaded configuration + file, or from an environment variable. + + The environment variable takes precedence.""" + for section, section_config in _config.items(): + env_prefix = "SF_{}_".format(section.upper()) + + for key in section_config: + # Environment variables take precedence + env = env_prefix + key.upper() + + if env in os.environ: + # Update from environment variable + _config[section][key] = parse_environment_variable(env, os.environ[env]) + elif _config_file and key in _config_file[section]: + # Update from configuration file + _config[section][key] = _config_file[section][key] + return _config + +def load_config_file(filepath): + """Load a configuration file. + + Args: + filepath (str): path to the configuration file + """ + with open(filepath, "r") as f: + _config_file = toml.load(f) + + return _config_file + +def save_config_file(filepath, config): + """Save a configuration file. + + Args: + filepath (str): path to the configuration file + config (dict of str: dict of str: Union[boolean, str, float]) + """ + with open(filepath, "w") as f: + toml.dump(config, f) + class Configuration: """Configuration class. From 221a5f34c6a2b87355ff74c74b714626b55f4448 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 14 Feb 2020 17:40:23 -0500 Subject: [PATCH 115/335] Refactor to Engine/Connection API --- strawberryfields/engine.py | 512 +++++++++++++++++++++++++-------- tests/frontend/test_engine.py | 524 +++++++++++++--------------------- 2 files changed, 589 insertions(+), 447 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 0053cae3f..d12386d09 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -19,16 +19,20 @@ """ import abc import collections.abc +from datetime import datetime +import enum +import json +import requests import time +from urllib.parse import urljoin import numpy as np from .backends import load_backend -from .backends.base import (NotApplicableError, BaseBackend) +from .backends.base import NotApplicableError, BaseBackend -from strawberryfields.api_client import APIClient, Job, JobNotQueuedError, JobExecutionError +from strawberryfields.configuration import Configuration from strawberryfields.io import to_blackbird -from strawberryfields.configuration import DEFAULT_CONFIG class OneJobAtATimeError(Exception): @@ -39,7 +43,6 @@ class OneJobAtATimeError(Exception): __all__ = ["Result", "BaseEngine", "LocalEngine"] - class Result: """Result of a quantum computation. 
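+
+# Illustrative usage sketch for the helpers above (an assumption about the intended
+# API -- ``read_config`` and ``write_config_file`` are still stubs in this patch, so
+# the calls and return value below are not final):
+#
+#     import strawberryfields.configuration as conf
+#
+#     # persist an authentication token to the default user configuration directory
+#     conf.write_config_file(authentication_token="MY_TOKEN")
+#
+#     # later: search the CWD, $SF_CONF, and the user directory for config.toml
+#     settings = conf.read_config()
+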
@@ -88,8 +91,9 @@ def __init__(self, samples): self._state = None # ``samples`` arrives as a list of arrays, need to convert here to a multidimensional array + # TODO adjust the format as specified in the ADR if len(np.shape(samples)) > 1: - samples = np.stack(samples, 1) + samples = np.vstack(samples) self._samples = samples @property @@ -126,6 +130,7 @@ def state(self): Returns: BaseState: quantum state returned from program execution """ + # TODO raise error if called for remote job return self._state def __str__(self): @@ -351,7 +356,9 @@ def _broadcast_nones(val, dim): # signatures of methods in Operations to remain cleaner, since only # Measurements need to know about shots - prev = self.run_progs[-1] if self.run_progs else None # previous program segment + prev = ( + self.run_progs[-1] if self.run_progs else None + ) # previous program segment for p in program: if prev is None: # initialize the backend @@ -360,7 +367,9 @@ def _broadcast_nones(val, dim): # there was a previous program segment if not p.can_follow(prev): raise RuntimeError( - "Register mismatch: program {}, '{}'.".format(len(self.run_progs), p.name) + "Register mismatch: program {}, '{}'.".format( + len(self.run_progs), p.name + ) ) # Copy the latest measured values in the RegRefs of p. @@ -384,7 +393,8 @@ def _broadcast_nones(val, dim): self._run_program(p, **kwargs) shots = kwargs.get("shots", 1) self.samples = [ - _broadcast_nones(p.reg_refs[k].val, shots) for k in sorted(p.reg_refs) + _broadcast_nones(p.reg_refs[k].val, shots) + for k in sorted(p.reg_refs) ] self.run_progs.append(p) @@ -456,7 +466,9 @@ def _run_program(self, prog, **kwargs): except NotApplicableError: # command is not applicable to the current backend type raise NotApplicableError( - "The operation {} cannot be used with {}.".format(cmd.op, self.backend) + "The operation {} cannot be used with {}.".format( + cmd.op, self.backend + ) ) from None except NotImplementedError: # command not directly supported by backend API @@ -518,7 +530,9 @@ def run(self, program, *, args=None, compile_options=None, run_options=None): key: temp_run_options[key] for key in temp_run_options.keys() & eng_run_keys } - result = super()._run(program, args=args, compile_options=compile_options, **eng_run_options) + result = super()._run( + program, args=args, compile_options=compile_options, **eng_run_options + ) modes = temp_run_options["modes"] @@ -531,161 +545,427 @@ def run(self, program, *, args=None, compile_options=None, run_options=None): return result -class StarshipEngine(BaseEngine): +class InvalidEngineTargetError(Exception): + """Raised when an invalid engine target is provided. """ - Starship quantum program executor engine. - Executes :class:`.Program` instances on the chosen remote backend, and makes - the results available via :class:`.Result`. - Args: - backend (str, BaseBackend): name of the backend, or a pre-constructed backend instance - polling_delay_seconds (int): the number of seconds to wait between queries when polling for - job results +class IncompleteJobError(Exception): + """Raised when an invalid action is performed on an incomplete job. """ - # This engine will execute jobs remotely. - REMOTE = True - def __init__(self, backend, polling_delay_seconds=1, **kwargs): - super().__init__(backend) +class CreateJobRequestError(Exception): + """Raised when a request to create a job fails. 
+ """ - api_client_params = {k: v for k, v in kwargs.items() if k in DEFAULT_CONFIG["api"].keys()} - self.client = APIClient(**api_client_params) - self.polling_delay_seconds = polling_delay_seconds - self.jobs = [] - def __str__(self): - return self.__class__.__name__ + "({})".format(self.backend_name) +class GetAllJobsRequestError(Exception): + """Raised when a request to get all jobs fails. + """ - def _init_backend(self, *args): - """ - TODO: This does not do anything right now. - """ - # Do nothing for now... - pass - def reset(self): - """ - Reset must be called in order to submit a new job. This clears the job queue as well as - any ran Programs. +class GetJobRequestError(Exception): + """Raised when a request to get a job fails. + """ + + +class GetJobResultRequestError(Exception): + """Raised when a request to get a job result fails. + """ + + +class GetJobCircuitRequestError(Exception): + """Raised when a request to get a job circuit fails. + """ + + +class CancelJobRequestError(Exception): + """Raised when a request to cancel a job fails. + """ + + +class RefreshTerminalJobError(Exception): + """Raised when attempting to refresh a completed, failed, or cancelled job.""" + + +class CancelTerminalJobError(Exception): + """Raised when attempting to cancel a completed, failed, or cancelled job.""" + + +class StarshipEngine: + """A quantum program executor engine that that provides a simple interface for + running remote jobs in a synchronous or asynchronous manner. + + **Example:** + + The following example instantiates a `StarshipEngine` with default configuration, and + runs jobs both synchronously and asynchronously. + + .. code-block:: python + + engine = StarshipEngine("chip2") + + # Run a job synchronously + job = engine.run(program, shots=1) + # (Engine blocks until job is complete) + job.status # JobStatus.COMPLETE + job.result # [[0, 1, 0, 2, 1, 0, 0, 0]] + + # Run a job synchronously, but cancel it before it is completed + job = engine.run(program, shots=1) + ^C # KeyboardInterrupt cancels the job + job.status # "cancelled" + job.result # JobCancelledError + + # Run a job asynchronously + job = engine.run_async(program, shots=1) + job.status # "queued" + job.result # RefreshTerminalJobError + # (After some time...) + job.refresh() + job.status # "complete" + job.result # [[0, 1, 0, 2, 1, 0, 0, 0]] + + Args: + target (str): the target backend + connection (Connection): a connection to the remote job execution platform + """ + + POLLING_INTERVAL_SECONDS = 1 + VALID_TARGETS = ("chip2",) + + def __init__(self, target, connection=None): + if target not in self.VALID_TARGETS: + raise InvalidEngineTargetError("Invalid engine target: {}".format(target)) + if connection is None: + # TODO use the global config once implemented + config = Configuration().api + connection = Connection( + token=config["authentication_token"], + host=config["hostname"], + port=config["port"], + use_ssl=config["use_ssl"], + ) + + self._target = target + self._connection = connection + + @property + def target(self): + """The target backend used by the engine. + + Returns: + str: the target backend used by the engine """ - super().reset(backend_options={}) - self.jobs.clear() + return self._target + + @property + def connection(self): + """Returns the connection object used by the engine. - def _get_blackbird(self, shots, program): + Returns: + strawberryfields.engine.Connection: the connection object used by the engine """ - Returns a Blackbird object to be sent later to the server when creating a job. 
- Assumes the current backend as the target. + return self._connection + + def run(self, program, shots=1): + """Runs a remote job synchronously. + + In this synchronous mode, the engine blocks until the job is completed, failed, or + cancelled, at which point the `Job` is returned. Args: - shots (int): the number of shots - program (Program): program to be converted to Blackbird code + program (Program): the quantum circuit + shots (int): the number of shots for which to run the job Returns: - blackbird.BlackbirdProgram + Job: the resulting remote job """ - bb = to_blackbird(program, version="1.0") + job = self.run_async(program) + try: + # TODO worth setting a timeout here? + while True: + job.refresh() + if job.status in (JobStatus.COMPLETE, JobStatus.FAILED): + return job + time.sleep(self.POLLING_INTERVAL_SECONDS) + except KeyboardInterrupt: + self._connection.cancel_job(job_id) + job.status = JobStatus.CANCELLED + return job - # TODO: This is potentially not needed here - bb._target["name"] = self.backend_name - bb._target["options"] = {"shots": shots, **program.backend_options} - return bb + def run_async(self, program, shots=1): + """Runs a remote job asynchronously. - def _queue_job(self, job_content): - """ - Create a Job instance based on job_content, and send the job to the API. Append to list - of jobs. + In this asynchronous mode, a `Job` is returned immediately, and the user can + manually refresh the status of the job. Args: - job_content (str): the Blackbird code to execute + program (Program): the quantum circuit + shots (int): the number of shots for which to run the job Returns: - (strawberryfields.api_client.Job): a Job instance referencing the queued job + Job: the created remote job """ - job = Job(client=self.client) - job.manager.create(circuit=job_content) - self.jobs.append(job) - print("Job {} is sent to server.".format(job.id.value)) - return job + bb = to_blackbird(program) + bb._target["name"] = self._target + bb._target["options"] = {"shots": shots} + # bb._target["options"] = {"shots": shots, **program.backend_options} + return self._connection.create_job(bb.serialize()) - def _run_program(self, program, **kwargs): - """ - Given a compiled program, gets the blackbird circuit code and creates (or resumes) a job - via the API. If the job is completed, returns the job result. - A queued job can be interrupted by a ``KeyboardInterrupt`` event, at which point if the - job ID was retrieved from the server, the job will be accessible via - :meth:`~.Starship.jobs`. +class Connection: + """Manages remote connections to the remote job execution platform and exposes + advanced job operations. - Args: - program (strawberryfields.program.Program): program to be executed remotely + For basic usage, it is not necessary to manually instantiate this object; the user + is encouraged to use the higher-level interface provided by `StarshipEngine`. + + Args: + TODO + """ + + # TODO adjust this + MAX_JOBS_REQUESTED = 100 + JOB_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" + USER_AGENT = "strawberryfields-client/0.1" + + def __init__( + self, token, host=None, port=None, use_ssl=None, debug=None, + ): + # TODO use `read_config` when implemented + # e.g. read_config(host="abc", port=123) + + self._token = token + self._host = host + self._port = port + self._use_ssl = use_ssl + # TODO what is this used for? 
+ self._debug = debug + + @property + def token(self): + return self._token + + @property + def host(self): + return self._host + + @property + def port(self): + return self._port + + @property + def use_ssl(self): + return self._use_ssl + + @property + def debug(self): + return self._debug + + @property + def base_url(self): + return "http{}://{}:{}".format( + "s" if self.use_ssl else "", self.host, self.port + ) + + # TODO think about using serializers for the request wrappers - future PR maybe? + + def create_job(self, circuit): + response = self._post("/jobs", data=json.dumps({"circuit": circuit})) + if response.status_code == 201: + return Job( + id_=response.json()["id"], + status=JobStatus(response.json()["status"]), + connection=self, + ) + raise CreateJobRequestError(self._request_error_message(response)) + + def get_all_jobs(self, after=datetime(1970, 1, 1)): + # TODO figure out how to handle pagination from the user's perspective (if at all) + # TODO tentative until corresponding feature on platform side is finalized + response = self._get("/jobs?page[size]={}".format(self.MAX_JOBS_REQUESTED)) + if response.status_code == 200: + return [ + Job(id_=info["id"], status=info["status"], connection=self) + for info in response.json()["data"] + if datetime.strptime(info["created_at"], self.JOB_TIMESTAMP_FORMAT) + > after + ] + raise GetAllJobsRequestError(self._request_error_message(response)) + + def get_job(self, job_id): + response = self._get("/jobs/{}".format(job_id)) + if response.status_code == 200: + return Job( + id_=response.json()["id"], + status=JobStatus(response.json()["status"]), + connection=self, + ) + raise GetJobRequestError(self._request_error_message(response)) + + def get_job_status(self, job_id): + return JobStatus(self.get_job(job_id).status) + + def get_job_result(self, job_id): + # TODO get numpy here? + response = self._get("/jobs/{}/result".format(job_id)) + if response.status_code == 200: + return Result(response.json()["result"]) + raise GetJobResultRequestError(self._request_error_message(response)) + + # TODO is this necessary? + def get_job_circuit(self, job_id): + response = self._get("/jobs/{}/circuit".format(job_id)) + if response.status_code == 200: + return response.json()["circuit"] + raise GetJobCircuitRequestError(self._request_error_message(response)) + + def cancel_job(self, job_id): + response = self._patch( + "/jobs/{}".format(job_id), body={"status", JobStatus.CANCELLED.value} + ) + if response.status_code == 204: + return True + raise CancelJobRequestError(self._request_error_message(response)) + + def _get(self, path, **kwargs): + return self._request(RequestMethod.GET, path, **kwargs) + + def _post(self, path, **kwargs): + return self._request(RequestMethod.POST, path, **kwargs) + + def _patch(self, path, **kwargs): + return self._request(RequestMethod.PATCH, path, **kwargs) + + def _request(self, method, path, **kwargs): + return getattr(requests, method.value)( + urljoin(self.base_url, path), + headers={"Authorization": self.token, "User-Agent": self.USER_AGENT}, + **kwargs + ) + + def _request_error_message(self, response): + body = response.json() + return "{} ({}): {}".format( + body.get("status_code", ""), body.get("code", ""), body.get("detail", "") + ) + + +class RequestMethod(enum.Enum): + """Defines the valid request methods for messages sent to the remote job platform. + """ + GET = "get" + POST = "post" + PATCH = "patch" + + +class Job: + """Represents a remote job that can be queried for its status or result. 
+ + This object should not be instantiated directly, but returned by an `Engine` or + `Connection` when a job is run. + + Args: + id_ (str): the job UUID + status (JobStatus): the job status + connection (Connection): the connection over which the job is managed + """ + + def __init__(self, id_, status, connection): + self._id = id_ + self._status = status + self._connection = connection + + # TODO need this? + self._circuit = None + self._result = None + + @property + def id(self): + return self._id + + @property + def status(self): + return self._status + + @property + def result(self): + """The job result. + + This is only defined for complete jobs, and raises a `JobNotCompleteError` for + any other status. Returns: - (list): a list representing the result samples + Result: the result + """ + if self.status != JobStatus.COMPLETE: + raise JobNotCompleteError( + "The result is undefined for jobs that are not complete " + "(current status: {})".format(self.status.value) + ) + return self._result - Raises: - Exception: In case a job could not be submitted or completed. - TypeError: In case a job is already queued and a user is trying to submit a new job. + def refresh(self): + """Refreshes the status of the job, along with the job result if the job is + newly completed. """ - if self.jobs: - raise OneJobAtATimeError( - "A job is already queued. Please reset the engine and try again." + if self.status.is_terminal: + raise RefreshTerminalJobError( + "A {} job cannot be refreshed".format(self.status.value) ) + self._status = self._connection.get_job_status(self.id) + if self._status == JobStatus.COMPLETE: + self._result = self._connection.get_job_result(self.id) - kwargs.update(program.run_options) - job_content = self._get_blackbird(program=program, **kwargs).serialize() - job = self._queue_job(job_content) + def cancel(self): + """Cancels the job. - try: - while not job.is_failed and not job.is_complete: - job.reload() - time.sleep(self.polling_delay_seconds) - except KeyboardInterrupt: - if job.id: - print("Job {} is queued in the background.".format(job.id.value)) - else: - self.reset() - raise JobNotQueuedError("Job was not sent to server. Please try again.") + Only a non-terminal (open or queued job) can be cancelled; a + `CancelTerminalJobError` is raised otherwise. + """ + if self.status.is_terminal: + raise CancelTerminalJobError( + "A {} job cannot be cancelled".format(self.status.value) + ) + self._connection.cancel_job(self.id) - # Job either failed or is complete - in either case, clear the job queue so that the engine is - # ready for future jobs. - self.reset() - if job.is_failed: - message = str(job.manager.http_response_data["meta"]) - raise JobExecutionError(message) - elif job.is_complete: - job.result.manager.get() - return job.result.result.value +class JobStatus(enum.Enum): + """Represents the status of a remote job. - def run(self, program, shots=1, **kwargs): - """Compile the given program and execute it by queuing a job in the Starship. + This class maps a set of job statuses to the string representations returned by the + remote platform. + """ - For the :class:`Program` instance given as input, the following happens: + OPEN = "open" + QUEUED = "queued" + CANCELLED = "cancelled" + COMPLETE = "complete" + FAILED = "failed" - * The Program instance is compiled for the target backend. - * The compiled program is sent as a job to the Starship - * The measurement results of each subsystem (if any) are stored in the :attr:`~.BaseEngine.samples`. 
- * The compiled program is appended to self.run_progs. - * The queued or completed jobs are appended to self.jobs. + def __repr__(self): + return self.value - Finally, the result of the computation is returned. + def __str__(self): + return self.value - Args: - program (Program, Sequence[Program]): quantum programs to run - shots (int): number of times the program measurement evaluation is repeated + @property + def is_terminal(self): + """Checks if this status represents a final and immutable state. - The ``kwargs`` keyword arguments are passed to :meth:`_run_program`. + This method is generally used to determine if an operation is valid for a given + status. Returns: - Result: results of the computation + bool: True if the job is terminal, and False otherwise """ - - return super()._run(program, args={}, compile_options={}, shots=shots, **kwargs) + return self in (JobStatus.CANCELLED, JobStatus.COMPLETE, JobStatus.FAILED) class Engine(LocalEngine): """dummy""" + # alias for backwards compatibility __doc__ = LocalEngine.__doc__ diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 4c1f14b81..c1574b54b 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -12,22 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the License. r"""Unit tests for engine.py""" +from datetime import datetime + import pytest from unittest.mock import MagicMock, call import strawberryfields as sf -from strawberryfields import StarshipEngine from strawberryfields import ops -from strawberryfields.api_client import APIClient, JobExecutionError from strawberryfields.backends.base import BaseBackend -pytestmark = pytest.mark.frontend - +from strawberryfields.configuration import Configuration +from strawberryfields.engine import ( + StarshipEngine, + Connection, + Job, + JobStatus, + Result, + InvalidEngineTargetError, + IncompleteJobError, + CreateJobRequestError, + GetAllJobsRequestError, + GetJobRequestError, + GetJobResultRequestError, + GetJobCircuitRequestError, + CancelJobRequestError, + RefreshTerminalJobError, + CancelTerminalJobError, +) -@pytest.fixture -def eng(backend): - """Engine fixture.""" - return sf.LocalEngine(backend) +pytestmark = pytest.mark.frontend @pytest.fixture @@ -39,336 +52,185 @@ def prog(backend): return prog +def mock_return(return_value): + return lambda *args, **kwargs: return_value + + +def mock_response(status_code, json_return_value): + response = MagicMock() + response.status_code = status_code + response.json.return_value = json_return_value + return response + + @pytest.fixture -def starship_engine(monkeypatch): - """ - Create a reusable StarshipEngine fixture without a real APIClient. 
- """ - mock_api_client = MagicMock() - monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) - engine = StarshipEngine("chip0", polling_delay_seconds=0) - return engine - - -class TestEngine: - """Test basic engine functionality""" - - def test_load_backend(self): - """Backend can be correctly loaded via strings""" - eng = sf.LocalEngine("base") - assert isinstance(eng.backend, BaseBackend) - - def test_bad_backend(self): - """Backend must be a string or a BaseBackend instance.""" - with pytest.raises(TypeError, match="backend must be a string or a BaseBackend instance"): - eng = sf.LocalEngine(0) - - -class TestEngineProgramInteraction: - """Test the Engine class and its interaction with Program instances.""" - - def test_history(self, eng, prog): - """Engine history.""" - # no programs have been run - assert not eng.run_progs - eng.run(prog) - # one program has been run - assert len(eng.run_progs) == 1 - assert eng.run_progs[-1] == prog # no compilation required with BaseBackend - - def test_reset(self, eng, prog): - """Running independent programs with an engine reset in between.""" - assert not eng.run_progs - eng.run(prog) - assert len(eng.run_progs) == 1 - - eng.reset() - assert not eng.run_progs - p2 = sf.Program(3) - with p2.context as q: - ops.Rgate(1.0) | q[2] - eng.run(p2) - assert len(eng.run_progs) == 1 - - def test_regref_mismatch(self, eng): - """Running incompatible programs sequentially gives an error.""" - p1 = sf.Program(3) - p2 = sf.Program(p1) - p1.locked = False - with p1.context as q: - ops.Del | q[0] - - with pytest.raises(RuntimeError, match="Register mismatch"): - eng.run([p1, p2]) - - def test_sequential_programs(self, eng): - """Running several program segments sequentially.""" - D = ops.Dgate(0.2) - p1 = sf.Program(3) - with p1.context as q: - D | q[1] - ops.Del | q[0] - assert not eng.run_progs - eng.run(p1) - assert len(eng.run_progs) == 1 - - # p2 succeeds p1 - p2 = sf.Program(p1) - with p2.context as q: - D | q[1] - eng.run(p2) - assert len(eng.run_progs) == 2 - - # p2 does not alter the register so it can be repeated - eng.run([p2] * 3) - assert len(eng.run_progs) == 5 - - eng.reset() - assert not eng.run_progs - - def test_print_applied(self, eng): - """Tests the printing of executed programs.""" - a = 0.23 - r = 0.1 - - def inspect(): - res = [] - print_fn = lambda x: res.append(x.__str__()) - eng.print_applied(print_fn) - return res - - p1 = sf.Program(2) - with p1.context as q: - ops.Dgate(a) | q[1] - ops.Sgate(r) | q[1] - - eng.run(p1) - expected1 = ["Run 0:", "Dgate({}, 0) | (q[1])".format(a), "Sgate({}, 0) | (q[1])".format(r)] - assert inspect() == expected1 - - # run the program again - eng.reset() - eng.run(p1) - assert inspect() == expected1 - - # apply more commands to the same backend - p2 = sf.Program(2) - with p2.context as q: - ops.Rgate(r) | q[1] - - eng.run(p2) - expected2 = expected1 + ["Run 1:", "Rgate({}) | (q[1])".format(r)] - assert inspect() == expected2 - - # reapply history - eng.reset() - eng.run([p1, p2]) - assert inspect() == expected2 +def config(): + # TODO anything to do here? + return Configuration() + + +# TODO should mock an actual http server here (e.g. 
with `http.server`) +class MockServer: + # Fake a job processing delay + REQUESTS_BEFORE_COMPLETE = 3 + + def __init__(self): + self.request_count = 0 + + def get_job_status(self, _id): + self.request_count += 1 + return ( + JobStatus.COMPLETE + if self.request_count >= self.REQUESTS_BEFORE_COMPLETE + else JobStatus.QUEUED + ) class TestStarshipEngine: - """ - Tests various methods on the remote engine StarshipEngine. - """ - - def test_init(self, monkeypatch): - """ - Tests that a StarshipEngine instance is correctly initialized when additional APIClient - parameters are passed. - """ - mock_api_client = MagicMock() - monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) - engine = StarshipEngine("chip0") - assert engine.client == mock_api_client() - assert engine.jobs == [] - assert engine.REMOTE == True - - def test_reset(self, starship_engine): - """ - Tests that StarshipEngine.jobs is correctly cleared when callling StarshipEngine.reset. - """ - starship_engine.jobs.append(MagicMock()) - assert len(starship_engine.jobs) == 1 - starship_engine.reset() - assert len(starship_engine.jobs) == 0 - - def test__get_blackbird(self, starship_engine, monkeypatch): - """ - Tests that StarshipEngine._get_blackbird returns the correct string given name, - shots, and program parameters. - """ - methods = MagicMock() - inputs = MagicMock() - - monkeypatch.setattr("strawberryfields.engine.to_blackbird", methods.to_blackbird) - - output = starship_engine._get_blackbird(inputs.shots, inputs.program) - - methods.to_blackbird.assert_called_once_with(inputs.program, version="1.0") - assert len(output._target.__setitem__.call_args_list) == 2 - assert output._target.__setitem__.call_args_list[0] == call( - "name", starship_engine.backend_name + def test_run_complete(self, config, prog, monkeypatch): + id_, result = "123", [[1, 2], [3, 4]] + + server = MockServer() + monkeypatch.setattr( + Connection, + "create_job", + mock_return( + Job(id_=id_, status=JobStatus.OPEN, connection=Connection(config)) + ), ) - assert output._target.__setitem__.call_args_list[1] == call( - "options", {"shots": inputs.shots} + monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) + monkeypatch.setattr(Connection, "get_job_result", mock_return(Result(result))) + + engine = StarshipEngine("chip2", connection=Connection(config)) + job = engine.run(prog) + + assert job.status == JobStatus.COMPLETE + assert job.result.samples.tolist() == result + + def test_run_cancelled(self, config, prog, monkeypatch): + server = MockServer() + # TODO how to test keyboard interrupt for cancel? + + def test_run_async(self): + server = MockServer() + # TODO + + +class TestConnection: + @pytest.fixture + def connection(self, config): + return Connection( + token=config.api["authentication_token"], + host=config.api["hostname"], + port=config.api["port"], + use_ssl=config.api["use_ssl"], ) - def test_queue_job(self, starship_engine, monkeypatch): - mock_job = MagicMock() - monkeypatch.setattr("strawberryfields.engine.Job", mock_job) - mock_job_content = MagicMock() - - result = starship_engine._queue_job(mock_job_content) - mock_job.assert_called_once_with(client=starship_engine.client) - result.manager.create.assert_called_once_with(circuit=mock_job_content) - assert starship_engine.jobs == [result] - - def test__run_program(self, starship_engine, monkeypatch): - """ - Tests StarshipEngine._run_program. 
Asserts that a program is converted to blackbird code, - compiled into a job content string and that the job is queued. Also asserts that a - completed job's result samples are returned. - """ - mock_to_blackbird = MagicMock() - mock__get_blackbird = MagicMock() - program = MagicMock() - mock_job = MagicMock() - mock_job.is_complete = True - mock_job.is_failed = False - - monkeypatch.setattr("strawberryfields.engine.to_blackbird", mock_to_blackbird) - monkeypatch.setattr(starship_engine, "_get_blackbird", mock__get_blackbird) - monkeypatch.setattr(starship_engine, "_queue_job", lambda job_content: mock_job) - - some_params = {"param": MagicMock()} - result = starship_engine._run_program(program, args={}, compile_options={}, **some_params) - - mock__get_blackbird.assert_called_once_with(program=program, param=some_params["param"], - args={}, compile_options={}) - - assert result == mock_job.result.result.value - - def test__run_program_fails(self, starship_engine, monkeypatch): - """ - Tests that an Exception is raised when a job has failed. - """ - mock_to_blackbird = MagicMock() - mock__get_blackbird = MagicMock() - program = MagicMock() - mock_job = MagicMock() - mock_job.is_complete = False - mock_job.is_failed = True - - monkeypatch.setattr("strawberryfields.engine.to_blackbird", mock_to_blackbird) - monkeypatch.setattr(starship_engine, "_get_blackbird", mock__get_blackbird) - monkeypatch.setattr(starship_engine, "_queue_job", lambda job_content: mock_job) - - some_params = {"param": MagicMock()} - - with pytest.raises(JobExecutionError) as e: - starship_engine._run_program(program, **some_params) - assert e.value.args[0] == str(mock_job.manager.http_response_data['meta']) - - def test__run(self, starship_engine, monkeypatch): - """ - Tests StarshipEngine._run, with the assumption that the backend is a hardware backend - that supports running only a single program. This test ensures that a program is compiled - for the hardware backend, is locked, is added to self.run_progs, that it is run and that - a Result object is returned populated with the result samples. - """ - - inputs = MagicMock() - inputs.shots = 5 - outputs = MagicMock() - methods = MagicMock() - - monkeypatch.setattr(starship_engine, "backend_name", str(inputs.mock_backend)) - monkeypatch.setattr(starship_engine, "_run_program", methods._run_program) - monkeypatch.setattr("strawberryfields.engine.Result", outputs.result) - monkeypatch.setattr(starship_engine, "backend", inputs.mock_backend) - - result = starship_engine._run(inputs.program, shots=inputs.shots, args={}, - compile_options={}) - - inputs.program.compile.assert_called_once_with(starship_engine.backend.circuit_spec) - mock_compiled_program = inputs.program.compile(starship_engine.backend_name) - mock_compiled_program.lock.assert_called_once() - methods._run_program.assert_called_once_with(mock_compiled_program, shots=inputs.shots) - assert starship_engine.samples == starship_engine._run_program( - mock_compiled_program, shots=inputs.shots + def test_create_job(self, connection, monkeypatch): + id_, status = "123", JobStatus.QUEUED + + monkeypatch.setattr( + Connection, + "_post", + mock_return(mock_response(201, {"id": id_, "status": status})), ) - assert starship_engine.run_progs == [] - assert result == outputs.result(starship_engine.samples) - - def test_run(self, starship_engine, monkeypatch): - """ - Tests StarshipEngine.run. It is expected that StarshipEngine._run is called with the correct - parameters. 
- """ - mock__run = MagicMock() - monkeypatch.setattr("strawberryfields.engine.BaseEngine._run", mock__run) - - inputs = MagicMock() - inputs.params = {"param": MagicMock()} - - starship_engine.run(inputs.program, inputs.shots, **inputs.params) - mock__run.assert_called_once_with( - inputs.program, shots=inputs.shots, param=inputs.params["param"], args={}, - compile_options={} + + job = connection.create_job("circuit") + + assert job.id == id_ + assert job.status == status + + def test_create_job_error(self, connection, monkeypatch): + monkeypatch.setattr(Connection, "_post", mock_return(mock_response(400, {}))) + + with pytest.raises(CreateJobRequestError): + connection.create_job("circuit") + + def test_get_all_jobs(self, connection, monkeypatch): + monkeypatch.setattr( + Connection, + "_get", + mock_return( + mock_response( + 200, + { + "data": [ + { + "id": str(i), + "status": JobStatus.COMPLETE, + "created_at": "2020-01-{:02d}T12:34:56.123456Z".format( + i + ), + } + for i in range(1, 10) + ] + }, + ) + ), ) - def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): - """ - This is an integration test that tests and actual program being submitted to a mock API, and - how the engine handles a successful response from the server (first by queuing a job then by - fetching the result.) - """ - - # NOTE: this is currently more of an integration test, currently a WIP / under development. - - api_client_params = {"hostname": "localhost"} - engine = StarshipEngine("chip0", polling_delay_seconds=0, **api_client_params) - - # We don't want to actually send any requests, though we should make sure POST was called - mock_api_client_post = MagicMock() - mock_get = MagicMock() - mock_get_response = MagicMock() - mock_get_response.status_code = 200 - - # Including "result" here is a little hacky, but it is here as this response is returned - # for both job.get() and job.result.get() - mock_get_response.json.return_value = {"status": "COMPLETE", "id": 1234, "result": {1: []}} - mock_get.return_value = mock_get_response - - mock_post_response = MagicMock() - mock_post_response.status_code = 201 - mock_post_response.json.return_value = {"status": "QUEUED", "id": 1234} - mock_api_client_post.return_value = mock_post_response - - monkeypatch.setattr(APIClient, "post", mock_api_client_post) - monkeypatch.setattr(APIClient, "get", mock_get) - - prog = sf.Program(4) - - sqz0 = 1.0 - sqz1 = 1.0 - phi0 = 0.574 - phi1 = 1.33 - pi = 3.14 - - with prog.context as q: - ops.S2gate(sqz0, 0.0) | (q[0], q[2]) - ops.S2gate(sqz1, 0.0) | (q[1], q[3]) - ops.Rgate(phi0) | q[0] - ops.BSgate(pi / 4, pi / 2) | (q[0], q[1]) - ops.Rgate(phi1) | q[0] - ops.BSgate(pi / 4, pi / 2) | (q[0], q[1]) - ops.Rgate(phi0) | q[2] - ops.BSgate(pi / 4, pi / 2) | (q[2], q[3]) - ops.Rgate(phi1) | q[2] - ops.BSgate(pi / 4, pi / 2) | (q[2], q[3]) - ops.MeasureFock() | q - - engine.run(prog) - - mock_api_client_post.assert_called_once() + jobs = connection.get_all_jobs(after=datetime(2020, 1, 5)) + + assert [job.id for job in jobs] == [str(i) for i in range(5, 10)] + + def test_get_all_jobs_error(self, connection, monkeypatch): + monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) + + with pytest.raises(GetAllJobsRequestError): + connection.get_all_jobs() + + def test_get_job(self, connection, monkeypatch): + id_, status = "123", JobStatus.COMPLETE + + monkeypatch.setattr( + Connection, + "_get", + mock_return(mock_response(200, {"id": id_, "status": status.value})), + ) + + job = connection.get_job(id_) + + 
assert job.id == id_ + assert job.status == status + + def test_get_job_error(self, connection, monkeypatch): + monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) + + with pytest.raises(GetJobRequestError): + connection.get_job("123") + + def test_get_job_status(self, connection, monkeypatch): + id_, status = "123", JobStatus.COMPLETE + + monkeypatch.setattr( + Connection, + "_get", + mock_return(mock_response(200, {"id": id_, "status": status.value})), + ) + + assert connection.get_job_status(id_) == status + + def test_get_job_status_error(self, connection, monkeypatch): + monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) + + with pytest.raises(GetJobRequestError): + connection.get_job_status("123") + + def test_get_job_result(self, connection, monkeypatch): + result_samples = [[1, 2], [3, 4]] + + monkeypatch.setattr( + Connection, + "_get", + mock_return(mock_response(200, {"result": result_samples})), + ) + + result = connection.get_job_result("123") + + assert result.samples.tolist() == result_samples + + def test_get_job_result_error(self, connection, monkeypatch): + monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) + + with pytest.raises(GetJobResultRequestError): + connection.get_job_result("123") From f4242b1a8e98ad17cea3511407f68dccb2dbae5e Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 19 Feb 2020 12:10:08 -0500 Subject: [PATCH 116/335] Result.samples should return (num_modes, num_shots); transpose values in tests accordingly --- strawberryfields/engine.py | 5 +++-- tests/frontend/test_engine.py | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index d12386d09..387fed803 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -91,9 +91,10 @@ def __init__(self, samples): self._state = None # ``samples`` arrives as a list of arrays, need to convert here to a multidimensional array - # TODO adjust the format as specified in the ADR if len(np.shape(samples)) > 1: - samples = np.vstack(samples) + samples = np.stack(samples, 1) + # TODO what shape should this have exactly? 
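# For illustration: with three shots on a two-mode circuit, e.g.
# samples == [array([0, 1]), array([1, 0]), array([2, 0])], np.stack(samples, 1)
# yields an array of shape (2, 3) == (num_modes, num_shots); this is why the
# updated tests below compare against the transposed samples (``.T``).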
+ # samples = np.vstack(samples) self._samples = samples @property diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index c1574b54b..e967226a8 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -105,7 +105,7 @@ def test_run_complete(self, config, prog, monkeypatch): job = engine.run(prog) assert job.status == JobStatus.COMPLETE - assert job.result.samples.tolist() == result + assert job.result.samples.T.tolist() == result def test_run_cancelled(self, config, prog, monkeypatch): server = MockServer() @@ -227,7 +227,7 @@ def test_get_job_result(self, connection, monkeypatch): result = connection.get_job_result("123") - assert result.samples.tolist() == result_samples + assert result.samples.T.tolist() == result_samples def test_get_job_result_error(self, connection, monkeypatch): monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) From 59bce20192f283656662728ec14f3ba982b7297e Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 19 Feb 2020 15:41:09 -0500 Subject: [PATCH 117/335] More docs, cleanup --- strawberryfields/engine.py | 66 +++++++++++++++++++++++++++++++---- tests/frontend/test_engine.py | 30 ++++++---------- 2 files changed, 69 insertions(+), 27 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 387fed803..ed6d76f9b 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -630,7 +630,8 @@ class StarshipEngine: Args: target (str): the target backend - connection (Connection): a connection to the remote job execution platform + connection (strawberryfields.engine.Connection): a connection to the remote job + execution platform """ POLLING_INTERVAL_SECONDS = 1 @@ -681,7 +682,7 @@ def run(self, program, shots=1): shots (int): the number of shots for which to run the job Returns: - Job: the resulting remote job + strawberryfields.engine.Job: the resulting remote job """ job = self.run_async(program) try: @@ -707,7 +708,7 @@ def run_async(self, program, shots=1): shots (int): the number of shots for which to run the job Returns: - Job: the created remote job + strawberryfields.engine.Job: the created remote job """ bb = to_blackbird(program) bb._target["name"] = self._target @@ -774,6 +775,14 @@ def base_url(self): # TODO think about using serializers for the request wrappers - future PR maybe? def create_job(self, circuit): + """Creates a job with the given circuit. + + Args: + circuit (str): the serialized Blackbird program + + Returns: + strawberryfields.engine.Job: the created job + """ response = self._post("/jobs", data=json.dumps({"circuit": circuit})) if response.status_code == 201: return Job( @@ -784,6 +793,15 @@ def create_job(self, circuit): raise CreateJobRequestError(self._request_error_message(response)) def get_all_jobs(self, after=datetime(1970, 1, 1)): + """Gets all jobs created by the user, optionally filtered by datetime. + + Args: + after (datetime.datetime): if provided, only jobs more recent than `after` + are returned + + Returns: + List[strawberryfields.engine.Job]: the jobs + """ # TODO figure out how to handle pagination from the user's perspective (if at all) # TODO tentative until corresponding feature on platform side is finalized response = self._get("/jobs?page[size]={}".format(self.MAX_JOBS_REQUESTED)) @@ -797,6 +815,14 @@ def get_all_jobs(self, after=datetime(1970, 1, 1)): raise GetAllJobsRequestError(self._request_error_message(response)) def get_job(self, job_id): + """Gets a job. 
+ + Args: + job_id (str): the job UUID + + Returns: + strawberryfields.engine.Job: the job + """ response = self._get("/jobs/{}".format(job_id)) if response.status_code == 200: return Job( @@ -807,9 +833,25 @@ def get_job(self, job_id): raise GetJobRequestError(self._request_error_message(response)) def get_job_status(self, job_id): + """Returns the status of a job. + + Args: + job_id (str): the job UUID + + Returns: + strawberryfields.engine.JobStatus: the job status + """ return JobStatus(self.get_job(job_id).status) def get_job_result(self, job_id): + """Returns the result of a job. + + Args: + job_id (str): the job UUID + + Returns: + strawberryfields.engine.Result: the job result + """ # TODO get numpy here? response = self._get("/jobs/{}/result".format(job_id)) if response.status_code == 200: @@ -824,11 +866,16 @@ def get_job_circuit(self, job_id): raise GetJobCircuitRequestError(self._request_error_message(response)) def cancel_job(self, job_id): + """Cancels a job. + + Args: + job_id (str): the job UUID + """ response = self._patch( "/jobs/{}".format(job_id), body={"status", JobStatus.CANCELLED.value} ) if response.status_code == 204: - return True + return raise CancelJobRequestError(self._request_error_message(response)) def _get(self, path, **kwargs): @@ -857,6 +904,7 @@ def _request_error_message(self, response): class RequestMethod(enum.Enum): """Defines the valid request methods for messages sent to the remote job platform. """ + GET = "get" POST = "post" PATCH = "patch" @@ -870,8 +918,9 @@ class Job: Args: id_ (str): the job UUID - status (JobStatus): the job status - connection (Connection): the connection over which the job is managed + status (strawberryfields.engine.JobStatus): the job status + connection (strawberryfields.engine.Connection): the connection over which the + job is managed """ def __init__(self, id_, status, connection): @@ -899,7 +948,7 @@ def result(self): any other status. Returns: - Result: the result + strawberryfields.engine.Result: the result """ if self.status != JobStatus.COMPLETE: raise JobNotCompleteError( @@ -911,6 +960,9 @@ def result(self): def refresh(self): """Refreshes the status of the job, along with the job result if the job is newly completed. + + Only a non-terminal (open or queued job) can be refreshed; a + `RefreshTerminalJobError` is raised otherwise. """ if self.status.is_terminal: raise RefreshTerminalJobError( diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index e967226a8..be2323001 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -109,7 +109,7 @@ def test_run_complete(self, config, prog, monkeypatch): def test_run_cancelled(self, config, prog, monkeypatch): server = MockServer() - # TODO how to test keyboard interrupt for cancel? 
+ # TODO def test_run_async(self): server = MockServer() @@ -147,26 +147,16 @@ def test_create_job_error(self, connection, monkeypatch): connection.create_job("circuit") def test_get_all_jobs(self, connection, monkeypatch): + jobs = [ + { + "id": str(i), + "status": JobStatus.COMPLETE, + "created_at": "2020-01-{:02d}T12:34:56.123456Z".format(i), + } + for i in range(1, 10) + ] monkeypatch.setattr( - Connection, - "_get", - mock_return( - mock_response( - 200, - { - "data": [ - { - "id": str(i), - "status": JobStatus.COMPLETE, - "created_at": "2020-01-{:02d}T12:34:56.123456Z".format( - i - ), - } - for i in range(1, 10) - ] - }, - ) - ), + Connection, "_get", mock_return(mock_response(200, {"data": jobs})), ) jobs = connection.get_all_jobs(after=datetime(2020, 1, 5)) From ace121303a2d844cb7387344409a5a5b65eadc86 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 19 Feb 2020 16:07:03 -0500 Subject: [PATCH 118/335] engine.run() should return result instead of job --- strawberryfields/engine.py | 18 +++++++----------- tests/frontend/test_engine.py | 5 ++--- 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index ed6d76f9b..4f876c519 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -608,16 +608,13 @@ class StarshipEngine: engine = StarshipEngine("chip2") # Run a job synchronously - job = engine.run(program, shots=1) + result = engine.run(program, shots=1) # (Engine blocks until job is complete) - job.status # JobStatus.COMPLETE - job.result # [[0, 1, 0, 2, 1, 0, 0, 0]] + result # [[0, 1, 0, 2, 1, 0, 0, 0]] # Run a job synchronously, but cancel it before it is completed - job = engine.run(program, shots=1) + _ = engine.run(program, shots=1) ^C # KeyboardInterrupt cancels the job - job.status # "cancelled" - job.result # JobCancelledError # Run a job asynchronously job = engine.run_async(program, shots=1) @@ -682,7 +679,7 @@ def run(self, program, shots=1): shots (int): the number of shots for which to run the job Returns: - strawberryfields.engine.Job: the resulting remote job + strawberryfields.engine.Result: the job result """ job = self.run_async(program) try: @@ -690,12 +687,11 @@ def run(self, program, shots=1): while True: job.refresh() if job.status in (JobStatus.COMPLETE, JobStatus.FAILED): - return job + return job.result time.sleep(self.POLLING_INTERVAL_SECONDS) except KeyboardInterrupt: self._connection.cancel_job(job_id) - job.status = JobStatus.CANCELLED - return job + return def run_async(self, program, shots=1): """Runs a remote job asynchronously. @@ -704,7 +700,7 @@ def run_async(self, program, shots=1): manually refresh the status of the job. 
Args: - program (Program): the quantum circuit + program (strawberryfields.Program): the quantum circuit shots (int): the number of shots for which to run the job Returns: diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index be2323001..808451f3a 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -102,10 +102,9 @@ def test_run_complete(self, config, prog, monkeypatch): monkeypatch.setattr(Connection, "get_job_result", mock_return(Result(result))) engine = StarshipEngine("chip2", connection=Connection(config)) - job = engine.run(prog) + job_result = engine.run(prog) - assert job.status == JobStatus.COMPLETE - assert job.result.samples.T.tolist() == result + assert job_result.samples.T.tolist() == result def test_run_cancelled(self, config, prog, monkeypatch): server = MockServer() From d4e6576b6856022c3ccd036084005c44c9532e99 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 19 Feb 2020 16:21:34 -0500 Subject: [PATCH 119/335] Result.state raises error for stateless computation --- strawberryfields/engine.py | 12 +++++++----- tests/frontend/test_engine.py | 15 +++++++++++---- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 4f876c519..9614043fd 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -46,8 +46,8 @@ class OneJobAtATimeError(Exception): class Result: """Result of a quantum computation. - Represents the results of the execution of a quantum program - returned by the :meth:`.LocalEngine.run` method. + Represents the results of the execution of a quantum program on a local or + remote backend. The returned :class:`~Result` object provides several useful properties for accessing the results of your program execution: @@ -87,8 +87,9 @@ class Result: but the return value of ``Result.state`` will be ``None``. """ - def __init__(self, samples): + def __init__(self, samples, is_stateful=True): self._state = None + self._is_stateful = is_stateful # ``samples`` arrives as a list of arrays, need to convert here to a multidimensional array if len(np.shape(samples)) > 1: @@ -131,7 +132,8 @@ def state(self): Returns: BaseState: quantum state returned from program execution """ - # TODO raise error if called for remote job + if not self._is_stateful: + raise AttributeError("The state is undefined for a stateless computation.") return self._state def __str__(self): @@ -851,7 +853,7 @@ def get_job_result(self, job_id): # TODO get numpy here? response = self._get("/jobs/{}/result".format(job_id)) if response.status_code == 200: - return Result(response.json()["result"]) + return Result(response.json()["result"], is_stateful=False) raise GetJobResultRequestError(self._request_error_message(response)) # TODO is this necessary? 
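For reference, a minimal usage sketch of the stateless result behaviour introduced
above (illustrative only; the sample values are made up, and it assumes the ``Result``
class exactly as modified in this patch, with remote results constructed via
``is_stateful=False``):

    import numpy as np
    from strawberryfields.engine import Result

    # three shots on a two-mode circuit, as a list of per-shot arrays
    samples = [np.array([0, 1]), np.array([1, 0]), np.array([2, 0])]

    result = Result(samples, is_stateful=False)
    result.samples.shape  # (2, 3), i.e. (num_modes, num_shots)
    result.state          # raises AttributeError: the state is undefined
                          # for a stateless (remote) computation
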
diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 808451f3a..2c7493ceb 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -88,7 +88,7 @@ def get_job_status(self, _id): class TestStarshipEngine: def test_run_complete(self, config, prog, monkeypatch): - id_, result = "123", [[1, 2], [3, 4]] + id_, result_expected = "123", [[1, 2], [3, 4]] server = MockServer() monkeypatch.setattr( @@ -99,12 +99,19 @@ def test_run_complete(self, config, prog, monkeypatch): ), ) monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) - monkeypatch.setattr(Connection, "get_job_result", mock_return(Result(result))) + monkeypatch.setattr( + Connection, + "get_job_result", + mock_return(Result(result_expected, is_stateful=False)), + ) engine = StarshipEngine("chip2", connection=Connection(config)) - job_result = engine.run(prog) + result = engine.run(prog) + + assert result.samples.T.tolist() == result_expected - assert job_result.samples.T.tolist() == result + with pytest.raises(AttributeError): + result.state def test_run_cancelled(self, config, prog, monkeypatch): server = MockServer() From 0efccb69691361937bad3a50b57436d908bad872 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 19 Feb 2020 16:25:01 -0500 Subject: [PATCH 120/335] First draft --- strawberryfields/configuration.py | 46 +++++++++++++++++++++++----- tests/frontend/test_configuration.py | 11 +++++++ 2 files changed, 50 insertions(+), 7 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index cf1c5cc9d..08463f478 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -55,16 +55,43 @@ class ConfigurationError(Exception): """Exception used for configuration errors""" -_user_config_dir = user_config_dir("strawberryfields", "Xanadu") -_env_config_dir = os.environ.get("SF_CONF", "") - # This function will be used by the Connection object -def read_config(name="config.toml", **kwargs): +def load_config(name="config.toml", **kwargs): _name = name + _config = create_config_object(**kwargs) + + _config = update_from_config_file(_config) + _config = update_from_environmental_variables(_config) + + +def create_config_object(**kwargs): + authentication_token = kwargs.get("authentication_token", "") + hostname = kwargs.get("hostname", "localhost") + use_ssl = kwargs.get("use_ssl", True) + port = kwargs.get("port", 443) + debug = kwargs.get("debug", False) + + config = { + "api": { + "authentication_token": authentication_token, + "hostname": hostname, + "use_ssl": use_ssl, + "port": port, + "debug": debug + } + } + return config + +def update_from_config_file(config): + + current_dir = os.getcwd() + env_config_dir = os.environ.get("SF_CONF", "") + user_config_dir = user_config_dir("strawberryfields", "Xanadu") + # Search the current directory, the directory under environment # variable SF_CONF, and default user config directory, in that order. 
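# For illustration (assuming the lookup order stated in the comment above): if both
# ./config.toml and $SF_CONF/config.toml exist, the copy in the current working
# directory is the one that gets loaded; the appdirs location
# user_config_dir("strawberryfields", "Xanadu") -- typically
# ~/.config/strawberryfields/config.toml on Linux -- is only consulted last.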
- directories = [os.getcwd(), _env_config_dir, _user_config_dir] + directories = [current_dir, env_config_dir, user_config_dir] for directory in directories: _filepath = os.path.join(directory, _name) try: @@ -79,8 +106,11 @@ def read_config(name="config.toml", **kwargs): log.info("No Strawberry Fields configuration file found.") # TODO: add logic for parsing from environmental variables -# This function will be user-facing -# calling on the save_config function + return config + +def update_from_environmental_variables(_config): + +# calling on the write_config_file function def write_config_file(name="config.toml", path=_user_config_dir, **kwargs): # TODO: create a config object similar to DEFAULT_CONFIG @@ -222,3 +252,5 @@ def save(self, filepath): """ with open(filepath, "w") as f: toml.dump(self._config, f) + +configuration = load_config() diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 847b96731..f29094d0f 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -74,6 +74,8 @@ def test_loading_current_directory(self, tmpdir, monkeypatch): def test_loading_env_variable(self, tmpdir): """Test that the default configuration file can be loaded via an environment variable.""" + # TODO: This test does not work if there is already a configuration + # file in place filename = tmpdir.join("config.toml") with open(filename, "w") as f: @@ -88,6 +90,14 @@ def test_loading_env_variable(self, tmpdir): def test_loading_absolute_path(self, tmpdir, monkeypatch): """Test that the default configuration file can be loaded via an absolute path.""" + # TODO: Some state seems to be left hereThis test does not work if + # there is already a configuration file in place + # {'api': {'authentication_token': '071cdcce-9241-4965-93af-4a4dbc739135', + # 'hostname': 'localhost', 'use_ssl': True, 'port': '443', 'debug': False}} + # {'api': {'authentication_token': '071cdcce-9241-4965-93af-4a4dbc739135', + # 'hostname': 'localhost', 'use_ssl': True, 'debug': False, 'port': 443}} + + # config._config seems to output a string at times filename = os.path.abspath(tmpdir.join("config.toml")) with open(filename, "w") as f: @@ -96,6 +106,7 @@ def test_loading_absolute_path(self, tmpdir, monkeypatch): os.environ["SF_CONF"] = "" config = conf.Configuration(name=str(filename)) + print(config._config, EXPECTED_CONFIG) assert config._config == EXPECTED_CONFIG assert config.path == filename From 43f04ae863dceb68e1bf35df4daa62569f721adc Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 19 Feb 2020 16:31:02 -0500 Subject: [PATCH 121/335] Add test for async job --- tests/frontend/test_engine.py | 60 ++++++++++++++++++++++++----------- 1 file changed, 42 insertions(+), 18 deletions(-) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 2c7493ceb..730c4123a 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -69,7 +69,17 @@ def config(): return Configuration() -# TODO should mock an actual http server here (e.g. with `http.server`) +@pytest.fixture +def connection(config): + return Connection( + token=config.api["authentication_token"], + host=config.api["hostname"], + port=config.api["port"], + use_ssl=config.api["use_ssl"], + ) + + +# TODO explore mocking an actual http server here (e.g. 
with `http.server`) class MockServer: # Fake a job processing delay REQUESTS_BEFORE_COMPLETE = 3 @@ -87,16 +97,14 @@ def get_job_status(self, _id): class TestStarshipEngine: - def test_run_complete(self, config, prog, monkeypatch): + def test_run_complete(self, connection, prog, monkeypatch): id_, result_expected = "123", [[1, 2], [3, 4]] server = MockServer() monkeypatch.setattr( Connection, "create_job", - mock_return( - Job(id_=id_, status=JobStatus.OPEN, connection=Connection(config)) - ), + mock_return(Job(id_=id_, status=JobStatus.OPEN, connection=connection)), ) monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) monkeypatch.setattr( @@ -105,7 +113,7 @@ def test_run_complete(self, config, prog, monkeypatch): mock_return(Result(result_expected, is_stateful=False)), ) - engine = StarshipEngine("chip2", connection=Connection(config)) + engine = StarshipEngine("chip2", connection=connection) result = engine.run(prog) assert result.samples.T.tolist() == result_expected @@ -113,25 +121,41 @@ def test_run_complete(self, config, prog, monkeypatch): with pytest.raises(AttributeError): result.state - def test_run_cancelled(self, config, prog, monkeypatch): + def test_run_cancelled(self, connection, prog, monkeypatch): server = MockServer() # TODO - def test_run_async(self): + def test_run_async(self, connection, prog, monkeypatch): + id_, result_expected = "123", [[1, 2], [3, 4]] + server = MockServer() - # TODO + monkeypatch.setattr( + Connection, + "create_job", + mock_return(Job(id_=id_, status=JobStatus.OPEN, connection=connection)), + ) + monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) + monkeypatch.setattr( + Connection, + "get_job_result", + mock_return(Result(result_expected, is_stateful=False)), + ) + engine = StarshipEngine("chip2", connection=connection) + job = engine.run_async(prog) + job.status == JobStatus.OPEN -class TestConnection: - @pytest.fixture - def connection(self, config): - return Connection( - token=config.api["authentication_token"], - host=config.api["hostname"], - port=config.api["port"], - use_ssl=config.api["use_ssl"], - ) + for _ in range(server.REQUESTS_BEFORE_COMPLETE): + job.refresh() + assert job.status == JobStatus.COMPLETE + assert job.result.samples.T.tolist() == result_expected + + with pytest.raises(AttributeError): + job.result.state + + +class TestConnection: def test_create_job(self, connection, monkeypatch): id_, status = "123", JobStatus.QUEUED From be5c927570dfe12c79c6da04204aac5f778f23dc Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 19 Feb 2020 17:16:22 -0500 Subject: [PATCH 122/335] Simplify starship CLI --- starship | 95 ++++++++++---------------------------- strawberryfields/engine.py | 20 +++++++- 2 files changed, 42 insertions(+), 73 deletions(-) diff --git a/starship b/starship index bffc2948a..efb2033df 100755 --- a/starship +++ b/starship @@ -20,100 +20,53 @@ import sys import argparse import pdb -from strawberryfields.engine import StarshipEngine -from strawberryfields.api_client import APIClient +from strawberryfields.engine import StarshipEngine, Connection from strawberryfields.io import load -from strawberryfields import configuration -PROMPTS = { - "hostname": "Please enter the hostname of the server to connect to: [{}] ", - "port": "Please enter the port number to connect with: [{}] ", - "use_ssl": "Should the client attempt to connect over SSL? 
[{}] ", - "authentication_token": "Please enter the authentication token to use when connecting: [{}] ", - "save": "Would you like to save these settings to a local cofiguration file in the current " - "directory? [{}] ", -} if __name__ == "__main__": - parser = argparse.ArgumentParser(description="run a blackbird script on StarshipEngine") + parser = argparse.ArgumentParser( + description="run a blackbird script on StarshipEngine" + ) + parser.add_argument( + "--token", "-t", help="the API authentication token", required=True + ) group = parser.add_mutually_exclusive_group(required=True) group.add_argument("--input", "-i", help="the xbb file to run") + group.add_argument("--hello", action="store_true", help="test the API connection") parser.add_argument( "--output", "-o", help="where to output the result of the program - outputs to stdout by default", ) parser.add_argument( - "--debug", action="store_true", help="returns a pdb shell after executing the program" - ) - group.add_argument( - "--reconfigure", + "--debug", action="store_true", - help="an interactive tool to reconfigure the API connection before executing the program", - ) - group.add_argument( - "--hello", - action="store_true", - help="test the API connection", + help="returns a pdb shell after executing the program", ) args = parser.parse_args() - if args.reconfigure: - config = configuration.Configuration() - - hostname = ( - input(PROMPTS["hostname"].format(config.api["hostname"])) or config.api["hostname"] - ) - authentication_token = ( - input(PROMPTS["authentication_token"].format(config.api["authentication_token"])) - or config.api["authentication_token"] - ) - port = input(PROMPTS["port"].format(config.api["port"])) or config.api["port"] - use_ssl = input(PROMPTS["use_ssl"].format("y" if config.api["use_ssl"] else "n")).upper() == "Y" - save = input(PROMPTS["save"].format("y" if config.api["use_ssl"] else "n")).upper() == "Y" - - if not save: - sys.stdout.write("Not writing configuration to file...\n") - else: - if not os.path.isfile("config.toml"): - sys.stdout.write("Writing configuration file to current working directory...\n") - else: - sys.stdout.write("Updating configuration in current working directory...\n") - - config.api["hostname"] = hostname - config.api["authentication_token"] = authentication_token - config.api["port"] = port - config.api["use_ssl"] = use_ssl - config.save("config.toml") - sys.exit() - elif args.hello: - client = APIClient() - client.BASE_URL = "https://platform.strawberryfields.ai/healthz" - try: - response = client.get("") - except Exception as e: - sys.stderr.write("Could not connect to server:\n{}\n".format(e)) - sys.exit(1) + connection = Connection( + token=args.token, + host="platform.strawberryfields.ai", + port="443", + use_ssl=True, + debug=False, + ) - if response.status_code == 200: + if args.hello: + try: + connection.ping() sys.stdout.write("You have successfully authenticated to the platform!\n") sys.exit() - elif response.status_code in (401, 403): - if not client.AUTHENTICATION_TOKEN: - sys.stderr.write("Could not authenticate -- no token provided\n") - else: - sys.stderr.write( - "Could not authenticate with token {} - please try again\n".format( - client.AUTHENTICATION_TOKEN)) - sys.exit(1) - else: - sys.stderr.write("Could not connect to server: {}, {}\n".format( - str(response.status_code), response.content)) + except Exception as e: + sys.stderr.write("Could not connect to server:\n{}\n".format(e)) sys.exit(1) program = load(args.input) - eng = 
StarshipEngine(program.target) + + eng = StarshipEngine("chip2", connection) result = eng.run(program) if result and result.samples is not None: diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 9614043fd..a566d4226 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -596,6 +596,10 @@ class CancelTerminalJobError(Exception): """Raised when attempting to cancel a completed, failed, or cancelled job.""" +class PingFailedError(Exception): + """Raised when a ping request to a remote backend is unsuccessful.""" + + class StarshipEngine: """A quantum program executor engine that that provides a simple interface for running remote jobs in a synchronous or asynchronous manner. @@ -690,9 +694,10 @@ def run(self, program, shots=1): job.refresh() if job.status in (JobStatus.COMPLETE, JobStatus.FAILED): return job.result + print(job.status) time.sleep(self.POLLING_INTERVAL_SECONDS) except KeyboardInterrupt: - self._connection.cancel_job(job_id) + self._connection.cancel_job(job.id) return def run_async(self, program, shots=1): @@ -870,12 +875,23 @@ def cancel_job(self, job_id): job_id (str): the job UUID """ response = self._patch( - "/jobs/{}".format(job_id), body={"status", JobStatus.CANCELLED.value} + "/jobs/{}".format(job_id), data={"status", JobStatus.CANCELLED.value} ) if response.status_code == 204: return raise CancelJobRequestError(self._request_error_message(response)) + def ping(self): + """Tests the connection to the remote backend. + + Returns: + bool: True if the connection is successful, and False otherwise + """ + response = self._get("/healthz") + if response.status_code == 200: + return + raise PingFailedError(self._request_error_message(response)) + def _get(self, path, **kwargs): return self._request(RequestMethod.GET, path, **kwargs) From b24cf33f3c31947085a4955c7f153cb9f8af1bb0 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 20 Feb 2020 11:02:51 -0500 Subject: [PATCH 123/335] Fix bug with output logic --- starship | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/starship b/starship index efb2033df..c647b5bbd 100755 --- a/starship +++ b/starship @@ -33,7 +33,9 @@ if __name__ == "__main__": ) group = parser.add_mutually_exclusive_group(required=True) group.add_argument("--input", "-i", help="the xbb file to run") - group.add_argument("--hello", action="store_true", help="test the API connection") + group.add_argument( + "--ping", "-p", action="store_true", help="test the API connection" + ) parser.add_argument( "--output", "-o", @@ -55,7 +57,7 @@ if __name__ == "__main__": debug=False, ) - if args.hello: + if args.ping: try: connection.ping() sys.stdout.write("You have successfully authenticated to the platform!\n") @@ -70,7 +72,7 @@ if __name__ == "__main__": result = eng.run(program) if result and result.samples is not None: - if hasattr(args, "output"): + if args.output: with open(args.output, "w") as file: file.write(str(result.samples.T)) else: From 24952e0b1ff83eeee80c1cccf40c808d058a380f Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 20 Feb 2020 11:13:38 -0500 Subject: [PATCH 124/335] Logic of load_config before testing --- strawberryfields/configuration.py | 192 ++++++------------------------ 1 file changed, 38 insertions(+), 154 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 08463f478..7ac0ffc4a 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -19,7 +19,7 @@ import logging as log import toml -from 
appdirs import user_config_dir +from appdirs import userconfig_dir log.getLogger() @@ -56,14 +56,20 @@ class ConfigurationError(Exception): # This function will be used by the Connection object -def load_config(name="config.toml", **kwargs): - _name = name +def load_config(filename="config.toml", **kwargs): - _config = create_config_object(**kwargs) + config = create_config_object(**kwargs) - _config = update_from_config_file(_config) - _config = update_from_environmental_variables(_config) + config_file = look_for_config_file(filename=filename) + if config_file is not None: + config = update_config_from_config_file(config, config_file) + else: + log.info("No Strawberry Fields configuration file found.") + + config = update_config_from_environmental_variables(_config) + + return config def create_config_object(**kwargs): authentication_token = kwargs.get("authentication_token", "") @@ -83,58 +89,42 @@ def create_config_object(**kwargs): } return config -def update_from_config_file(config): +def update_config_from_config_file(config, config_file): - current_dir = os.getcwd() - env_config_dir = os.environ.get("SF_CONF", "") - user_config_dir = user_config_dir("strawberryfields", "Xanadu") + # Here an example for sectionconfig is api + for section, sectionconfig in _config.items(): + for key in sectionconfig: + if key in config_file[section]: + # Update from configuration file + config[section][key] = config_file[section][key] + + return config +def look_for_config_file(filename="config.toml"): # Search the current directory, the directory under environment # variable SF_CONF, and default user config directory, in that order. - directories = [current_dir, env_config_dir, user_config_dir] + current_dir = os.getcwd() + envconfig_dir = os.environ.get("SF_CONF", "") + userconfig_dir = user_config_dir("strawberryfields", "Xanadu") + + directories = [current_dir, envconfig_dir, user_config_dir] for directory in directories: - _filepath = os.path.join(directory, _name) + filepath = os.path.join(directory, filename) try: - config = load_config_file.load(_filepath) + config_file = load_config_file(filepath) + break except FileNotFoundError: - log.info("No Strawberry Fields configuration file found.") - config = False - - if config: - self.update_config() - else: - log.info("No Strawberry Fields configuration file found.") - # TODO: add logic for parsing from environmental variables - - return config - -def update_from_environmental_variables(_config): - -# calling on the write_config_file function -def write_config_file(name="config.toml", path=_user_config_dir, **kwargs): + config_file = None - # TODO: create a config object similar to DEFAULT_CONFIG - save_config_file(path, config) + return config_file -def update_config(_config): - """Updates the configuration from either a loaded configuration - file, or from an environment variable. 
- - The environment variable takes precedence.""" - for section, section_config in _config.items(): +def update_config_from_environmental_variables(config): + for section, sectionconfig in config.items(): env_prefix = "SF_{}_".format(section.upper()) - - for key in section_config: - # Environment variables take precedence + for key in sectionconfig: env = env_prefix + key.upper() - if env in os.environ: - # Update from environment variable - _config[section][key] = parse_environment_variable(env, os.environ[env]) - elif _config_file and key in _config_file[section]: - # Update from configuration file - _config[section][key] = _config_file[section][key] - return _config + config[section][key] = parse_environment_variable(env, os.environ[env]) def load_config_file(filepath): """Load a configuration file. @@ -143,114 +133,8 @@ def load_config_file(filepath): filepath (str): path to the configuration file """ with open(filepath, "r") as f: - _config_file = toml.load(f) - - return _config_file - -def save_config_file(filepath, config): - """Save a configuration file. - - Args: - filepath (str): path to the configuration file - config (dict of str: dict of str: Union[boolean, str, float]) - """ - with open(filepath, "w") as f: - toml.dump(config, f) - -class Configuration: - """Configuration class. - - This class is responsible for loading, saving, and storing StrawberryFields - and plugin/device configurations. - - Args: - name (str): filename of the configuration file. - This should be a valid TOML file. You may also pass an absolute - or a relative file path to the configuration file. - """ - - def __str__(self): - return "{}".format(self._config) - - def __repr__(self): - return "Strawberry Fields Configuration <{}>".format(self._filepath) - - def __init__(self, name="config.toml"): - # Look for an existing configuration file - self._config = DEFAULT_CONFIG - self._config_file = {} - self._filepath = None - self._name = name - self._user_config_dir = user_config_dir("strawberryfields", "Xanadu") - self._env_config_dir = os.environ.get("SF_CONF", "") - - # Search the current directory, the directory under environment - # variable SF_CONF, and default user config directory, in that order. - directories = [os.getcwd(), self._env_config_dir, self._user_config_dir] - for directory in directories: - self._filepath = os.path.join(directory, self._name) - try: - config = self.load(self._filepath) - break - except FileNotFoundError: - config = False - - if config: - self.update_config() - else: - log.info("No Strawberry Fields configuration file found.") - - def update_config(self): - """Updates the configuration from either a loaded configuration - file, or from an environment variable. - - The environment variable takes precedence.""" - for section, section_config in self._config.items(): - env_prefix = "SF_{}_".format(section.upper()) - - for key in section_config: - # Environment variables take precedence - env = env_prefix + key.upper() - - if env in os.environ: - # Update from environment variable - self._config[section][key] = parse_environment_variable(env, os.environ[env]) - elif self._config_file and key in self._config_file[section]: - # Update from configuration file - self._config[section][key] = self._config_file[section][key] - - def __getattr__(self, section): - if section in self._config: - return self._config[section] - - raise ConfigurationError("Unknown Strawberry Fields configuration section.") - - @property - def path(self): - """Return the path of the loaded configuration file. 
- - Returns: - str: If no configuration is loaded, this returns ``None``.""" - return self._filepath - - def load(self, filepath): - """Load a configuration file. - - Args: - filepath (str): path to the configuration file - """ - with open(filepath, "r") as f: - self._config_file = toml.load(f) - - return self._config_file - - def save(self, filepath): - """Save a configuration file. + config_file = toml.load(f) - Args: - filepath (str): path to the configuration file - """ - with open(filepath, "w") as f: - toml.dump(self._config, f) + return config_file configuration = load_config() From 64da6597dcc78179212961752cd9509b0a72924b Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 20 Feb 2020 11:43:34 -0500 Subject: [PATCH 125/335] Linting and cleanup --- starship | 10 +--- strawberryfields/engine.py | 102 +++++++++++++++++++++---------------- 2 files changed, 59 insertions(+), 53 deletions(-) diff --git a/starship b/starship index c647b5bbd..0a7a3fcd1 100755 --- a/starship +++ b/starship @@ -41,18 +41,13 @@ if __name__ == "__main__": "-o", help="where to output the result of the program - outputs to stdout by default", ) - parser.add_argument( - "--debug", - action="store_true", - help="returns a pdb shell after executing the program", - ) args = parser.parse_args() connection = Connection( token=args.token, host="platform.strawberryfields.ai", - port="443", + port=443, use_ssl=True, debug=False, ) @@ -77,6 +72,3 @@ if __name__ == "__main__": file.write(str(result.samples.T)) else: sys.stdout.write(str(result.samples.T)) - - if args.debug: - pdb.set_trace() diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index a566d4226..40afef4ef 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -22,17 +22,16 @@ from datetime import datetime import enum import json -import requests import time from urllib.parse import urljoin import numpy as np - -from .backends import load_backend -from .backends.base import NotApplicableError, BaseBackend +import requests from strawberryfields.configuration import Configuration from strawberryfields.io import to_blackbird +from .backends import load_backend +from .backends.base import NotApplicableError, BaseBackend class OneJobAtATimeError(Exception): @@ -94,8 +93,6 @@ def __init__(self, samples, is_stateful=True): # ``samples`` arrives as a list of arrays, need to convert here to a multidimensional array if len(np.shape(samples)) > 1: samples = np.stack(samples, 1) - # TODO what shape should this have exactly? - # samples = np.vstack(samples) self._samples = samples @property @@ -578,11 +575,6 @@ class GetJobResultRequestError(Exception): """ -class GetJobCircuitRequestError(Exception): - """Raised when a request to get a job circuit fails. - """ - - class CancelJobRequestError(Exception): """Raised when a request to cancel a job fails. 
""" @@ -596,6 +588,10 @@ class CancelTerminalJobError(Exception): """Raised when attempting to cancel a completed, failed, or cancelled job.""" +class ResultOfIncompleteJobError(Exception): + """Raised when attempting to access the result of an incomplete job.""" + + class PingFailedError(Exception): """Raised when a ping request to a remote backend is unsuccessful.""" @@ -644,7 +640,7 @@ def __init__(self, target, connection=None): if target not in self.VALID_TARGETS: raise InvalidEngineTargetError("Invalid engine target: {}".format(target)) if connection is None: - # TODO use the global config once implemented + # TODO update this when config is implemented config = Configuration().api connection = Connection( token=config["authentication_token"], @@ -675,7 +671,7 @@ def connection(self): return self._connection def run(self, program, shots=1): - """Runs a remote job synchronously. + """Runs a remote job synchronously. In this synchronous mode, the engine blocks until the job is completed, failed, or cancelled, at which point the `Job` is returned. @@ -687,7 +683,7 @@ def run(self, program, shots=1): Returns: strawberryfields.engine.Result: the job result """ - job = self.run_async(program) + job = self.run_async(program, shots) try: # TODO worth setting a timeout here? while True: @@ -698,7 +694,6 @@ def run(self, program, shots=1): time.sleep(self.POLLING_INTERVAL_SECONDS) except KeyboardInterrupt: self._connection.cancel_job(job.id) - return def run_async(self, program, shots=1): """Runs a remote job asynchronously. @@ -714,9 +709,10 @@ def run_async(self, program, shots=1): strawberryfields.engine.Job: the created remote job """ bb = to_blackbird(program) - bb._target["name"] = self._target + # pylint: disable=protected-access + bb._target["name"] = self.target + # pylint: disable=protected-access bb._target["options"] = {"shots": shots} - # bb._target["options"] = {"shots": shots, **program.backend_options} return self._connection.create_job(bb.serialize()) @@ -734,11 +730,8 @@ class Connection: # TODO adjust this MAX_JOBS_REQUESTED = 100 JOB_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" - USER_AGENT = "strawberryfields-client/0.1" - def __init__( - self, token, host=None, port=None, use_ssl=None, debug=None, - ): + def __init__(self, token, host=None, port=None, use_ssl=None): # TODO use `read_config` when implemented # e.g. read_config(host="abc", port=123) @@ -746,31 +739,50 @@ def __init__( self._host = host self._port = port self._use_ssl = use_ssl - # TODO what is this used for? - self._debug = debug @property def token(self): + """The API authentication token. + + Returns: + str: the API token + """ return self._token @property def host(self): + """The host for the remote backend. + + Returns: + str: the hostname + """ return self._host @property def port(self): + """The port to connect to on the remote host. + + Returns: + int: the port number + """ return self._port @property def use_ssl(self): - return self._use_ssl + """Whether to use SSL for the connection. - @property - def debug(self): - return self._debug + Returns: + bool: True if SSL should be used, and False otherwise + """ + return self._use_ssl @property def base_url(self): + """The base URL used for the connection. + + Returns: + str: the base url + """ return "http{}://{}:{}".format( "s" if self.use_ssl else "", self.host, self.port ) @@ -799,8 +811,8 @@ def get_all_jobs(self, after=datetime(1970, 1, 1)): """Gets all jobs created by the user, optionally filtered by datetime. 
Args: - after (datetime.datetime): if provided, only jobs more recent than `after` - are returned + after (datetime.datetime): if provided, only jobs more recently created + then `after` are returned Returns: List[strawberryfields.engine.Job]: the jobs @@ -861,13 +873,6 @@ def get_job_result(self, job_id): return Result(response.json()["result"], is_stateful=False) raise GetJobResultRequestError(self._request_error_message(response)) - # TODO is this necessary? - def get_job_circuit(self, job_id): - response = self._get("/jobs/{}/circuit".format(job_id)) - if response.status_code == 200: - return response.json()["circuit"] - raise GetJobCircuitRequestError(self._request_error_message(response)) - def cancel_job(self, job_id): """Cancels a job. @@ -904,11 +909,12 @@ def _patch(self, path, **kwargs): def _request(self, method, path, **kwargs): return getattr(requests, method.value)( urljoin(self.base_url, path), - headers={"Authorization": self.token, "User-Agent": self.USER_AGENT}, + headers={"Authorization": self.token}, **kwargs ) - def _request_error_message(self, response): + @staticmethod + def _request_error_message(response): body = response.json() return "{} ({}): {}".format( body.get("status_code", ""), body.get("code", ""), body.get("detail", "") @@ -931,7 +937,7 @@ class Job: `Connection` when a job is run. Args: - id_ (str): the job UUID + id_ (str): the job UUID status (strawberryfields.engine.JobStatus): the job status connection (strawberryfields.engine.Connection): the connection over which the job is managed @@ -942,30 +948,38 @@ def __init__(self, id_, status, connection): self._status = status self._connection = connection - # TODO need this? - self._circuit = None self._result = None @property def id(self): + """The job UUID. + + Returns: + str: the job UUID + """ return self._id @property def status(self): + """The job status. + + Returns: + strawberryfields.engine.JobStatus: the job status + """ return self._status @property def result(self): """The job result. - This is only defined for complete jobs, and raises a `JobNotCompleteError` for - any other status. + This is only defined for complete jobs, and raises a `ResultOfIncompleteJobError` + for any other status. Returns: strawberryfields.engine.Result: the result """ if self.status != JobStatus.COMPLETE: - raise JobNotCompleteError( + raise ResultOfIncompleteJobError( "The result is undefined for jobs that are not complete " "(current status: {})".format(self.status.value) ) From 821bf969c1c8584ba8db7d495a31c189128ba2ed Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 20 Feb 2020 12:20:53 -0500 Subject: [PATCH 126/335] Linting, docs, cleanup --- tests/frontend/test_engine.py | 101 +++++++++++++++++++++------------- 1 file changed, 63 insertions(+), 38 deletions(-) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 730c4123a..1c01f41b2 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -13,81 +13,75 @@ # limitations under the License. 
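
# --- Editor's note (illustration, not part of the patch) --------------------
# The rewritten tests below never issue real HTTP requests: Connection's
# private helpers (_get/_post/_patch) are monkeypatched with stubs that return
# a MagicMock shaped like a requests.Response. A self-contained sketch of that
# stubbing pattern (the fake_response name is illustrative; the tests use the
# mock_response/mock_return helpers defined in this file):
from unittest.mock import MagicMock

def fake_response(status_code, body):
    """Build an object that quacks like requests.Response."""
    response = MagicMock()
    response.status_code = status_code
    response.json.return_value = body
    return response

resp = fake_response(201, {"id": "123", "status": "queued"})
assert resp.status_code == 201
assert resp.json()["status"] == "queued"
# -----------------------------------------------------------------------------
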
r"""Unit tests for engine.py""" from datetime import datetime +from unittest.mock import MagicMock import pytest -from unittest.mock import MagicMock, call import strawberryfields as sf from strawberryfields import ops -from strawberryfields.backends.base import BaseBackend - -from strawberryfields.configuration import Configuration from strawberryfields.engine import ( StarshipEngine, Connection, Job, JobStatus, Result, - InvalidEngineTargetError, - IncompleteJobError, CreateJobRequestError, GetAllJobsRequestError, GetJobRequestError, GetJobResultRequestError, - GetJobCircuitRequestError, - CancelJobRequestError, - RefreshTerminalJobError, - CancelTerminalJobError, ) pytestmark = pytest.mark.frontend +# pylint: disable=redefined-outer-name,no-self-use @pytest.fixture -def prog(backend): - """Program fixture.""" - prog = sf.Program(2) - with prog.context as q: +def prog(): + """A simple program for testing purposes. + """ + program = sf.Program(2) + with program.context as q: + # pylint: disable=expression-not-assigned ops.Dgate(0.5) | q[0] - return prog + return program + + +@pytest.fixture +def connection(): + """A mock connection object. + """ + return Connection(token="token", host="host", port=123, use_ssl=True) def mock_return(return_value): + """A helper function for defining a mock function that returns the given value for + any arguments. + """ return lambda *args, **kwargs: return_value def mock_response(status_code, json_return_value): + """A helper function for defining a mock response from the remote platform. + """ response = MagicMock() response.status_code = status_code response.json.return_value = json_return_value return response -@pytest.fixture -def config(): - # TODO anything to do here? - return Configuration() - - -@pytest.fixture -def connection(config): - return Connection( - token=config.api["authentication_token"], - host=config.api["hostname"], - port=config.api["port"], - use_ssl=config.api["use_ssl"], - ) - - -# TODO explore mocking an actual http server here (e.g. with `http.server`) class MockServer: - # Fake a job processing delay + """A mock platform server that fakes a processing delay by counting requests. + """ + REQUESTS_BEFORE_COMPLETE = 3 def __init__(self): self.request_count = 0 def get_job_status(self, _id): + """Returns a 'queued' job status until the number of requests exceeds a defined + threshold, beyond which a 'complete' job status is returned. + """ self.request_count += 1 return ( JobStatus.COMPLETE @@ -97,7 +91,12 @@ def get_job_status(self, _id): class TestStarshipEngine: + """Tests for the `StarshipEngine` class. + """ + def test_run_complete(self, connection, prog, monkeypatch): + """Tests a successful synchronous job execution. + """ id_, result_expected = "123", [[1, 2], [3, 4]] server = MockServer() @@ -119,13 +118,16 @@ def test_run_complete(self, connection, prog, monkeypatch): assert result.samples.T.tolist() == result_expected with pytest.raises(AttributeError): - result.state + _ = result.state - def test_run_cancelled(self, connection, prog, monkeypatch): - server = MockServer() + def test_run_cancelled(self): + """Tests a manual cancellation of synchronous job execution. + """ # TODO def test_run_async(self, connection, prog, monkeypatch): + """Tests a successful asynchronous job execution. 
+ """ id_, result_expected = "123", [[1, 2], [3, 4]] server = MockServer() @@ -143,7 +145,7 @@ def test_run_async(self, connection, prog, monkeypatch): engine = StarshipEngine("chip2", connection=connection) job = engine.run_async(prog) - job.status == JobStatus.OPEN + assert job.status == JobStatus.OPEN for _ in range(server.REQUESTS_BEFORE_COMPLETE): job.refresh() @@ -152,11 +154,16 @@ def test_run_async(self, connection, prog, monkeypatch): assert job.result.samples.T.tolist() == result_expected with pytest.raises(AttributeError): - job.result.state + _ = job.result.state class TestConnection: + """Tests for the `Connection` class. + """ + def test_create_job(self, connection, monkeypatch): + """Tests a successful job creation flow. + """ id_, status = "123", JobStatus.QUEUED monkeypatch.setattr( @@ -171,12 +178,16 @@ def test_create_job(self, connection, monkeypatch): assert job.status == status def test_create_job_error(self, connection, monkeypatch): + """Tests a failed job creation flow. + """ monkeypatch.setattr(Connection, "_post", mock_return(mock_response(400, {}))) with pytest.raises(CreateJobRequestError): connection.create_job("circuit") def test_get_all_jobs(self, connection, monkeypatch): + """Tests a successful job list retrieval. + """ jobs = [ { "id": str(i), @@ -194,12 +205,16 @@ def test_get_all_jobs(self, connection, monkeypatch): assert [job.id for job in jobs] == [str(i) for i in range(5, 10)] def test_get_all_jobs_error(self, connection, monkeypatch): + """Tests a failed job list retrieval. + """ monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) with pytest.raises(GetAllJobsRequestError): connection.get_all_jobs() def test_get_job(self, connection, monkeypatch): + """Tests a successful job retrieval. + """ id_, status = "123", JobStatus.COMPLETE monkeypatch.setattr( @@ -214,12 +229,16 @@ def test_get_job(self, connection, monkeypatch): assert job.status == status def test_get_job_error(self, connection, monkeypatch): + """Tests a failed job retrieval. + """ monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) with pytest.raises(GetJobRequestError): connection.get_job("123") def test_get_job_status(self, connection, monkeypatch): + """Tests a successful job status retrieval. + """ id_, status = "123", JobStatus.COMPLETE monkeypatch.setattr( @@ -231,12 +250,16 @@ def test_get_job_status(self, connection, monkeypatch): assert connection.get_job_status(id_) == status def test_get_job_status_error(self, connection, monkeypatch): + """Tests a failed job status retrieval. + """ monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) with pytest.raises(GetJobRequestError): connection.get_job_status("123") def test_get_job_result(self, connection, monkeypatch): + """Tests a successful job result retrieval. + """ result_samples = [[1, 2], [3, 4]] monkeypatch.setattr( @@ -250,6 +273,8 @@ def test_get_job_result(self, connection, monkeypatch): assert result.samples.T.tolist() == result_samples def test_get_job_result_error(self, connection, monkeypatch): + """Tests a failed job result retrieval. 
+ """ monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) with pytest.raises(GetJobResultRequestError): From ef451309ee68dfa46b50983c662c9c547a929da6 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 20 Feb 2020 12:59:02 -0500 Subject: [PATCH 127/335] Linting, formatting --- starship | 14 ++++----- strawberryfields/engine.py | 52 ++++++++++++------------------- tests/frontend/test_engine.py | 58 ++++++++++++----------------------- 3 files changed, 45 insertions(+), 79 deletions(-) diff --git a/starship b/starship index 0a7a3fcd1..7e1dcf400 100755 --- a/starship +++ b/starship @@ -14,11 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""A simple command-line interface for computing quantum programs on the Xanadu cloud +platform. +""" -import os import sys import argparse -import pdb from strawberryfields.engine import StarshipEngine, Connection from strawberryfields.io import load @@ -26,7 +27,7 @@ from strawberryfields.io import load if __name__ == "__main__": parser = argparse.ArgumentParser( - description="run a blackbird script on StarshipEngine" + description="run a blackbird script on the Xanadu cloud platform" ) parser.add_argument( "--token", "-t", help="the API authentication token", required=True @@ -45,11 +46,7 @@ if __name__ == "__main__": args = parser.parse_args() connection = Connection( - token=args.token, - host="platform.strawberryfields.ai", - port=443, - use_ssl=True, - debug=False, + token=args.token, host="platform.strawberryfields.ai", port=443, use_ssl=True, ) if args.ping: @@ -57,6 +54,7 @@ if __name__ == "__main__": connection.ping() sys.stdout.write("You have successfully authenticated to the platform!\n") sys.exit() + # pylint: disable=broad-except except Exception as e: sys.stderr.write("Could not connect to server:\n{}\n".format(e)) sys.exit(1) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 40afef4ef..0bc252c3e 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -28,7 +28,7 @@ import numpy as np import requests -from strawberryfields.configuration import Configuration +from strawberryfields.configuration import DEFAULT_CONFIG from strawberryfields.io import to_blackbird from .backends import load_backend from .backends.base import NotApplicableError, BaseBackend @@ -546,38 +546,31 @@ def run(self, program, *, args=None, compile_options=None, run_options=None): class InvalidEngineTargetError(Exception): - """Raised when an invalid engine target is provided. - """ + """Raised when an invalid engine target is provided.""" class IncompleteJobError(Exception): - """Raised when an invalid action is performed on an incomplete job. - """ + """Raised when an invalid action is performed on an incomplete job.""" class CreateJobRequestError(Exception): - """Raised when a request to create a job fails. - """ + """Raised when a request to create a job fails.""" class GetAllJobsRequestError(Exception): - """Raised when a request to get all jobs fails. - """ + """Raised when a request to get all jobs fails.""" class GetJobRequestError(Exception): - """Raised when a request to get a job fails. - """ + """Raised when a request to get a job fails.""" class GetJobResultRequestError(Exception): - """Raised when a request to get a job result fails. - """ + """Raised when a request to get a job result fails.""" class CancelJobRequestError(Exception): - """Raised when a request to cancel a job fails. 
- """ + """Raised when a request to cancel a job fails.""" class RefreshTerminalJobError(Exception): @@ -640,8 +633,8 @@ def __init__(self, target, connection=None): if target not in self.VALID_TARGETS: raise InvalidEngineTargetError("Invalid engine target: {}".format(target)) if connection is None: - # TODO update this when config is implemented - config = Configuration().api + # TODO use `load_config` when implemented + config = DEFAULT_CONFIG["api"] connection = Connection( token=config["authentication_token"], host=config["hostname"], @@ -685,12 +678,10 @@ def run(self, program, shots=1): """ job = self.run_async(program, shots) try: - # TODO worth setting a timeout here? while True: job.refresh() if job.status in (JobStatus.COMPLETE, JobStatus.FAILED): return job.result - print(job.status) time.sleep(self.POLLING_INTERVAL_SECONDS) except KeyboardInterrupt: self._connection.cancel_job(job.id) @@ -724,17 +715,18 @@ class Connection: is encouraged to use the higher-level interface provided by `StarshipEngine`. Args: - TODO + token (str): the API authentication token + host (str): the hostname of the remote platform + port (int): the port to connect to on the remote host + use_ssl (bool): whether to use SSL for the connection """ - # TODO adjust this MAX_JOBS_REQUESTED = 100 JOB_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" - def __init__(self, token, host=None, port=None, use_ssl=None): - # TODO use `read_config` when implemented - # e.g. read_config(host="abc", port=123) - + def __init__( + self, token, host="platform.strawberryfields.ai", port=443, use_ssl=True + ): self._token = token self._host = host self._port = port @@ -751,7 +743,7 @@ def token(self): @property def host(self): - """The host for the remote backend. + """The host for the remote platform. Returns: str: the hostname @@ -787,8 +779,6 @@ def base_url(self): "s" if self.use_ssl else "", self.host, self.port ) - # TODO think about using serializers for the request wrappers - future PR maybe? - def create_job(self, circuit): """Creates a job with the given circuit. @@ -817,8 +807,6 @@ def get_all_jobs(self, after=datetime(1970, 1, 1)): Returns: List[strawberryfields.engine.Job]: the jobs """ - # TODO figure out how to handle pagination from the user's perspective (if at all) - # TODO tentative until corresponding feature on platform side is finalized response = self._get("/jobs?page[size]={}".format(self.MAX_JOBS_REQUESTED)) if response.status_code == 200: return [ @@ -867,7 +855,6 @@ def get_job_result(self, job_id): Returns: strawberryfields.engine.Result: the job result """ - # TODO get numpy here? response = self._get("/jobs/{}/result".format(job_id)) if response.status_code == 200: return Result(response.json()["result"], is_stateful=False) @@ -922,8 +909,7 @@ def _request_error_message(response): class RequestMethod(enum.Enum): - """Defines the valid request methods for messages sent to the remote job platform. - """ + """Defines the valid request methods for messages sent to the remote job platform.""" GET = "get" POST = "post" diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 1c01f41b2..d666c271f 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -35,10 +35,10 @@ # pylint: disable=redefined-outer-name,no-self-use + @pytest.fixture def prog(): - """A simple program for testing purposes. 
- """ + """A simple program for testing purposes.""" program = sf.Program(2) with program.context as q: # pylint: disable=expression-not-assigned @@ -48,8 +48,7 @@ def prog(): @pytest.fixture def connection(): - """A mock connection object. - """ + """A mock connection object.""" return Connection(token="token", host="host", port=123, use_ssl=True) @@ -61,8 +60,7 @@ def mock_return(return_value): def mock_response(status_code, json_return_value): - """A helper function for defining a mock response from the remote platform. - """ + """A helper function for defining a mock response from the remote platform.""" response = MagicMock() response.status_code = status_code response.json.return_value = json_return_value @@ -70,8 +68,7 @@ def mock_response(status_code, json_return_value): class MockServer: - """A mock platform server that fakes a processing delay by counting requests. - """ + """A mock platform server that fakes a processing delay by counting requests.""" REQUESTS_BEFORE_COMPLETE = 3 @@ -91,12 +88,10 @@ def get_job_status(self, _id): class TestStarshipEngine: - """Tests for the `StarshipEngine` class. - """ + """Tests for the `StarshipEngine` class.""" def test_run_complete(self, connection, prog, monkeypatch): - """Tests a successful synchronous job execution. - """ + """Tests a successful synchronous job execution.""" id_, result_expected = "123", [[1, 2], [3, 4]] server = MockServer() @@ -121,13 +116,11 @@ def test_run_complete(self, connection, prog, monkeypatch): _ = result.state def test_run_cancelled(self): - """Tests a manual cancellation of synchronous job execution. - """ + """Tests a manual cancellation of synchronous job execution.""" # TODO def test_run_async(self, connection, prog, monkeypatch): - """Tests a successful asynchronous job execution. - """ + """Tests a successful asynchronous job execution.""" id_, result_expected = "123", [[1, 2], [3, 4]] server = MockServer() @@ -158,12 +151,10 @@ def test_run_async(self, connection, prog, monkeypatch): class TestConnection: - """Tests for the `Connection` class. - """ + """Tests for the `Connection` class.""" def test_create_job(self, connection, monkeypatch): - """Tests a successful job creation flow. - """ + """Tests a successful job creation flow.""" id_, status = "123", JobStatus.QUEUED monkeypatch.setattr( @@ -178,16 +169,14 @@ def test_create_job(self, connection, monkeypatch): assert job.status == status def test_create_job_error(self, connection, monkeypatch): - """Tests a failed job creation flow. - """ + """Tests a failed job creation flow.""" monkeypatch.setattr(Connection, "_post", mock_return(mock_response(400, {}))) with pytest.raises(CreateJobRequestError): connection.create_job("circuit") def test_get_all_jobs(self, connection, monkeypatch): - """Tests a successful job list retrieval. - """ + """Tests a successful job list retrieval.""" jobs = [ { "id": str(i), @@ -205,16 +194,14 @@ def test_get_all_jobs(self, connection, monkeypatch): assert [job.id for job in jobs] == [str(i) for i in range(5, 10)] def test_get_all_jobs_error(self, connection, monkeypatch): - """Tests a failed job list retrieval. - """ + """Tests a failed job list retrieval.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) with pytest.raises(GetAllJobsRequestError): connection.get_all_jobs() def test_get_job(self, connection, monkeypatch): - """Tests a successful job retrieval. 
- """ + """Tests a successful job retrieval.""" id_, status = "123", JobStatus.COMPLETE monkeypatch.setattr( @@ -229,16 +216,14 @@ def test_get_job(self, connection, monkeypatch): assert job.status == status def test_get_job_error(self, connection, monkeypatch): - """Tests a failed job retrieval. - """ + """Tests a failed job retrieval.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) with pytest.raises(GetJobRequestError): connection.get_job("123") def test_get_job_status(self, connection, monkeypatch): - """Tests a successful job status retrieval. - """ + """Tests a successful job status retrieval.""" id_, status = "123", JobStatus.COMPLETE monkeypatch.setattr( @@ -250,16 +235,14 @@ def test_get_job_status(self, connection, monkeypatch): assert connection.get_job_status(id_) == status def test_get_job_status_error(self, connection, monkeypatch): - """Tests a failed job status retrieval. - """ + """Tests a failed job status retrieval.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) with pytest.raises(GetJobRequestError): connection.get_job_status("123") def test_get_job_result(self, connection, monkeypatch): - """Tests a successful job result retrieval. - """ + """Tests a successful job result retrieval.""" result_samples = [[1, 2], [3, 4]] monkeypatch.setattr( @@ -273,8 +256,7 @@ def test_get_job_result(self, connection, monkeypatch): assert result.samples.T.tolist() == result_samples def test_get_job_result_error(self, connection, monkeypatch): - """Tests a failed job result retrieval. - """ + """Tests a failed job result retrieval.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) with pytest.raises(GetJobResultRequestError): From 1a380710a87cba5fed4dd892ccd10d0a17c4423f Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 20 Feb 2020 14:00:13 -0500 Subject: [PATCH 128/335] Consolidate exceptions --- starship | 1 + strawberryfields/engine.py | 90 ++++++++++------------------------- tests/frontend/test_engine.py | 17 +++---- 3 files changed, 32 insertions(+), 76 deletions(-) diff --git a/starship b/starship index 7e1dcf400..7a91de320 100755 --- a/starship +++ b/starship @@ -62,6 +62,7 @@ if __name__ == "__main__": program = load(args.input) eng = StarshipEngine("chip2", connection) + sys.stdout.write("Computing...") result = eng.run(program) if result and result.samples is not None: diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 0bc252c3e..27704cc36 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -34,10 +34,6 @@ from .backends.base import NotApplicableError, BaseBackend -class OneJobAtATimeError(Exception): - """Raised when a user attempts to execute more than one job on the same engine instance.""" - - # for automodapi, do not include the classes that should appear under the top-level strawberryfields namespace __all__ = ["Result", "BaseEngine", "LocalEngine"] @@ -545,48 +541,12 @@ def run(self, program, *, args=None, compile_options=None, run_options=None): return result -class InvalidEngineTargetError(Exception): - """Raised when an invalid engine target is provided.""" - - -class IncompleteJobError(Exception): - """Raised when an invalid action is performed on an incomplete job.""" - - -class CreateJobRequestError(Exception): - """Raised when a request to create a job fails.""" - - -class GetAllJobsRequestError(Exception): - """Raised when a request to get all jobs fails.""" - - -class GetJobRequestError(Exception): - """Raised when a 
request to get a job fails.""" - +class RequestFailedError(Exception): + """Raised when a request to the remote platform returns an error response.""" -class GetJobResultRequestError(Exception): - """Raised when a request to get a job result fails.""" - -class CancelJobRequestError(Exception): - """Raised when a request to cancel a job fails.""" - - -class RefreshTerminalJobError(Exception): - """Raised when attempting to refresh a completed, failed, or cancelled job.""" - - -class CancelTerminalJobError(Exception): - """Raised when attempting to cancel a completed, failed, or cancelled job.""" - - -class ResultOfIncompleteJobError(Exception): - """Raised when attempting to access the result of an incomplete job.""" - - -class PingFailedError(Exception): - """Raised when a ping request to a remote backend is unsuccessful.""" +class InvalidJobOperationError(Exception): + """Raised when an invalid operation is performed on a job.""" class StarshipEngine: @@ -614,7 +574,7 @@ class StarshipEngine: # Run a job asynchronously job = engine.run_async(program, shots=1) job.status # "queued" - job.result # RefreshTerminalJobError + job.result # InvalidJobOperationError # (After some time...) job.refresh() job.status # "complete" @@ -631,7 +591,7 @@ class StarshipEngine: def __init__(self, target, connection=None): if target not in self.VALID_TARGETS: - raise InvalidEngineTargetError("Invalid engine target: {}".format(target)) + raise ValueError("Invalid engine target: {}".format(target)) if connection is None: # TODO use `load_config` when implemented config = DEFAULT_CONFIG["api"] @@ -724,9 +684,7 @@ class Connection: MAX_JOBS_REQUESTED = 100 JOB_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" - def __init__( - self, token, host="platform.strawberryfields.ai", port=443, use_ssl=True - ): + def __init__(self, token, host="platform.strawberryfields.ai", port=443, use_ssl=True): self._token = token self._host = host self._port = port @@ -737,7 +695,7 @@ def token(self): """The API authentication token. Returns: - str: the API token + str: the authentication token """ return self._token @@ -773,7 +731,7 @@ def base_url(self): """The base URL used for the connection. Returns: - str: the base url + str: the base URL """ return "http{}://{}:{}".format( "s" if self.use_ssl else "", self.host, self.port @@ -795,7 +753,7 @@ def create_job(self, circuit): status=JobStatus(response.json()["status"]), connection=self, ) - raise CreateJobRequestError(self._request_error_message(response)) + raise RequestFailedError(self._request_error_message(response)) def get_all_jobs(self, after=datetime(1970, 1, 1)): """Gets all jobs created by the user, optionally filtered by datetime. @@ -815,7 +773,7 @@ def get_all_jobs(self, after=datetime(1970, 1, 1)): if datetime.strptime(info["created_at"], self.JOB_TIMESTAMP_FORMAT) > after ] - raise GetAllJobsRequestError(self._request_error_message(response)) + raise RequestFailedError(self._request_error_message(response)) def get_job(self, job_id): """Gets a job. @@ -833,7 +791,7 @@ def get_job(self, job_id): status=JobStatus(response.json()["status"]), connection=self, ) - raise GetJobRequestError(self._request_error_message(response)) + raise RequestFailedError(self._request_error_message(response)) def get_job_status(self, job_id): """Returns the status of a job. 
@@ -858,7 +816,7 @@ def get_job_result(self, job_id): response = self._get("/jobs/{}/result".format(job_id)) if response.status_code == 200: return Result(response.json()["result"], is_stateful=False) - raise GetJobResultRequestError(self._request_error_message(response)) + raise RequestFailedError(self._request_error_message(response)) def cancel_job(self, job_id): """Cancels a job. @@ -871,7 +829,7 @@ def cancel_job(self, job_id): ) if response.status_code == 204: return - raise CancelJobRequestError(self._request_error_message(response)) + raise RequestFailedError(self._request_error_message(response)) def ping(self): """Tests the connection to the remote backend. @@ -882,7 +840,7 @@ def ping(self): response = self._get("/healthz") if response.status_code == 200: return - raise PingFailedError(self._request_error_message(response)) + raise RequestFailedError(self._request_error_message(response)) def _get(self, path, **kwargs): return self._request(RequestMethod.GET, path, **kwargs) @@ -958,14 +916,14 @@ def status(self): def result(self): """The job result. - This is only defined for complete jobs, and raises a `ResultOfIncompleteJobError` - for any other status. + This is only defined for complete jobs, and raises an exception for any other + status. Returns: strawberryfields.engine.Result: the result """ if self.status != JobStatus.COMPLETE: - raise ResultOfIncompleteJobError( + raise AttributeError( "The result is undefined for jobs that are not complete " "(current status: {})".format(self.status.value) ) @@ -975,11 +933,11 @@ def refresh(self): """Refreshes the status of the job, along with the job result if the job is newly completed. - Only a non-terminal (open or queued job) can be refreshed; a - `RefreshTerminalJobError` is raised otherwise. + Only a non-terminal (open or queued job) can be refreshed; an exception is + raised otherwise. """ if self.status.is_terminal: - raise RefreshTerminalJobError( + raise InvalidJobOperationError( "A {} job cannot be refreshed".format(self.status.value) ) self._status = self._connection.get_job_status(self.id) @@ -989,11 +947,11 @@ def refresh(self): def cancel(self): """Cancels the job. - Only a non-terminal (open or queued job) can be cancelled; a - `CancelTerminalJobError` is raised otherwise. + Only a non-terminal (open or queued job) can be cancelled; an exception is + raised otherwise. 
""" if self.status.is_terminal: - raise CancelTerminalJobError( + raise InvalidJobOperationError( "A {} job cannot be cancelled".format(self.status.value) ) self._connection.cancel_job(self.id) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index d666c271f..794ece205 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -20,15 +20,12 @@ import strawberryfields as sf from strawberryfields import ops from strawberryfields.engine import ( - StarshipEngine, Connection, + RequestFailedError, Job, JobStatus, Result, - CreateJobRequestError, - GetAllJobsRequestError, - GetJobRequestError, - GetJobResultRequestError, + StarshipEngine, ) pytestmark = pytest.mark.frontend @@ -172,7 +169,7 @@ def test_create_job_error(self, connection, monkeypatch): """Tests a failed job creation flow.""" monkeypatch.setattr(Connection, "_post", mock_return(mock_response(400, {}))) - with pytest.raises(CreateJobRequestError): + with pytest.raises(RequestFailedError): connection.create_job("circuit") def test_get_all_jobs(self, connection, monkeypatch): @@ -197,7 +194,7 @@ def test_get_all_jobs_error(self, connection, monkeypatch): """Tests a failed job list retrieval.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) - with pytest.raises(GetAllJobsRequestError): + with pytest.raises(RequestFailedError): connection.get_all_jobs() def test_get_job(self, connection, monkeypatch): @@ -219,7 +216,7 @@ def test_get_job_error(self, connection, monkeypatch): """Tests a failed job retrieval.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) - with pytest.raises(GetJobRequestError): + with pytest.raises(RequestFailedError): connection.get_job("123") def test_get_job_status(self, connection, monkeypatch): @@ -238,7 +235,7 @@ def test_get_job_status_error(self, connection, monkeypatch): """Tests a failed job status retrieval.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) - with pytest.raises(GetJobRequestError): + with pytest.raises(RequestFailedError): connection.get_job_status("123") def test_get_job_result(self, connection, monkeypatch): @@ -259,5 +256,5 @@ def test_get_job_result_error(self, connection, monkeypatch): """Tests a failed job result retrieval.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) - with pytest.raises(GetJobResultRequestError): + with pytest.raises(RequestFailedError): connection.get_job_result("123") From 2818bc5bc5dde3bea5cf8abb7de585155c7be3da Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 20 Feb 2020 14:22:18 -0500 Subject: [PATCH 129/335] StarshipEngine.run() raises exception if remote job enters failed status --- starship | 15 +++++---------- strawberryfields/engine.py | 30 +++++++++++++++++++++++------- 2 files changed, 28 insertions(+), 17 deletions(-) diff --git a/starship b/starship index 7a91de320..aa06794a6 100755 --- a/starship +++ b/starship @@ -50,20 +50,15 @@ if __name__ == "__main__": ) if args.ping: - try: - connection.ping() - sys.stdout.write("You have successfully authenticated to the platform!\n") - sys.exit() - # pylint: disable=broad-except - except Exception as e: - sys.stderr.write("Could not connect to server:\n{}\n".format(e)) - sys.exit(1) + connection.ping() + sys.stdout.write("You have successfully authenticated to the platform!\n") + sys.exit() program = load(args.input) eng = StarshipEngine("chip2", connection) - sys.stdout.write("Computing...") - result = eng.run(program) + 
sys.stdout.write("Computing...\n") + result = eng.run(program, shots=5) if result and result.samples is not None: if args.output: diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 27704cc36..2f269dd69 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -549,6 +549,10 @@ class InvalidJobOperationError(Exception): """Raised when an invalid operation is performed on a job.""" +class JobFailedError(Exception): + """Raised when a remote job enters a 'failed' status.""" + + class StarshipEngine: """A quantum program executor engine that that provides a simple interface for running remote jobs in a synchronous or asynchronous manner. @@ -568,7 +572,7 @@ class StarshipEngine: result # [[0, 1, 0, 2, 1, 0, 0, 0]] # Run a job synchronously, but cancel it before it is completed - _ = engine.run(program, shots=1) + result = engine.run(program, shots=1) ^C # KeyboardInterrupt cancels the job # Run a job asynchronously @@ -591,7 +595,11 @@ class StarshipEngine: def __init__(self, target, connection=None): if target not in self.VALID_TARGETS: - raise ValueError("Invalid engine target: {}".format(target)) + raise ValueError( + "Invalid engine target: {} (valid targets: {})".format( + target, self.VALID_TARGETS + ) + ) if connection is None: # TODO use `load_config` when implemented config = DEFAULT_CONFIG["api"] @@ -616,7 +624,7 @@ def target(self): @property def connection(self): - """Returns the connection object used by the engine. + """The connection object used by the engine. Returns: strawberryfields.engine.Connection: the connection object used by the engine @@ -627,7 +635,7 @@ def run(self, program, shots=1): """Runs a remote job synchronously. In this synchronous mode, the engine blocks until the job is completed, failed, or - cancelled, at which point the `Job` is returned. + cancelled, at which point the result is returned. Args: program (Program): the quantum circuit @@ -640,8 +648,12 @@ def run(self, program, shots=1): try: while True: job.refresh() - if job.status in (JobStatus.COMPLETE, JobStatus.FAILED): + if job.status == JobStatus.COMPLETE: return job.result + if job.status == JobStatus.FAILED: + raise JobFailedError( + "The computation failed on the remote platform; please try again." + ) time.sleep(self.POLLING_INTERVAL_SECONDS) except KeyboardInterrupt: self._connection.cancel_job(job.id) @@ -684,7 +696,9 @@ class Connection: MAX_JOBS_REQUESTED = 100 JOB_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" - def __init__(self, token, host="platform.strawberryfields.ai", port=443, use_ssl=True): + def __init__( + self, token, host="platform.strawberryfields.ai", port=443, use_ssl=True + ): self._token = token self._host = host self._port = port @@ -753,7 +767,9 @@ def create_job(self, circuit): status=JobStatus(response.json()["status"]), connection=self, ) - raise RequestFailedError(self._request_error_message(response)) + raise RequestFailedError( + "Job creation failed: {}".format(self._request_error_message(response)) + ) def get_all_jobs(self, after=datetime(1970, 1, 1)): """Gets all jobs created by the user, optionally filtered by datetime. 
From 8d21922ac952af35f27117543d5bf75ec5824c30 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 20 Feb 2020 15:30:25 -0500 Subject: [PATCH 130/335] Polish docstrings --- starship | 4 +- strawberryfields/engine.py | 104 ++++++++++++++++++---------------- tests/frontend/test_engine.py | 16 +++--- 3 files changed, 64 insertions(+), 60 deletions(-) diff --git a/starship b/starship index aa06794a6..e8c7756dd 100755 --- a/starship +++ b/starship @@ -21,7 +21,7 @@ platform. import sys import argparse -from strawberryfields.engine import StarshipEngine, Connection +from strawberryfields.engine import StarshipEngine, Connection, JobFailedError from strawberryfields.io import load @@ -58,7 +58,7 @@ if __name__ == "__main__": eng = StarshipEngine("chip2", connection) sys.stdout.write("Computing...\n") - result = eng.run(program, shots=5) + result = eng.run(program, shots=1) if result and result.samples is not None: if args.output: diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 2f269dd69..d4deb7dce 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -23,6 +23,7 @@ import enum import json import time +from typing import List from urllib.parse import urljoin import numpy as np @@ -35,7 +36,7 @@ # for automodapi, do not include the classes that should appear under the top-level strawberryfields namespace -__all__ = ["Result", "BaseEngine", "LocalEngine"] +__all__ = ["Result", "BaseEngine", "LocalEngine", "Connection"] class Result: @@ -559,7 +560,7 @@ class StarshipEngine: **Example:** - The following example instantiates a `StarshipEngine` with default configuration, and + The following example instantiates an engine with the default configuration, and runs jobs both synchronously and asynchronously. .. code-block:: python @@ -587,13 +588,13 @@ class StarshipEngine: Args: target (str): the target backend connection (strawberryfields.engine.Connection): a connection to the remote job - execution platform + execution platform """ POLLING_INTERVAL_SECONDS = 1 VALID_TARGETS = ("chip2",) - def __init__(self, target, connection=None): + def __init__(self, target: str, connection: Connection = None): if target not in self.VALID_TARGETS: raise ValueError( "Invalid engine target: {} (valid targets: {})".format( @@ -614,7 +615,7 @@ def __init__(self, target, connection=None): self._connection = connection @property - def target(self): + def target(self) -> str: """The target backend used by the engine. Returns: @@ -623,7 +624,7 @@ def target(self): return self._target @property - def connection(self): + def connection(self) -> Connection: """The connection object used by the engine. Returns: @@ -631,18 +632,20 @@ def connection(self): """ return self._connection - def run(self, program, shots=1): + def run(self, program: Program, shots: int = 1) -> Optional[Result]: """Runs a remote job synchronously. In this synchronous mode, the engine blocks until the job is completed, failed, or - cancelled, at which point the result is returned. + cancelled. If the job completes successfully, the result is returned; if the job + fails or is cancelled, ``None`` is returned. 
Args: - program (Program): the quantum circuit + program (strawberryfields.Program): the quantum circuit shots (int): the number of shots for which to run the job Returns: - strawberryfields.engine.Result: the job result + [strawberryfields.engine.Result, None]: the job result if successful, and + ``None`` otherwise """ job = self.run_async(program, shots) try: @@ -658,7 +661,7 @@ def run(self, program, shots=1): except KeyboardInterrupt: self._connection.cancel_job(job.id) - def run_async(self, program, shots=1): + def run_async(self, program: Program, shots: int = 1) -> Job: """Runs a remote job asynchronously. In this asynchronous mode, a `Job` is returned immediately, and the user can @@ -684,7 +687,7 @@ class Connection: advanced job operations. For basic usage, it is not necessary to manually instantiate this object; the user - is encouraged to use the higher-level interface provided by `StarshipEngine`. + is encouraged to use the higher-level interface provided by :class:`~StarshipEngine`. Args: token (str): the API authentication token @@ -705,7 +708,7 @@ def __init__( self._use_ssl = use_ssl @property - def token(self): + def token(self) -> str: """The API authentication token. Returns: @@ -714,7 +717,7 @@ def token(self): return self._token @property - def host(self): + def host(self) -> str: """The host for the remote platform. Returns: @@ -723,7 +726,7 @@ def host(self): return self._host @property - def port(self): + def port(self) -> int: """The port to connect to on the remote host. Returns: @@ -732,16 +735,16 @@ def port(self): return self._port @property - def use_ssl(self): + def use_ssl(self) -> bool: """Whether to use SSL for the connection. Returns: - bool: True if SSL should be used, and False otherwise + bool: ``True`` if SSL should be used, and ``False`` otherwise """ return self._use_ssl @property - def base_url(self): + def base_url(self) -> str: """The base URL used for the connection. Returns: @@ -751,7 +754,7 @@ def base_url(self): "s" if self.use_ssl else "", self.host, self.port ) - def create_job(self, circuit): + def create_job(self, circuit: str) -> Job: """Creates a job with the given circuit. Args: @@ -771,12 +774,14 @@ def create_job(self, circuit): "Job creation failed: {}".format(self._request_error_message(response)) ) - def get_all_jobs(self, after=datetime(1970, 1, 1)): + def get_all_jobs( + self, after: datetime.datetime = datetime(1970, 1, 1) + ) -> List[Job]: """Gets all jobs created by the user, optionally filtered by datetime. Args: after (datetime.datetime): if provided, only jobs more recently created - then `after` are returned + then ``after`` are returned Returns: List[strawberryfields.engine.Job]: the jobs @@ -791,11 +796,11 @@ def get_all_jobs(self, after=datetime(1970, 1, 1)): ] raise RequestFailedError(self._request_error_message(response)) - def get_job(self, job_id): + def get_job(self, job_id: str) -> Job: """Gets a job. Args: - job_id (str): the job UUID + job_id (str): the job ID Returns: strawberryfields.engine.Job: the job @@ -809,22 +814,22 @@ def get_job(self, job_id): ) raise RequestFailedError(self._request_error_message(response)) - def get_job_status(self, job_id): + def get_job_status(self, job_id: str) -> JobStatus: """Returns the status of a job. 
Args: - job_id (str): the job UUID + job_id (str): the job ID Returns: strawberryfields.engine.JobStatus: the job status """ return JobStatus(self.get_job(job_id).status) - def get_job_result(self, job_id): + def get_job_result(self, job_id: str) -> Result: """Returns the result of a job. Args: - job_id (str): the job UUID + job_id (str): the job ID Returns: strawberryfields.engine.Result: the job result @@ -834,11 +839,11 @@ def get_job_result(self, job_id): return Result(response.json()["result"], is_stateful=False) raise RequestFailedError(self._request_error_message(response)) - def cancel_job(self, job_id): + def cancel_job(self, job_id: str): """Cancels a job. Args: - job_id (str): the job UUID + job_id (str): the job ID """ response = self._patch( "/jobs/{}".format(job_id), data={"status", JobStatus.CANCELLED.value} @@ -847,27 +852,27 @@ def cancel_job(self, job_id): return raise RequestFailedError(self._request_error_message(response)) - def ping(self): + def ping(self) -> bool: """Tests the connection to the remote backend. Returns: - bool: True if the connection is successful, and False otherwise + bool: ``True`` if the connection is successful, and ``False`` otherwise """ response = self._get("/healthz") if response.status_code == 200: - return - raise RequestFailedError(self._request_error_message(response)) + return True + return False - def _get(self, path, **kwargs): + def _get(self, path: str, **kwargs) -> requests.Response: return self._request(RequestMethod.GET, path, **kwargs) - def _post(self, path, **kwargs): + def _post(self, path: str, **kwargs) -> requests.Response: return self._request(RequestMethod.POST, path, **kwargs) - def _patch(self, path, **kwargs): + def _patch(self, path: str, **kwargs) -> requests.Response: return self._request(RequestMethod.PATCH, path, **kwargs) - def _request(self, method, path, **kwargs): + def _request(self, method: RequestMethod, path: str, **kwargs) -> requests.Response: return getattr(requests, method.value)( urljoin(self.base_url, path), headers={"Authorization": self.token}, @@ -875,7 +880,7 @@ def _request(self, method, path, **kwargs): ) @staticmethod - def _request_error_message(response): + def _request_error_message(response: requests.Response) -> str: body = response.json() return "{} ({}): {}".format( body.get("status_code", ""), body.get("code", ""), body.get("detail", "") @@ -897,30 +902,29 @@ class Job: `Connection` when a job is run. Args: - id_ (str): the job UUID + id_ (str): the job ID status (strawberryfields.engine.JobStatus): the job status connection (strawberryfields.engine.Connection): the connection over which the job is managed """ - def __init__(self, id_, status, connection): + def __init__(self, id_: str, status: JobStatus, connection: Connection): self._id = id_ self._status = status self._connection = connection - self._result = None @property - def id(self): - """The job UUID. + def id(self) -> str: + """The job ID. Returns: - str: the job UUID + str: the job ID """ return self._id @property - def status(self): + def status(self) -> JobStatus: """The job status. Returns: @@ -929,7 +933,7 @@ def status(self): return self._status @property - def result(self): + def result(self) -> Result: """The job result. 
This is only defined for complete jobs, and raises an exception for any other @@ -986,21 +990,21 @@ class JobStatus(enum.Enum): COMPLETE = "complete" FAILED = "failed" - def __repr__(self): + def __repr__(self) -> str: return self.value - def __str__(self): + def __str__(self) -> str: return self.value @property - def is_terminal(self): + def is_terminal(self) -> bool: """Checks if this status represents a final and immutable state. This method is generally used to determine if an operation is valid for a given status. Returns: - bool: True if the job is terminal, and False otherwise + bool: ``True`` if the job is terminal, and ``False`` otherwise """ return self in (JobStatus.CANCELLED, JobStatus.COMPLETE, JobStatus.FAILED) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 794ece205..e6aa6300f 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -173,7 +173,7 @@ def test_create_job_error(self, connection, monkeypatch): connection.create_job("circuit") def test_get_all_jobs(self, connection, monkeypatch): - """Tests a successful job list retrieval.""" + """Tests a successful job list request.""" jobs = [ { "id": str(i), @@ -191,14 +191,14 @@ def test_get_all_jobs(self, connection, monkeypatch): assert [job.id for job in jobs] == [str(i) for i in range(5, 10)] def test_get_all_jobs_error(self, connection, monkeypatch): - """Tests a failed job list retrieval.""" + """Tests a failed job list request.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) with pytest.raises(RequestFailedError): connection.get_all_jobs() def test_get_job(self, connection, monkeypatch): - """Tests a successful job retrieval.""" + """Tests a successful job request.""" id_, status = "123", JobStatus.COMPLETE monkeypatch.setattr( @@ -213,14 +213,14 @@ def test_get_job(self, connection, monkeypatch): assert job.status == status def test_get_job_error(self, connection, monkeypatch): - """Tests a failed job retrieval.""" + """Tests a failed job request.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) with pytest.raises(RequestFailedError): connection.get_job("123") def test_get_job_status(self, connection, monkeypatch): - """Tests a successful job status retrieval.""" + """Tests a successful job status request.""" id_, status = "123", JobStatus.COMPLETE monkeypatch.setattr( @@ -232,14 +232,14 @@ def test_get_job_status(self, connection, monkeypatch): assert connection.get_job_status(id_) == status def test_get_job_status_error(self, connection, monkeypatch): - """Tests a failed job status retrieval.""" + """Tests a failed job status request.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) with pytest.raises(RequestFailedError): connection.get_job_status("123") def test_get_job_result(self, connection, monkeypatch): - """Tests a successful job result retrieval.""" + """Tests a successful job result request.""" result_samples = [[1, 2], [3, 4]] monkeypatch.setattr( @@ -253,7 +253,7 @@ def test_get_job_result(self, connection, monkeypatch): assert result.samples.T.tolist() == result_samples def test_get_job_result_error(self, connection, monkeypatch): - """Tests a failed job result retrieval.""" + """Tests a failed job result request.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) with pytest.raises(RequestFailedError): From a161fca656a2676f637a379e45d16e5a9e20146a Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 20 Feb 2020 15:44:49 -0500 
Subject: [PATCH 131/335] Add type hints --- strawberryfields/engine.py | 379 ++++++++++++++++++------------------- 1 file changed, 189 insertions(+), 190 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index d4deb7dce..c81c124df 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -19,11 +19,11 @@ """ import abc import collections.abc -from datetime import datetime import enum import json import time -from typing import List +from datetime import datetime +from typing import List, Optional from urllib.parse import urljoin import numpy as np @@ -31,9 +31,10 @@ from strawberryfields.configuration import DEFAULT_CONFIG from strawberryfields.io import to_blackbird -from .backends import load_backend -from .backends.base import NotApplicableError, BaseBackend +from strawberryfields.program import Program +from .backends import load_backend +from .backends.base import BaseBackend, NotApplicableError # for automodapi, do not include the classes that should appear under the top-level strawberryfields namespace __all__ = ["Result", "BaseEngine", "LocalEngine", "Connection"] @@ -554,132 +555,126 @@ class JobFailedError(Exception): """Raised when a remote job enters a 'failed' status.""" -class StarshipEngine: - """A quantum program executor engine that that provides a simple interface for - running remote jobs in a synchronous or asynchronous manner. +class JobStatus(enum.Enum): + """Represents the status of a remote job. - **Example:** + This class maps a set of job statuses to the string representations returned by the + remote platform. + """ - The following example instantiates an engine with the default configuration, and - runs jobs both synchronously and asynchronously. + OPEN = "open" + QUEUED = "queued" + CANCELLED = "cancelled" + COMPLETE = "complete" + FAILED = "failed" - .. code-block:: python + def __repr__(self) -> str: + return self.value - engine = StarshipEngine("chip2") + def __str__(self) -> str: + return self.value - # Run a job synchronously - result = engine.run(program, shots=1) - # (Engine blocks until job is complete) - result # [[0, 1, 0, 2, 1, 0, 0, 0]] + @property + def is_terminal(self) -> bool: + """Checks if this status represents a final and immutable state. - # Run a job synchronously, but cancel it before it is completed - result = engine.run(program, shots=1) - ^C # KeyboardInterrupt cancels the job + This method is generally used to determine if an operation is valid for a given + status. - # Run a job asynchronously - job = engine.run_async(program, shots=1) - job.status # "queued" - job.result # InvalidJobOperationError - # (After some time...) - job.refresh() - job.status # "complete" - job.result # [[0, 1, 0, 2, 1, 0, 0, 0]] + Returns: + bool: ``True`` if the job is terminal, and ``False`` otherwise + """ + return self in (JobStatus.CANCELLED, JobStatus.COMPLETE, JobStatus.FAILED) - Args: - target (str): the target backend - connection (strawberryfields.engine.Connection): a connection to the remote job - execution platform - """ - POLLING_INTERVAL_SECONDS = 1 - VALID_TARGETS = ("chip2",) +class Job: + """Represents a remote job that can be queried for its status or result. 
- def __init__(self, target: str, connection: Connection = None): - if target not in self.VALID_TARGETS: - raise ValueError( - "Invalid engine target: {} (valid targets: {})".format( - target, self.VALID_TARGETS - ) - ) - if connection is None: - # TODO use `load_config` when implemented - config = DEFAULT_CONFIG["api"] - connection = Connection( - token=config["authentication_token"], - host=config["hostname"], - port=config["port"], - use_ssl=config["use_ssl"], - ) + This object should not be instantiated directly, but returned by an `Engine` or + `Connection` when a job is run. - self._target = target + Args: + id_ (str): the job ID + status (strawberryfields.engine.JobStatus): the job status + connection (strawberryfields.engine.Connection): the connection over which the + job is managed + """ + + def __init__(self, id_: str, status: JobStatus, connection: "Connection"): + self._id = id_ + self._status = status self._connection = connection + self._result = None @property - def target(self) -> str: - """The target backend used by the engine. + def id(self) -> str: + """The job ID. Returns: - str: the target backend used by the engine + str: the job ID """ - return self._target + return self._id @property - def connection(self) -> Connection: - """The connection object used by the engine. + def status(self) -> JobStatus: + """The job status. Returns: - strawberryfields.engine.Connection: the connection object used by the engine + strawberryfields.engine.JobStatus: the job status """ - return self._connection - - def run(self, program: Program, shots: int = 1) -> Optional[Result]: - """Runs a remote job synchronously. + return self._status - In this synchronous mode, the engine blocks until the job is completed, failed, or - cancelled. If the job completes successfully, the result is returned; if the job - fails or is cancelled, ``None`` is returned. + @property + def result(self) -> Result: + """The job result. - Args: - program (strawberryfields.Program): the quantum circuit - shots (int): the number of shots for which to run the job + This is only defined for complete jobs, and raises an exception for any other + status. Returns: - [strawberryfields.engine.Result, None]: the job result if successful, and - ``None`` otherwise + strawberryfields.engine.Result: the result """ - job = self.run_async(program, shots) - try: - while True: - job.refresh() - if job.status == JobStatus.COMPLETE: - return job.result - if job.status == JobStatus.FAILED: - raise JobFailedError( - "The computation failed on the remote platform; please try again." - ) - time.sleep(self.POLLING_INTERVAL_SECONDS) - except KeyboardInterrupt: - self._connection.cancel_job(job.id) + if self.status != JobStatus.COMPLETE: + raise AttributeError( + "The result is undefined for jobs that are not complete " + "(current status: {})".format(self.status.value) + ) + return self._result - def run_async(self, program: Program, shots: int = 1) -> Job: - """Runs a remote job asynchronously. + def refresh(self): + """Refreshes the status of the job, along with the job result if the job is + newly completed. - In this asynchronous mode, a `Job` is returned immediately, and the user can - manually refresh the status of the job. + Only a non-terminal (open or queued job) can be refreshed; an exception is + raised otherwise. 
+ """ + if self.status.is_terminal: + raise InvalidJobOperationError( + "A {} job cannot be refreshed".format(self.status.value) + ) + self._status = self._connection.get_job_status(self.id) + if self._status == JobStatus.COMPLETE: + self._result = self._connection.get_job_result(self.id) - Args: - program (strawberryfields.Program): the quantum circuit - shots (int): the number of shots for which to run the job + def cancel(self): + """Cancels the job. - Returns: - strawberryfields.engine.Job: the created remote job + Only a non-terminal (open or queued job) can be cancelled; an exception is + raised otherwise. """ - bb = to_blackbird(program) - # pylint: disable=protected-access - bb._target["name"] = self.target - # pylint: disable=protected-access - bb._target["options"] = {"shots": shots} - return self._connection.create_job(bb.serialize()) + if self.status.is_terminal: + raise InvalidJobOperationError( + "A {} job cannot be cancelled".format(self.status.value) + ) + self._connection.cancel_job(self.id) + + +class RequestMethod(enum.Enum): + """Defines the valid request methods for messages sent to the remote job platform.""" + + GET = "get" + POST = "post" + PATCH = "patch" class Connection: @@ -774,9 +769,7 @@ def create_job(self, circuit: str) -> Job: "Job creation failed: {}".format(self._request_error_message(response)) ) - def get_all_jobs( - self, after: datetime.datetime = datetime(1970, 1, 1) - ) -> List[Job]: + def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: """Gets all jobs created by the user, optionally filtered by datetime. Args: @@ -887,126 +880,132 @@ def _request_error_message(response: requests.Response) -> str: ) -class RequestMethod(enum.Enum): - """Defines the valid request methods for messages sent to the remote job platform.""" +class StarshipEngine: + """A quantum program executor engine that that provides a simple interface for + running remote jobs in a synchronous or asynchronous manner. - GET = "get" - POST = "post" - PATCH = "patch" + **Example:** + The following example instantiates an engine with the default configuration, and + runs jobs both synchronously and asynchronously. -class Job: - """Represents a remote job that can be queried for its status or result. + .. code-block:: python - This object should not be instantiated directly, but returned by an `Engine` or - `Connection` when a job is run. + engine = StarshipEngine("chip2") + + # Run a job synchronously + result = engine.run(program, shots=1) + # (Engine blocks until job is complete) + result # [[0, 1, 0, 2, 1, 0, 0, 0]] + + # Run a job synchronously, but cancel it before it is completed + result = engine.run(program, shots=1) + ^C # KeyboardInterrupt cancels the job + + # Run a job asynchronously + job = engine.run_async(program, shots=1) + job.status # "queued" + job.result # InvalidJobOperationError + # (After some time...) 
+ job.refresh() + job.status # "complete" + job.result # [[0, 1, 0, 2, 1, 0, 0, 0]] Args: - id_ (str): the job ID - status (strawberryfields.engine.JobStatus): the job status - connection (strawberryfields.engine.Connection): the connection over which the - job is managed + target (str): the target backend + connection (strawberryfields.engine.Connection): a connection to the remote job + execution platform """ - def __init__(self, id_: str, status: JobStatus, connection: Connection): - self._id = id_ - self._status = status - self._connection = connection - self._result = None + POLLING_INTERVAL_SECONDS = 1 + VALID_TARGETS = ("chip2",) - @property - def id(self) -> str: - """The job ID. + def __init__(self, target: str, connection: Connection = None): + if target not in self.VALID_TARGETS: + raise ValueError( + "Invalid engine target: {} (valid targets: {})".format( + target, self.VALID_TARGETS + ) + ) + if connection is None: + # TODO use `load_config` when implemented + config = DEFAULT_CONFIG["api"] + connection = Connection( + token=config["authentication_token"], + host=config["hostname"], + port=config["port"], + use_ssl=config["use_ssl"], + ) - Returns: - str: the job ID - """ - return self._id + self._target = target + self._connection = connection @property - def status(self) -> JobStatus: - """The job status. + def target(self) -> str: + """The target backend used by the engine. Returns: - strawberryfields.engine.JobStatus: the job status + str: the target backend used by the engine """ - return self._status + return self._target @property - def result(self) -> Result: - """The job result. - - This is only defined for complete jobs, and raises an exception for any other - status. + def connection(self) -> Connection: + """The connection object used by the engine. Returns: - strawberryfields.engine.Result: the result + strawberryfields.engine.Connection: the connection object used by the engine """ - if self.status != JobStatus.COMPLETE: - raise AttributeError( - "The result is undefined for jobs that are not complete " - "(current status: {})".format(self.status.value) - ) - return self._result + return self._connection - def refresh(self): - """Refreshes the status of the job, along with the job result if the job is - newly completed. + def run(self, program: Program, shots: int = 1) -> Optional[Result]: + """Runs a remote job synchronously. - Only a non-terminal (open or queued job) can be refreshed; an exception is - raised otherwise. - """ - if self.status.is_terminal: - raise InvalidJobOperationError( - "A {} job cannot be refreshed".format(self.status.value) - ) - self._status = self._connection.get_job_status(self.id) - if self._status == JobStatus.COMPLETE: - self._result = self._connection.get_job_result(self.id) + In this synchronous mode, the engine blocks until the job is completed, failed, or + cancelled. If the job completes successfully, the result is returned; if the job + fails or is cancelled, ``None`` is returned. - def cancel(self): - """Cancels the job. + Args: + program (strawberryfields.Program): the quantum circuit + shots (int): the number of shots for which to run the job - Only a non-terminal (open or queued job) can be cancelled; an exception is - raised otherwise. 
+ Returns: + [strawberryfields.engine.Result, None]: the job result if successful, and + ``None`` otherwise """ - if self.status.is_terminal: - raise InvalidJobOperationError( - "A {} job cannot be cancelled".format(self.status.value) - ) - self._connection.cancel_job(self.id) - - -class JobStatus(enum.Enum): - """Represents the status of a remote job. - - This class maps a set of job statuses to the string representations returned by the - remote platform. - """ - - OPEN = "open" - QUEUED = "queued" - CANCELLED = "cancelled" - COMPLETE = "complete" - FAILED = "failed" - - def __repr__(self) -> str: - return self.value + job = self.run_async(program, shots) + try: + while True: + job.refresh() + if job.status == JobStatus.COMPLETE: + return job.result + if job.status == JobStatus.FAILED: + raise JobFailedError( + "The computation failed on the remote platform; please try again." + ) + time.sleep(self.POLLING_INTERVAL_SECONDS) + except KeyboardInterrupt: + self._connection.cancel_job(job.id) - def __str__(self) -> str: - return self.value + def run_async(self, program: Program, shots: int = 1) -> Job: + """Runs a remote job asynchronously. - @property - def is_terminal(self) -> bool: - """Checks if this status represents a final and immutable state. + In this asynchronous mode, a `Job` is returned immediately, and the user can + manually refresh the status of the job. - This method is generally used to determine if an operation is valid for a given - status. + Args: + program (strawberryfields.Program): the quantum circuit + shots (int): the number of shots for which to run the job Returns: - bool: ``True`` if the job is terminal, and ``False`` otherwise + strawberryfields.engine.Job: the created remote job """ - return self in (JobStatus.CANCELLED, JobStatus.COMPLETE, JobStatus.FAILED) + bb = to_blackbird(program) + # pylint: disable=protected-access + bb._target["name"] = self.target + # pylint: disable=protected-access + bb._target["options"] = {"shots": shots} + return self._connection.create_job(bb.serialize()) class Engine(LocalEngine): From cf2db43b5b562091b6732253cd7e244ceb1e550d Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 20 Feb 2020 16:29:33 -0500 Subject: [PATCH 132/335] More tests --- strawberryfields/engine.py | 8 +- tests/frontend/test_engine.py | 169 ++++++++++++++++++++++++---------- 2 files changed, 120 insertions(+), 57 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index c81c124df..26d4c7328 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -852,9 +852,7 @@ def ping(self) -> bool: bool: ``True`` if the connection is successful, and ``False`` otherwise """ response = self._get("/healthz") - if response.status_code == 200: - return True - return False + return response.status_code == 200 def _get(self, path: str, **kwargs) -> requests.Response: return self._request(RequestMethod.GET, path, **kwargs) @@ -980,9 +978,7 @@ def run(self, program: Program, shots: int = 1) -> Optional[Result]: if job.status == JobStatus.COMPLETE: return job.result if job.status == JobStatus.FAILED: - raise JobFailedError( - "The computation failed on the remote platform; please try again." 
- ) + raise JobFailedError("The computation failed; please try again.") time.sleep(self.POLLING_INTERVAL_SECONDS) except KeyboardInterrupt: self._connection.cancel_job(job.id) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index e6aa6300f..00c82de6d 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -21,9 +21,10 @@ from strawberryfields import ops from strawberryfields.engine import ( Connection, - RequestFailedError, + InvalidJobOperationError, Job, JobStatus, + RequestFailedError, Result, StarshipEngine, ) @@ -84,71 +85,51 @@ def get_job_status(self, _id): ) -class TestStarshipEngine: - """Tests for the `StarshipEngine` class.""" - - def test_run_complete(self, connection, prog, monkeypatch): - """Tests a successful synchronous job execution.""" - id_, result_expected = "123", [[1, 2], [3, 4]] - - server = MockServer() - monkeypatch.setattr( - Connection, - "create_job", - mock_return(Job(id_=id_, status=JobStatus.OPEN, connection=connection)), - ) - monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) - monkeypatch.setattr( - Connection, - "get_job_result", - mock_return(Result(result_expected, is_stateful=False)), - ) - - engine = StarshipEngine("chip2", connection=connection) - result = engine.run(prog) +class TestResult: + """Tests for the ``Result`` class.""" - assert result.samples.T.tolist() == result_expected + def stateless_result_raises_on_state_access(self): + result = Result([[1, 2], [3, 4]], is_stateful=False) with pytest.raises(AttributeError): _ = result.state - def test_run_cancelled(self): - """Tests a manual cancellation of synchronous job execution.""" - # TODO - def test_run_async(self, connection, prog, monkeypatch): - """Tests a successful asynchronous job execution.""" - id_, result_expected = "123", [[1, 2], [3, 4]] +class TestJob: + """Tests for the ``Job`` class.""" - server = MockServer() - monkeypatch.setattr( - Connection, - "create_job", - mock_return(Job(id_=id_, status=JobStatus.OPEN, connection=connection)), - ) - monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) - monkeypatch.setattr( - Connection, - "get_job_result", - mock_return(Result(result_expected, is_stateful=False)), - ) + def incomplete_job_raises_on_result_access(self): + job = Job("abc", status=JobStatus.QUEUED) - engine = StarshipEngine("chip2", connection=connection) - job = engine.run_async(prog) - assert job.status == JobStatus.OPEN + with pytest.raises(AttributeError): + _ = job.result - for _ in range(server.REQUESTS_BEFORE_COMPLETE): + def terminal_job_raises_on_refresh(self): + job = Job("abc", status=JobStatus.COMPLETE) + + with pytest.raises(InvalidJobOperationError): job.refresh() - assert job.status == JobStatus.COMPLETE - assert job.result.samples.T.tolist() == result_expected + def terminal_job_raises_on_cancel(self): + job = Job("abc", status=JobStatus.COMPLETE) - with pytest.raises(AttributeError): - _ = job.result.state + with pytest.raises(InvalidJobOperationError): + job.cancel() class TestConnection: - """Tests for the `Connection` class.""" + """Tests for the ``Connection`` class.""" + + def test_init(self): + token, host, port, use_ssl = "token", "host", 123, True + connection = Connection(token, host, port, use_ssl) + + assert connection.token == token + assert connection.host == host + assert connection.port == port + assert connection.use_ssl == use_ssl + + assert connection.base_url == "https://host:123" def test_create_job(self, connection, monkeypatch): """Tests a 
successful job creation flow.""" @@ -258,3 +239,89 @@ def test_get_job_result_error(self, connection, monkeypatch): with pytest.raises(RequestFailedError): connection.get_job_result("123") + + def test_cancel_job(self, connection, monkeypatch): + """Tests a successful job cancellation request.""" + monkeypatch.setattr(Connection, "_patch", mock_return(mock_response(204, {}))) + + connection.cancel_job("123") + + def test_cancel_job_error(self, connection, monkeypatch): + """Tests a successful job cancellation request.""" + monkeypatch.setattr(Connection, "_patch", mock_return(mock_response(404, {}))) + + with pytest.raises(RequestFailedError): + connection.cancel_job("123") + + def test_ping_success(self, connection, monkeypatch): + monkeypatch.setattr(Connection, "_get", mock_return(mock_response(200, {}))) + + assert connection.ping() + + def test_ping_failure(self, connection, monkeypatch): + monkeypatch.setattr(Connection, "_get", mock_return(mock_response(500, {}))) + + assert not connection.ping() + + +class TestStarshipEngine: + """Tests for the ``StarshipEngine`` class.""" + + def test_run_complete(self, connection, prog, monkeypatch): + """Tests a successful synchronous job execution.""" + id_, result_expected = "123", [[1, 2], [3, 4]] + + server = MockServer() + monkeypatch.setattr( + Connection, + "create_job", + mock_return(Job(id_=id_, status=JobStatus.OPEN, connection=connection)), + ) + monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) + monkeypatch.setattr( + Connection, + "get_job_result", + mock_return(Result(result_expected, is_stateful=False)), + ) + + engine = StarshipEngine("chip2", connection=connection) + result = engine.run(prog) + + assert result.samples.T.tolist() == result_expected + + with pytest.raises(AttributeError): + _ = result.state + + def test_run_cancelled(self): + """Tests a manual cancellation of synchronous job execution.""" + # TODO + + def test_run_async(self, connection, prog, monkeypatch): + """Tests a successful asynchronous job execution.""" + id_, result_expected = "123", [[1, 2], [3, 4]] + + server = MockServer() + monkeypatch.setattr( + Connection, + "create_job", + mock_return(Job(id_=id_, status=JobStatus.OPEN, connection=connection)), + ) + monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) + monkeypatch.setattr( + Connection, + "get_job_result", + mock_return(Result(result_expected, is_stateful=False)), + ) + + engine = StarshipEngine("chip2", connection=connection) + job = engine.run_async(prog) + assert job.status == JobStatus.OPEN + + for _ in range(server.REQUESTS_BEFORE_COMPLETE): + job.refresh() + + assert job.status == JobStatus.COMPLETE + assert job.result.samples.T.tolist() == result_expected + + with pytest.raises(AttributeError): + _ = job.result.state From 0ab1a1aee8bd44f52978f59e1c33168e895f0143 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 20 Feb 2020 17:28:27 -0500 Subject: [PATCH 133/335] Cleanup --- starship | 4 ++-- strawberryfields/engine.py | 10 ++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/starship b/starship index e8c7756dd..1e532a5ed 100755 --- a/starship +++ b/starship @@ -14,8 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""A simple command-line interface for computing quantum programs on the Xanadu cloud -platform. +"""A standalone command-line interface for computing quantum programs on the Xanadu +cloud platform. 
""" import sys diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 26d4c7328..49c31d8d4 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -578,7 +578,7 @@ def __str__(self) -> str: def is_terminal(self) -> bool: """Checks if this status represents a final and immutable state. - This method is generally used to determine if an operation is valid for a given + This method is primarily used to determine if an operation is valid for a given status. Returns: @@ -694,6 +694,8 @@ class Connection: MAX_JOBS_REQUESTED = 100 JOB_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" + # pylint: disable=bad-continuation + # See: https://github.com/PyCQA/pylint/issues/289 def __init__( self, token, host="platform.strawberryfields.ai", port=443, use_ssl=True ): @@ -959,7 +961,7 @@ def connection(self) -> Connection: def run(self, program: Program, shots: int = 1) -> Optional[Result]: """Runs a remote job synchronously. - In this synchronous mode, the engine blocks until the job is completed, failed, or + In the synchronous mode, the engine blocks until the job is completed, failed, or cancelled. If the job completes successfully, the result is returned; if the job fails or is cancelled, ``None`` is returned. @@ -986,8 +988,8 @@ def run(self, program: Program, shots: int = 1) -> Optional[Result]: def run_async(self, program: Program, shots: int = 1) -> Job: """Runs a remote job asynchronously. - In this asynchronous mode, a `Job` is returned immediately, and the user can - manually refresh the status of the job. + In the asynchronous mode, a `Job` is returned immediately, and the user can + manually refresh the status and result of the job. Args: program (strawberryfields.Program): the quantum circuit From 7d62a232c6dc53503310f2f8892d13db49b66808 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 20 Feb 2020 21:21:17 -0500 Subject: [PATCH 134/335] Testing config object creation, look_for_config_file function --- strawberryfields/configuration.py | 21 +++--- tests/frontend/test_configuration.py | 104 +++++++++++++++++++++------ 2 files changed, 94 insertions(+), 31 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 7ac0ffc4a..910b2df58 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -19,7 +19,7 @@ import logging as log import toml -from appdirs import userconfig_dir +from appdirs import user_config_dir log.getLogger() @@ -67,11 +67,12 @@ def load_config(filename="config.toml", **kwargs): else: log.info("No Strawberry Fields configuration file found.") - config = update_config_from_environmental_variables(_config) + config = update_config_from_environmental_variables(config) return config def create_config_object(**kwargs): + print('kwargs in create_config_object', kwargs) authentication_token = kwargs.get("authentication_token", "") hostname = kwargs.get("hostname", "localhost") use_ssl = kwargs.get("use_ssl", True) @@ -83,8 +84,8 @@ def create_config_object(**kwargs): "authentication_token": authentication_token, "hostname": hostname, "use_ssl": use_ssl, - "port": port, - "debug": debug + "debug": debug, + "port": port } } return config @@ -92,7 +93,7 @@ def create_config_object(**kwargs): def update_config_from_config_file(config, config_file): # Here an example for sectionconfig is api - for section, sectionconfig in _config.items(): + for section, sectionconfig in config.items(): for key in sectionconfig: if key in config_file[section]: # Update from configuration file 
@@ -104,10 +105,10 @@ def look_for_config_file(filename="config.toml"): # Search the current directory, the directory under environment # variable SF_CONF, and default user config directory, in that order. current_dir = os.getcwd() - envconfig_dir = os.environ.get("SF_CONF", "") - userconfig_dir = user_config_dir("strawberryfields", "Xanadu") + sf_env_config_dir = os.environ.get("SF_CONF", "") + sf_user_config_dir = user_config_dir("strawberryfields", "Xanadu") - directories = [current_dir, envconfig_dir, user_config_dir] + directories = [current_dir, sf_env_config_dir, sf_user_config_dir] for directory in directories: filepath = os.path.join(directory, filename) try: @@ -116,6 +117,7 @@ def look_for_config_file(filename="config.toml"): except FileNotFoundError: config_file = None + # TODO: maybe we need a merge here? return config_file def update_config_from_environmental_variables(config): @@ -134,7 +136,6 @@ def load_config_file(filepath): """ with open(filepath, "r") as f: config_file = toml.load(f) - return config_file -configuration = load_config() +#configuration = load_config() diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index f29094d0f..8816e7dae 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -33,6 +33,7 @@ hostname = "localhost" use_ssl = true debug = false +port = 443 """ TEST_FILE_ONE_VALUE = """\ @@ -55,37 +56,99 @@ class TestConfiguration: """Tests for the configuration class""" - def test_loading_current_directory(self, tmpdir, monkeypatch): - """Test that the default configuration file can be loaded - from the current directory.""" + def test_create_config_object(self): + assert conf.create_config_object(authentication_token="071cdcce-9241-4965-93af-4a4dbc739135") == EXPECTED_CONFIG + + def test_load_config_file(self, tmpdir, monkeypatch): filename = tmpdir.join("config.toml") with open(filename, "w") as f: f.write(TEST_FILE) + config_file = conf.load_config_file(filepath=filename) + + assert config_file == EXPECTED_CONFIG + + def test_loading_current_directory(self, tmpdir, monkeypatch): + """Test that the default configuration file is loaded from the current + directory, if found.""" + filename = "config.toml" + with monkeypatch.context() as m: - m.setattr(os, "getcwd", lambda: str(tmpdir)) - os.environ["SF_CONF"] = "" - config = conf.Configuration() + m.setattr(os, "getcwd", lambda: tmpdir) + m.setattr(conf, "load_config_file", lambda filepath: filepath) + config_file = conf.look_for_config_file(filename=filename) - assert config._config == EXPECTED_CONFIG - assert config.path == filename + assert config_file == tmpdir.join(filename) - def test_loading_env_variable(self, tmpdir): - """Test that the default configuration file can be loaded - via an environment variable.""" - # TODO: This test does not work if there is already a configuration - # file in place - filename = tmpdir.join("config.toml") + def test_loading_env_variable(self, tmpdir, monkeypatch): + """Test that the correct configuration file is found using the correct + environmental variable. 
- with open(filename, "w") as f: - f.write(TEST_FILE) + This is a test case for when there is no configuration file in the + current directory.""" - os.environ["SF_CONF"] = str(tmpdir) - config = conf.Configuration() + filename = "config.toml" - assert config._config == EXPECTED_CONFIG - assert config.path == filename + def raise_wrapper(ex): + raise ex + + with monkeypatch.context() as m: + m.setattr(os, "getcwd", lambda: "NoConfigFileHere") + m.setattr(os.environ, "get", lambda x, y: tmpdir if x=="SF_CONF" else "NoConfigFileHere") + m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) + + # Need to mock the module specific function + # m.setattr(conf, "user_config_dir", lambda *args: "NotTheFileName") + # os.environ["SF_CONF"] = lambda: FileNotFoundError + config_file = conf.look_for_config_file(filename=filename) + assert config_file == tmpdir.join("config.toml") + + def test_loading_user_config_dir(self, tmpdir, monkeypatch): + """Test that the correct configuration file is found using the correct + argument to the user_config_dir function. + + This is a test case for when there is no configuration file: + -in the current directory or + -in the directory contained in the corresponding environmental + variable.""" + filename = "config.toml" + + def raise_wrapper(ex): + raise ex + + with monkeypatch.context() as m: + m.setattr(os, "getcwd", lambda: "NoConfigFileHere") + m.setattr(os.environ, "get", lambda *args: "NoConfigFileHere") + m.setattr(conf, "user_config_dir", lambda x, *args: tmpdir if x=="strawberryfields" else "NoConfigFileHere") + m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) + + config_file = conf.look_for_config_file(filename=filename) + assert config_file == tmpdir.join("config.toml") + + def test_no_config_file_found_returns_none(self, tmpdir, monkeypatch): + """Test that the the look_for_config_file returns None if the + configuration file is nowhere to be found. 
+ + This is a test case for when there is no configuration file: + -in the current directory or + -in the directory contained in the corresponding environmental + variable + -in the user_config_dir directory of Strawberry Fields.""" + filename = "config.toml" + + def raise_wrapper(ex): + raise ex + + with monkeypatch.context() as m: + m.setattr(os, "getcwd", lambda: "NoConfigFileHere") + m.setattr(os.environ, "get", lambda *args: "NoConfigFileHere") + m.setattr(conf, "user_config_dir", lambda *args: "NoConfigFileHere") + m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) + + config_file = conf.look_for_config_file(filename=filename) + + assert config_file is None def test_loading_absolute_path(self, tmpdir, monkeypatch): """Test that the default configuration file can be loaded @@ -106,7 +169,6 @@ def test_loading_absolute_path(self, tmpdir, monkeypatch): os.environ["SF_CONF"] = "" config = conf.Configuration(name=str(filename)) - print(config._config, EXPECTED_CONFIG) assert config._config == EXPECTED_CONFIG assert config.path == filename From 1140d439072736c9c5d3f210194a770d4f3efbb4 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 21 Feb 2020 10:16:05 -0500 Subject: [PATCH 135/335] Request numpy job result directly from remote --- strawberryfields/engine.py | 37 +++++++++++++++--------- tests/frontend/test_engine.py | 53 ++++++++++++++++++++++++----------- 2 files changed, 60 insertions(+), 30 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 49c31d8d4..52b894033 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -20,10 +20,11 @@ import abc import collections.abc import enum +import io import json import time from datetime import datetime -from typing import List, Optional +from typing import Dict, List, Optional from urllib.parse import urljoin import numpy as np @@ -89,8 +90,8 @@ def __init__(self, samples, is_stateful=True): self._is_stateful = is_stateful # ``samples`` arrives as a list of arrays, need to convert here to a multidimensional array - if len(np.shape(samples)) > 1: - samples = np.stack(samples, 1) + # if len(np.shape(samples)) > 1: + # samples = np.stack(samples, 1) self._samples = samples @property @@ -829,9 +830,16 @@ def get_job_result(self, job_id: str) -> Result: Returns: strawberryfields.engine.Result: the job result """ - response = self._get("/jobs/{}/result".format(job_id)) + response = self._get( + "/jobs/{}/result".format(job_id), {"Accept": "application/x-numpy"}, + ) if response.status_code == 200: - return Result(response.json()["result"], is_stateful=False) + # Read the numpy binary data in the payload into memory + with io.BytesIO() as buf: + buf.write(response.body) + buf.seek(0) + samples = np.load(buf) + return Result(samples, is_stateful=False) raise RequestFailedError(self._request_error_message(response)) def cancel_job(self, job_id: str): @@ -856,19 +864,22 @@ def ping(self) -> bool: response = self._get("/healthz") return response.status_code == 200 - def _get(self, path: str, **kwargs) -> requests.Response: - return self._request(RequestMethod.GET, path, **kwargs) + def _get(self, path: str, headers: Dict = None, **kwargs) -> requests.Response: + return self._request(RequestMethod.GET, path, headers, **kwargs) - def _post(self, path: str, **kwargs) -> requests.Response: - return self._request(RequestMethod.POST, path, **kwargs) + def _post(self, path: str, headers: Dict = None, **kwargs) -> 
requests.Response: + return self._request(RequestMethod.POST, path, headers, **kwargs) - def _patch(self, path: str, **kwargs) -> requests.Response: - return self._request(RequestMethod.PATCH, path, **kwargs) + def _patch(self, path: str, headers: Dict = None, **kwargs) -> requests.Response: + return self._request(RequestMethod.PATCH, path, headers, **kwargs) - def _request(self, method: RequestMethod, path: str, **kwargs) -> requests.Response: + def _request( + self, method: RequestMethod, path: str, headers: Dict = None, **kwargs + ) -> requests.Response: + headers = {} if headers is None else headers return getattr(requests, method.value)( urljoin(self.base_url, path), - headers={"Authorization": self.token}, + headers={"Authorization": self.token, **headers}, **kwargs ) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 00c82de6d..9c015c446 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -13,8 +13,10 @@ # limitations under the License. r"""Unit tests for engine.py""" from datetime import datetime +import io from unittest.mock import MagicMock +import numpy as np import pytest import strawberryfields as sf @@ -57,11 +59,14 @@ def mock_return(return_value): return lambda *args, **kwargs: return_value -def mock_response(status_code, json_return_value): - """A helper function for defining a mock response from the remote platform.""" +def mock_response(status_code, json_body=None, binary_body=None): + """A helper function for creating a mock response with a JSON or binary body.""" response = MagicMock() response.status_code = status_code - response.json.return_value = json_return_value + if json_body: + response.json.return_value = json_body + if binary_body: + response.body = binary_body return response @@ -89,6 +94,8 @@ class TestResult: """Tests for the ``Result`` class.""" def stateless_result_raises_on_state_access(self): + """Tests that `result.state` raises an error for a stateless result. 
+ """ result = Result([[1, 2], [3, 4]], is_stateful=False) with pytest.raises(AttributeError): @@ -99,19 +106,24 @@ class TestJob: """Tests for the ``Job`` class.""" def incomplete_job_raises_on_result_access(self): - job = Job("abc", status=JobStatus.QUEUED) + """Tests that `job.result` raises an error for an incomplete job.""" + job = Job("abc", status=JobStatus.QUEUED, connection=Connection) with pytest.raises(AttributeError): _ = job.result def terminal_job_raises_on_refresh(self): - job = Job("abc", status=JobStatus.COMPLETE) + """Tests that `job.refresh()` raises an error for a complete, failed, or + cancelled job.""" + job = Job("abc", status=JobStatus.COMPLETE, connection=Connection) with pytest.raises(InvalidJobOperationError): job.refresh() def terminal_job_raises_on_cancel(self): - job = Job("abc", status=JobStatus.COMPLETE) + """Tests that `job.cancel()` raises an error for a complete, failed, or + aleady cancelled job.""" + job = Job("abc", status=JobStatus.COMPLETE, connection=Connection) with pytest.raises(InvalidJobOperationError): job.cancel() @@ -121,6 +133,7 @@ class TestConnection: """Tests for the ``Connection`` class.""" def test_init(self): + """Tests that a ``Connection`` is initialized correctly.""" token, host, port, use_ssl = "token", "host", 123, True connection = Connection(token, host, port, use_ssl) @@ -221,17 +234,20 @@ def test_get_job_status_error(self, connection, monkeypatch): def test_get_job_result(self, connection, monkeypatch): """Tests a successful job result request.""" - result_samples = [[1, 2], [3, 4]] + result_samples = np.array([[1, 2], [3, 4]], dtype=np.int8) - monkeypatch.setattr( - Connection, - "_get", - mock_return(mock_response(200, {"result": result_samples})), - ) + with io.BytesIO() as buf: + np.save(buf, result_samples) + buf.seek(0) + monkeypatch.setattr( + Connection, + "_get", + mock_return(mock_response(200, binary_body=buf.getvalue())), + ) result = connection.get_job_result("123") - assert result.samples.T.tolist() == result_samples + assert np.array_equal(result.samples, result_samples) def test_get_job_result_error(self, connection, monkeypatch): """Tests a failed job result request.""" @@ -244,6 +260,7 @@ def test_cancel_job(self, connection, monkeypatch): """Tests a successful job cancellation request.""" monkeypatch.setattr(Connection, "_patch", mock_return(mock_response(204, {}))) + # A successful cancellation does not raise an exception connection.cancel_job("123") def test_cancel_job_error(self, connection, monkeypatch): @@ -254,11 +271,13 @@ def test_cancel_job_error(self, connection, monkeypatch): connection.cancel_job("123") def test_ping_success(self, connection, monkeypatch): + """Tests a successful ping to the remote host.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(200, {}))) assert connection.ping() def test_ping_failure(self, connection, monkeypatch): + """Tests a failed ping to the remote host.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(500, {}))) assert not connection.ping() @@ -269,7 +288,7 @@ class TestStarshipEngine: def test_run_complete(self, connection, prog, monkeypatch): """Tests a successful synchronous job execution.""" - id_, result_expected = "123", [[1, 2], [3, 4]] + id_, result_expected = "123", np.array([[1, 2], [3, 4]], dtype=np.int8) server = MockServer() monkeypatch.setattr( @@ -287,7 +306,7 @@ def test_run_complete(self, connection, prog, monkeypatch): engine = StarshipEngine("chip2", connection=connection) result = engine.run(prog) - 
assert result.samples.T.tolist() == result_expected + assert np.array_equal(result.samples, result_expected) with pytest.raises(AttributeError): _ = result.state @@ -298,7 +317,7 @@ def test_run_cancelled(self): def test_run_async(self, connection, prog, monkeypatch): """Tests a successful asynchronous job execution.""" - id_, result_expected = "123", [[1, 2], [3, 4]] + id_, result_expected = "123", np.array([[1, 2], [3, 4]], dtype=np.int8) server = MockServer() monkeypatch.setattr( @@ -321,7 +340,7 @@ def test_run_async(self, connection, prog, monkeypatch): job.refresh() assert job.status == JobStatus.COMPLETE - assert job.result.samples.T.tolist() == result_expected + assert np.array_equal(job.result.samples, result_expected) with pytest.raises(AttributeError): _ = job.result.state From c53a6aafc3600581ecdd5de94b5736403f436579 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 21 Feb 2020 10:36:06 -0500 Subject: [PATCH 136/335] Bugfixes --- starship | 16 ++++++---------- strawberryfields/engine.py | 2 +- tests/frontend/test_engine.py | 2 +- 3 files changed, 8 insertions(+), 12 deletions(-) diff --git a/starship b/starship index 1e532a5ed..b97905859 100755 --- a/starship +++ b/starship @@ -14,8 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""A standalone command-line interface for computing quantum programs on the Xanadu -cloud platform. +"""A standalone command-line interface for computing quantum programs on a remote +backend. """ import sys @@ -26,9 +26,7 @@ from strawberryfields.io import load if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="run a blackbird script on the Xanadu cloud platform" - ) + parser = argparse.ArgumentParser(description="run a blackbird script") parser.add_argument( "--token", "-t", help="the API authentication token", required=True ) @@ -45,9 +43,7 @@ if __name__ == "__main__": args = parser.parse_args() - connection = Connection( - token=args.token, host="platform.strawberryfields.ai", port=443, use_ssl=True, - ) + connection = Connection(token=args.token) if args.ping: connection.ping() @@ -63,6 +59,6 @@ if __name__ == "__main__": if result and result.samples is not None: if args.output: with open(args.output, "w") as file: - file.write(str(result.samples.T)) + file.write(str(result.samples)) else: - sys.stdout.write(str(result.samples.T)) + sys.stdout.write(str(result.samples)) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 52b894033..01c1adb3f 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -836,7 +836,7 @@ def get_job_result(self, job_id: str) -> Result: if response.status_code == 200: # Read the numpy binary data in the payload into memory with io.BytesIO() as buf: - buf.write(response.body) + buf.write(response.content) buf.seek(0) samples = np.load(buf) return Result(samples, is_stateful=False) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 9c015c446..953a948be 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -66,7 +66,7 @@ def mock_response(status_code, json_body=None, binary_body=None): if json_body: response.json.return_value = json_body if binary_body: - response.body = binary_body + response.content = binary_body return response From abcbccb26671880209e7341305d87fa5d3e1caae Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 21 Feb 2020 11:07:03 -0500 Subject: [PATCH 137/335] Fix issues regarding job result dimensions --- 
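A minimal sketch of the shape convention this change assumes: the remote
platform is taken to return samples as a (shots, modes) array in the
application/x-numpy payload, and the connection transposes it so that
``Result.samples`` keeps the (modes, shots) layout used elsewhere in
Strawberry Fields. The array values below are placeholders, not real output.

    import io

    import numpy as np

    # Assumed wire format: 2 shots over 2 modes.
    received = np.array([[1, 2], [3, 4]], dtype=np.int8)

    # Round-trip through a binary buffer the way the connection reads the payload.
    with io.BytesIO() as buf:
        np.save(buf, received)
        buf.seek(0)
        samples = np.load(buf)

    # One row per mode, one column per shot after the transpose.
    assert samples.T.tolist() == [[1, 3], [2, 4]]
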
strawberryfields/engine.py | 9 ++++++--- tests/frontend/test_engine.py | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 01c1adb3f..fdd3a7c6f 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -90,8 +90,8 @@ def __init__(self, samples, is_stateful=True): self._is_stateful = is_stateful # ``samples`` arrives as a list of arrays, need to convert here to a multidimensional array - # if len(np.shape(samples)) > 1: - # samples = np.stack(samples, 1) + if len(np.shape(samples)) > 1: + samples = np.stack(samples, 1) self._samples = samples @property @@ -839,7 +839,10 @@ def get_job_result(self, job_id: str) -> Result: buf.write(response.content) buf.seek(0) samples = np.load(buf) - return Result(samples, is_stateful=False) + + # NOTE To maintain consistency with other SF components for now, transpose + # the result array from (shots, modes) to (modes, shots) + return Result(samples.T, is_stateful=False) raise RequestFailedError(self._request_error_message(response)) def cancel_job(self, job_id: str): diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 953a948be..f5515c650 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -306,7 +306,7 @@ def test_run_complete(self, connection, prog, monkeypatch): engine = StarshipEngine("chip2", connection=connection) result = engine.run(prog) - assert np.array_equal(result.samples, result_expected) + assert np.array_equal(result.samples.T, result_expected) with pytest.raises(AttributeError): _ = result.state @@ -340,7 +340,7 @@ def test_run_async(self, connection, prog, monkeypatch): job.refresh() assert job.status == JobStatus.COMPLETE - assert np.array_equal(job.result.samples, result_expected) + assert np.array_equal(job.result.samples.T, result_expected) with pytest.raises(AttributeError): _ = job.result.state From 5a35393ac0e52cd93ca4eb50337f871850a34005 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 21 Feb 2020 12:28:39 -0500 Subject: [PATCH 138/335] Clean up docs --- strawberryfields/engine.py | 61 ++++++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 25 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index fdd3a7c6f..785fc8b7b 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -583,7 +583,7 @@ def is_terminal(self) -> bool: status. Returns: - bool: ``True`` if the job is terminal, and ``False`` otherwise + bool: ``True`` if the job status is terminal, and ``False`` otherwise """ return self in (JobStatus.CANCELLED, JobStatus.COMPLETE, JobStatus.FAILED) @@ -591,8 +591,8 @@ def is_terminal(self) -> bool: class Job: """Represents a remote job that can be queried for its status or result. - This object should not be instantiated directly, but returned by an `Engine` or - `Connection` when a job is run. + This object should typically not be instantiated directly, but returned by an + `Engine` or `Connection` when a job is run. Args: id_ (str): the job ID @@ -633,7 +633,7 @@ def result(self) -> Result: status. Returns: - strawberryfields.engine.Result: the result + strawberryfields.engine.Result: the job result """ if self.status != JobStatus.COMPLETE: raise AttributeError( @@ -646,8 +646,7 @@ def refresh(self): """Refreshes the status of the job, along with the job result if the job is newly completed. - Only a non-terminal (open or queued job) can be refreshed; an exception is - raised otherwise. 
+ Only an open or queued job can be refreshed; an exception is raised otherwise. """ if self.status.is_terminal: raise InvalidJobOperationError( @@ -660,8 +659,7 @@ def refresh(self): def cancel(self): """Cancels the job. - Only a non-terminal (open or queued job) can be cancelled; an exception is - raised otherwise. + Only an open or queued job can be cancelled; an exception is raised otherwise. """ if self.status.is_terminal: raise InvalidJobOperationError( @@ -671,7 +669,7 @@ def cancel(self): class RequestMethod(enum.Enum): - """Defines the valid request methods for messages sent to the remote job platform.""" + """Defines the valid request methods for messages sent to the remote platform.""" GET = "get" POST = "post" @@ -698,7 +696,11 @@ class Connection: # pylint: disable=bad-continuation # See: https://github.com/PyCQA/pylint/issues/289 def __init__( - self, token, host="platform.strawberryfields.ai", port=443, use_ssl=True + self, + token: str, + host: str = "platform.strawberryfields.ai", + port: int = 443, + use_ssl: bool = True, ): self._token = token self._host = host @@ -769,15 +771,17 @@ def create_job(self, circuit: str) -> Job: connection=self, ) raise RequestFailedError( - "Job creation failed: {}".format(self._request_error_message(response)) + "Job creation failed: {}".format(self._format_error_message(response)) ) - def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: - """Gets all jobs created by the user, optionally filtered by datetime. + def get_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: + """Gets a list of jobs created by the user, optionally filtered by datetime. + + A maximum of the 100 most recent jobs are returned. Args: after (datetime.datetime): if provided, only jobs more recently created - then ``after`` are returned + then ``after`` are returned Returns: List[strawberryfields.engine.Job]: the jobs @@ -790,7 +794,7 @@ def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: if datetime.strptime(info["created_at"], self.JOB_TIMESTAMP_FORMAT) > after ] - raise RequestFailedError(self._request_error_message(response)) + raise RequestFailedError(self._format_error_message(response)) def get_job(self, job_id: str) -> Job: """Gets a job. @@ -808,7 +812,7 @@ def get_job(self, job_id: str) -> Job: status=JobStatus(response.json()["status"]), connection=self, ) - raise RequestFailedError(self._request_error_message(response)) + raise RequestFailedError(self._format_error_message(response)) def get_job_status(self, job_id: str) -> JobStatus: """Returns the status of a job. @@ -840,10 +844,11 @@ def get_job_result(self, job_id: str) -> Result: buf.seek(0) samples = np.load(buf) - # NOTE To maintain consistency with other SF components for now, transpose - # the result array from (shots, modes) to (modes, shots) + # NOTE To maintain consistency with other components for now, transpose + # the received result array from (shots, modes) to (modes, shots), + # which allows us to keep the logic in `Result.samples` unchanged return Result(samples.T, is_stateful=False) - raise RequestFailedError(self._request_error_message(response)) + raise RequestFailedError(self._format_error_message(response)) def cancel_job(self, job_id: str): """Cancels a job. 
@@ -856,7 +861,7 @@ def cancel_job(self, job_id: str): ) if response.status_code == 204: return - raise RequestFailedError(self._request_error_message(response)) + raise RequestFailedError(self._format_error_message(response)) def ping(self) -> bool: """Tests the connection to the remote backend. @@ -867,17 +872,23 @@ def ping(self) -> bool: response = self._get("/healthz") return response.status_code == 200 - def _get(self, path: str, headers: Dict = None, **kwargs) -> requests.Response: + def _get( + self, path: str, headers: Dict[str, str] = None, **kwargs + ) -> requests.Response: return self._request(RequestMethod.GET, path, headers, **kwargs) - def _post(self, path: str, headers: Dict = None, **kwargs) -> requests.Response: + def _post( + self, path: str, headers: Dict[str, str] = None, **kwargs + ) -> requests.Response: return self._request(RequestMethod.POST, path, headers, **kwargs) - def _patch(self, path: str, headers: Dict = None, **kwargs) -> requests.Response: + def _patch( + self, path: str, headers: Dict[str, str] = None, **kwargs + ) -> requests.Response: return self._request(RequestMethod.PATCH, path, headers, **kwargs) def _request( - self, method: RequestMethod, path: str, headers: Dict = None, **kwargs + self, method: RequestMethod, path: str, headers: Dict[str, str] = None, **kwargs ) -> requests.Response: headers = {} if headers is None else headers return getattr(requests, method.value)( @@ -887,7 +898,7 @@ def _request( ) @staticmethod - def _request_error_message(response: requests.Response) -> str: + def _format_error_message(response: requests.Response) -> str: body = response.json() return "{} ({}): {}".format( body.get("status_code", ""), body.get("code", ""), body.get("detail", "") From a95f14d761b3fe864ec1988b1b404278a9ebc28e Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 21 Feb 2020 12:34:48 -0500 Subject: [PATCH 139/335] APIClient has been absorbed into the new classes --- strawberryfields/api_client.py | 562 ------------------------------ tests/frontend/test_api_client.py | 415 ---------------------- 2 files changed, 977 deletions(-) delete mode 100644 strawberryfields/api_client.py delete mode 100644 tests/frontend/test_api_client.py diff --git a/strawberryfields/api_client.py b/strawberryfields/api_client.py deleted file mode 100644 index 8066c8d16..000000000 --- a/strawberryfields/api_client.py +++ /dev/null @@ -1,562 +0,0 @@ -# Copyright 2019 Xanadu Quantum Technologies Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This module provides a thin client that communicates with the Xanadu Platform API over the HTTP -protocol, based on the requests module. It also provides helper classes to facilitate interacting -with this API via the Resource subclasses, as well as the ResourceManager wrapper around APIClient -that is available for each resource. - -A single :class:`~.APIClient` instance can be used throughout one's session in the application. 
-The application will attempt to configure the :class:`~.APIClient` instance using a configuration -file or defaults, but the user can choose to override various parameters of the :class:`~.APIClient` -manually. - -A typical use looks like this: - .. code-block:: python - - job = Job() - circuit = ''' - name StateTeleportation - version 1.0 - target gaussian (shots=1000) - - complex alpha = 1+0.5j - Coherent(alpha) | 0 - Squeezed(-4) | 1 - Squeezed(4) | 2 - BSgate(pi/4, 0) | (1, 2) - BSgate(pi/4, 0) | (0, 1) - MeasureX | 0 - MeasureP | 1 - Xgate(sqrt(2)*q0) | 2 - Zgate(sqrt(2)*q1) | 2 - MeasureHeterodyne() | 2 - ''' - job.manager.create(circuit=circuit) - - job.id # Returns the Job's id Field for the job that was sent to the server - job.reload() # Fetches the latest job data from the server - job.status # Prints the status of this job - job.result # Returns a JobResult object - job.circuit # Returns a JobCircuit object - - job.result.reload() # Reloads the JobResult object from the API - - job.manager.get(1536) # Fetches job 1536 from the server and updates the instance -""" - - -import urllib -import json -import warnings - -import dateutil.parser - -import requests -from strawberryfields import configuration - - -def join_path(base_path, path): - """ - Joins two paths, a base path and another path and returns a string. - - Args: - base_path (str): The left side of the joined path. - path (str): The right side of the joined path. - - Returns: - str: A joined path. - """ - return urllib.parse.urljoin("{}/".format(base_path), path) - - -class MethodNotSupportedException(TypeError): - """ - Exception to be raised when a ResourceManager method is not supported for a - particular Resource. - """ - - pass - - -class ObjectAlreadyCreatedException(TypeError): - """ - Exception to be raised when an object has already been created but the user - is attempting to create it again. - """ - - pass - - -class JobNotQueuedError(Exception): - """ - Raised when a job is not successfully queued for whatever reason. - """ - - pass - - -class JobExecutionError(Exception): - """ - Raised when job execution failed and a job result does not exist. - """ - - pass - - -class APIClient: - """ - An object that allows the user to connect to the Xanadu Platform API. - """ - - USER_AGENT = "strawberryfields-api-client/0.1" - - ALLOWED_HOSTNAMES = ["localhost", "localhost:8080", "platform.strawberryfields.ai"] - - DEFAULT_HOSTNAME = "localhost" - - def __init__(self, **kwargs): - """ - Initialize the API client with various parameters. 
- """ - self._config = self.get_configuration_from_config() - - # Override any values that are explicitly passed when initializing client - self._config.update(kwargs) - - if self._config["hostname"] is None: - raise ValueError("hostname parameter is missing") - - if self._config["hostname"] not in self.ALLOWED_HOSTNAMES: - raise ValueError("hostname parameter not in allowed list") - - self.USE_SSL = self._config["use_ssl"] - if not self.USE_SSL: - warnings.warn("Connecting insecurely to API server", UserWarning) - - self.HOSTNAME = self._config["hostname"] - self.BASE_URL = "{}://{}".format("https" if self.USE_SSL else "http", self.HOSTNAME) - if self._config["port"] != 443: - self.BASE_URL = "{}:{}".format(self.BASE_URL, self._config["port"]) - self.AUTHENTICATION_TOKEN = self._config["authentication_token"] - self.HEADERS = {"User-Agent": self.USER_AGENT} - self.DEBUG = self._config["debug"] - - if self.AUTHENTICATION_TOKEN: - self.set_authorization_header(self.AUTHENTICATION_TOKEN) - - if self.DEBUG: - self.errors = [] - self.responses = [] - - def get_configuration_from_config(self): - """ - Retrieve configuration from environment variables or config file based on Strawberry Fields - configuration. - """ - return configuration.Configuration().api - - def authenticate(self, username, password): - """ - Retrieve an authentication token from the server via username - and password authentication and calls set_authorization_header. - - Args: - username (str): a user name - password (str): password - """ - raise NotImplementedError() - - def set_authorization_header(self, authentication_token): - """ - Adds the authorization header to the headers dictionary to be included - with all API requests. - - Args: - authentication_token (str): an authentication token used to access the API - """ - self.HEADERS["Authorization"] = authentication_token - - def join_path(self, path): - """ - Joins a base url with an additional path (e.g., a resource name and ID). - - Args: - path (str): A path to be joined with ``BASE_URL`` - - Returns: - str: resulting joined path - """ - return join_path(self.BASE_URL, path) - - def request(self, method, **params): - """ - Calls ``method`` with ``params`` after applying headers. Records the request type and - parameters to ``self.errors`` if the request is not successful, and the response to - ``self.responses`` if a response is returned from the server. - - Args: - method: one of ``requests.get`` or ``requests.post`` - **params: the parameters to pass on to the method (e.g. ``url``, ``data``, etc.) - - Returns: - requests.Response: a response object, or None if no response could be fetched - """ - supported_methods = (requests.get, requests.post) - if method not in supported_methods: - raise TypeError("Unexpected or unsupported method provided") - - params["headers"] = self.HEADERS - - try: - response = method(**params) - except Exception as e: - if self.DEBUG: - self.errors.append((method, params, e)) - raise - - if self.DEBUG: - self.responses.append(response) - - return response - - def get(self, path): - """ - Sends a GET request to the provided path. Returns a response object. - - Args: - path (str): path to send the GET request to - - Returns: - requests.Response: A response object, or None if no response could be fetched - """ - return self.request(requests.get, url=self.join_path(path)) - - def post(self, path, payload): - """ - Converts payload to a JSON string. Sends a POST request to the provided - path. Returns a response object. 
- - Args: - path (str): path to send the GET request to - payload: JSON serializable object to be sent to the server - - Returns: - requests.Response: A response object, or None if no response could be fetched - """ - return self.request(requests.post, url=self.join_path(path), data=json.dumps(payload)) - - -class ResourceManager: - """ - This class handles all interactions with APIClient by the resource. - """ - - http_response_data = None - http_response_status_code = None - errors = None - - def __init__(self, resource, client=None): - """ - Initialize the manager with resource and client instances. A client - instance is used as a persistent HTTP communications object, and a - resource instance corresponds to a particular type of resource (e.g., - Job) - """ - self.resource = resource - self.client = client or APIClient() - self.errors = [] - - def join_path(self, path): - """ - Joins a resource base path with an additional path (e.g., an ID) - """ - return join_path(self.resource.PATH, path) - - def get(self, resource_id=None): - """ - Attempts to retrieve a particular record by sending a GET - request to the appropriate endpoint. If successful, the resource - object is populated with the data in the response. - - Args: - resource_id (int): the ID of an object to be retrieved - """ - if "GET" not in self.resource.SUPPORTED_METHODS: - raise MethodNotSupportedException("GET method on this resource is not supported") - - if resource_id is not None: - response = self.client.get(self.join_path(str(resource_id))) - else: - response = self.client.get(self.resource.PATH) - self.handle_response(response) - - def create(self, **params): - """ - Attempts to create a new instance of a resource by sending a POST - request to the appropriate endpoint. - - Args: - **params: arbitrary parameters to be passed on to the POST request - """ - if "POST" not in self.resource.SUPPORTED_METHODS: - raise MethodNotSupportedException("POST method on this resource is not supported") - - if self.resource.id: - raise ObjectAlreadyCreatedException("ID must be None when calling create") - - response = self.client.post(self.resource.PATH, params) - - self.handle_response(response) - - def handle_response(self, response): - """ - Store the status code on the manager object and handle the response - based on the status code. - - Args: - response (requests.Response): a response object to be parsed - """ - if hasattr(response, "status_code"): - self.http_response_data = response.json() - self.http_response_status_code = response.status_code - - if response.status_code in (200, 201): - self.handle_success_response(response) - else: - self.handle_error_response(response) - else: - self.handle_no_response() - - def handle_no_response(self): - """ - Placeholder method to handle an unsuccessful request (e.g. due to no network connection). - """ - warnings.warn("Your request could not be completed") - - def handle_success_response(self, response): - """ - Handles a successful response by refreshing the instance fields. - - Args: - response (requests.Response): a response object to be parsed - """ - self.refresh_data(response.json()) - - def handle_error_response(self, response): - """ - Handles an error response that is returned by the server. 
- - Args: - response (requests.Response): a response object to be parsed - """ - - error = {"status_code": response.status_code, "content": response.json()} - self.errors.append(error) - try: - response.raise_for_status() - except Exception as e: - raise Exception(response.text) from e - - def refresh_data(self, data): - """ - Refreshes the instance's attributes with the provided data and - converts it to the correct type. - - Args: - data (dict): A dictionary containing keys and values of data to be stored on the object. - """ - for field in self.resource.fields: - field.set(data.get(field.name, None)) - - if hasattr(self.resource, "refresh_data"): - self.resource.refresh_data() - - -class Resource: - """ - A base class for an API resource. This class should be extended for each - resource endpoint. - """ - - SUPPORTED_METHODS = () - PATH = "" - fields = () - - def __init__(self, client=None): - """ - Initialize the Resource by populating attributes based on fields and setting a manager. - - Args: - client (APIClient): An APIClient instance to use as a client. - """ - self.manager = ResourceManager(self, client=client) - for field in self.fields: - setattr(self, field.name, field) - - def reload(self): - """ - A helper method to fetch the latest data from the API. - """ - if not hasattr(self, "id"): - raise TypeError("Resource does not have an ID") - - if self.id: - self.manager.get(self.id.value) - else: - warnings.warn("Could not reload resource data", UserWarning) - - -class Field: - """ - A helper class to classify and clean data returned by the API. - """ - - value = None - - def __init__(self, name, clean=str): - """ - Initialize the Field object with a name and a cleaning function. - - Args: - name (str): A string representing the name of the field (e.g., "created_at"). - clean: A method that returns a cleaned value of the field, of the correct type. - """ - self.name = name - self.clean = clean - - def __repr__(self): - """ - Return the string representation of the value. - """ - return "<{} {}: {}>".format(self.name, self.__class__.__name__, str(self.value)) - - def __bool__(self): - """ - Use the value to determine boolean state. - """ - return self.value is not None - - def set(self, value): - """ - Set the value of the Field to `value`. - - Args: - value: The value to be stored on the Field object. - """ - self.value = value - - @property - def cleaned_value(self): - """ - Return the cleaned value of the field (for example, an integer or Date - object) - """ - return self.clean(self.value) if self.value is not None else None - - -class Job(Resource): - """ - The API resource corresponding to jobs. - """ - - SUPPORTED_METHODS = ("GET", "POST") - PATH = "jobs" - - def __init__(self, client=None): - """ - Initialize the Job resource with a set of pre-defined fields. - """ - self.fields = ( - Field("id", int), - Field("status"), - Field("result_url"), - Field("circuit_url"), - Field("created_at", dateutil.parser.parse), - Field("started_at", dateutil.parser.parse), - Field("finished_at", dateutil.parser.parse), - Field("running_time"), - ) - - self.result = None - self.circuit = None - - super().__init__(client=client) - - @property - def is_complete(self): - """ - Returns True if the job status is "COMPLETE". Case insensitive. Returns False otherwise. - """ - return self.status.value and self.status.value.upper() == "COMPLETE" - - @property - def is_failed(self): - """ - Returns True if the job status is "FAILED". Case insensitive. Returns False otherwise. 
- """ - return self.status.value and self.status.value.upper() == "FAILED" - - def refresh_data(self): - """ - Refresh the job fields and attach a JobResult and JobCircuit object to the Job instance. - """ - if self.result is None: - self.result = JobResult(self.id.value, client=self.manager.client) - - if self.circuit is None: - self.circuit = JobCircuit(self.id.value, client=self.manager.client) - - -class JobResult(Resource): - """ - The API resource corresponding to the job result. - """ - - SUPPORTED_METHODS = ("GET",) - PATH = "jobs/{job_id}/result" - - def __init__(self, job_id, client=None): - """ - Initialize the JobResult resource with a pre-defined field. - - Args: - job_id (int): The ID of the Job object corresponding to the JobResult object. - """ - self.fields = (Field("result", json.loads),) - - self.PATH = self.PATH.format(job_id=job_id) - super().__init__(client=client) - - -class JobCircuit(Resource): - """ - The API resource corresponding to the job circuit. - """ - - SUPPORTED_METHODS = ("GET",) - PATH = "jobs/{job_id}/circuit" - - def __init__(self, job_id, client=None): - """ - Initialize the JobCircuit resource with a pre-defined field. - - Args: - job_id (int): The ID of the Job object corresponding to the JobResult object. - """ - self.fields = (Field("circuit"),) - - self.PATH = self.PATH.format(job_id=job_id) - super().__init__(client=client) diff --git a/tests/frontend/test_api_client.py b/tests/frontend/test_api_client.py deleted file mode 100644 index edd1e9602..000000000 --- a/tests/frontend/test_api_client.py +++ /dev/null @@ -1,415 +0,0 @@ -# Copyright 2019 Xanadu Quantum Technologies Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -r""" -Unit tests for API client -""" - -import pytest -import json -from strawberryfields import api_client -from strawberryfields import configuration -from strawberryfields.api_client import ( - requests, - Job, - ResourceManager, - ObjectAlreadyCreatedException, - MethodNotSupportedException, -) - -from unittest.mock import MagicMock - -pytestmark = pytest.mark.frontend - -status_codes = requests.status_codes.codes - - -@pytest.fixture -def client(): - return api_client.APIClient() - - -SAMPLE_JOB_CREATE_RESPONSE = { - "id": 29583, - "status": "queued", - "result_url": "https://platform.xanadu.ai/jobs/29583/result", - "circuit_url": "https://platform.xanadu.ai/jobs/29583/circuit", - "created_at": "2019-05-24T15:55:43.872531Z", - "started_at": None, - "finished_at": None, - "running_time": None, -} - -SAMPLE_JOB_RESPONSE = { - "id": 19856, - "status": "complete", - "result_url": "https://platform.xanadu.ai/jobs/19856/result", - "circuit_url": "https://platform.xanadu.ai/jobs/19856/circuit", - "created_at": "2019-05-24T15:55:43.872531Z", - "started_at": "2019-05-24T16:01:12.145636Z", - "finished_at": "2019-05-24T16:01:12.145645Z", - "running_time": "9µs", -} - - -class MockResponse: - """ - A helper class to generate a mock response based on status code. Mocks - the `json` and `text` attributes of a requests.Response class. 
- """ - - status_code = None - - def __init__(self, status_code): - self.status_code = status_code - - def json(self): - return self.possible_responses[self.status_code] - - @property - def text(self): - return json.dumps(self.json()) - - def raise_for_status(self): - raise requests.exceptions.HTTPError() - - -class MockPOSTResponse(MockResponse): - possible_responses = { - 201: SAMPLE_JOB_CREATE_RESPONSE, - 400: { - "code": "parse-error", - "detail": ( - "The blackbird script could not be parsed. " - "Please fix errors in the script and try again." - ), - }, - 401: {"code": "unauthenticated", "detail": "Requires authentication"}, - 409: { - "code": "unsupported-circuit", - "detail": ("This circuit is not compatible with the specified hardware."), - }, - 500: { - "code": "server-error", - "detail": ("Unexpected server error. Please try your request again " "later."), - }, - } - - -class MockGETResponse(MockResponse): - possible_responses = { - 200: SAMPLE_JOB_RESPONSE, - 401: {"code": "unauthenticated", "detail": "Requires authentication"}, - 404: { - "code": "not-found", - "detail": "The requested resource could not be found or does not exist.", - }, - 500: { - "code": "server-error", - "detail": ("Unexpected server error. Please try your request again " "later."), - }, - } - - status_code = None - - def __init__(self, status_code): - self.status_code = status_code - - def json(self): - return self.possible_responses[self.status_code] - - def raise_for_status(self): - raise requests.exceptions.HTTPError() - - -class TestAPIClient: - def test_init_default_client(self): - """ - Test that initializing a default client generates an APIClient with the expected params. - """ - client = api_client.APIClient(use_ssl=True, authentication_token="") - assert client.USE_SSL is True - assert not client.AUTHENTICATION_TOKEN - assert client.BASE_URL.startswith("https://") - assert client.HEADERS["User-Agent"] == client.USER_AGENT - - def test_init_default_client_no_ssl(self): - """ - Test setting use_ssl to False when initializing a client generates the correct base URL and - sets the correct flag. - """ - client = api_client.APIClient(use_ssl=False) - assert client.USE_SSL is False - assert client.BASE_URL.startswith("http://") - - def test_init_custom_token_client(self): - """ - Test that the token is correctly set when initializing a client. - """ - test_token = "TEST" - client = api_client.APIClient(authentication_token=test_token) - assert client.AUTHENTICATION_TOKEN == test_token - - def test_init_custom_token_client_headers_set(self, monkeypatch): - """ - Test that set_authentication_token is being called when setting a custom token. - """ - test_token = "TEST" - mock_set_authorization_header = MagicMock() - monkeypatch.setattr( - api_client.APIClient, "set_authorization_header", mock_set_authorization_header - ) - api_client.APIClient(authentication_token=test_token) - mock_set_authorization_header.assert_called_once_with(test_token) - - def test_set_authorization_header(self): - """ - Test that the authentication token is added to the header correctly. - """ - client = api_client.APIClient() - - authentication_token = MagicMock() - client.set_authorization_header(authentication_token) - assert client.HEADERS["Authorization"] == authentication_token - - def test_get_configuration_from_config(self, client, monkeypatch): - """ - Test that the configuration is loaded from file correctly (not yet implemented). 
- """ - mock_configuration = MagicMock() - monkeypatch.setattr(configuration, "Configuration", mock_configuration.Configuration) - assert client.get_configuration_from_config() == mock_configuration.Configuration().api - - def test_authenticate(self, client): - """ - Test that the client can authenticate correctly (not yet implemented). - """ - with pytest.raises(NotImplementedError): - username = "TEST_USER" - password = "TEST_PASSWORD" - client.authenticate(username, password) - - def test_join_path(self, client): - """ - Test that two paths can be joined and separated by a forward slash. - """ - assert client.join_path("jobs") == "{client.BASE_URL}/jobs".format(client=client) - - -class TestResourceManager: - def test_init(self): - """ - Test that a resource manager instance can be initialized correctly with a resource and - client instance. Assets that both manager.resource and manager.client are set. - """ - resource = MagicMock() - client = MagicMock() - manager = ResourceManager(resource, client) - - assert manager.resource == resource - assert manager.client == client - - def test_join_path(self): - """ - Test that the resource path can be joined corectly with the base path. - """ - mock_resource = MagicMock() - mock_resource.PATH = "some-path" - - manager = ResourceManager(mock_resource, MagicMock()) - assert manager.join_path("test") == "some-path/test" - - def test_get_unsupported(self): - """ - Test a GET request with a resource that does not support it. Asserts that - MethodNotSupportedException is raised. - """ - mock_resource = MagicMock() - mock_resource.SUPPORTED_METHODS = () - manager = ResourceManager(mock_resource, MagicMock()) - with pytest.raises(MethodNotSupportedException): - manager.get(1) - - def test_get(self, monkeypatch): - """ - Test a successful GET request. Tests that manager.handle_response is being called with - the correct Response object. - """ - mock_resource = MagicMock() - mock_client = MagicMock() - mock_response = MagicMock() - mock_client.get = MagicMock(return_value=mock_response) - - mock_resource.SUPPORTED_METHODS = ("GET",) - - manager = ResourceManager(mock_resource, mock_client) - monkeypatch.setattr(manager, "handle_response", MagicMock()) - - manager.get(1) - - # TODO test that this is called with correct path - mock_client.get.assert_called_once() - manager.handle_response.assert_called_once_with(mock_response) - - def test_create_unsupported(self): - """ - Test a POST (create) request with a resource that does not support that type or request. - Asserts that MethodNotSupportedException is raised. - """ - mock_resource = MagicMock() - mock_resource.SUPPORTED_METHODS = () - manager = ResourceManager(mock_resource, MagicMock()) - with pytest.raises(MethodNotSupportedException): - manager.create() - - def test_create_id_already_exists(self): - """ - Tests that once an object is created, create method can not be called again. Asserts that - ObjectAlreadyCreatedException is raised. - """ - mock_resource = MagicMock() - mock_resource.SUPPORTED_METHODS = ("POST",) - mock_resource.id = MagicMock() - manager = ResourceManager(mock_resource, MagicMock()) - with pytest.raises(ObjectAlreadyCreatedException): - manager.create() - - def test_create(self, monkeypatch): - """ - Tests a successful POST (create) method. Asserts that handle_response is called with the - correct Response object. 
- """ - mock_resource = MagicMock() - mock_client = MagicMock() - mock_response = MagicMock() - mock_client.post = MagicMock(return_value=mock_response) - - mock_resource.SUPPORTED_METHODS = ("POST",) - mock_resource.id = None - - manager = ResourceManager(mock_resource, mock_client) - monkeypatch.setattr(manager, "handle_response", MagicMock()) - - manager.create() - - # TODO test that this is called with correct path and params - mock_client.post.assert_called_once() - manager.handle_response.assert_called_once_with(mock_response) - - def test_handle_response(self, monkeypatch): - """ - Tests that a successful response initiates a call to handle_success_response, and that an - error response initiates a call to handle_error_response. - """ - mock_resource = MagicMock() - mock_client = MagicMock() - mock_response = MagicMock() - mock_handle_success_response = MagicMock() - mock_handle_error_response = MagicMock() - - manager = ResourceManager(mock_resource, mock_client) - - monkeypatch.setattr(manager, "handle_success_response", mock_handle_success_response) - - monkeypatch.setattr(manager, "handle_error_response", mock_handle_error_response) - - manager.handle_response(mock_response) - assert manager.http_response_data == mock_response.json() - assert manager.http_response_status_code == mock_response.status_code - mock_handle_error_response.assert_called_once_with(mock_response) - - mock_response.status_code = 200 - manager.handle_response(mock_response) - mock_handle_success_response.assert_called_once_with(mock_response) - - def test_handle_refresh_data(self): - """ - Tests the ResourceManager.refresh_data method. Ensures that Field.set is called once with - the correct data value. - """ - mock_resource = MagicMock() - mock_client = MagicMock() - - fields = [MagicMock() for i in range(5)] - - mock_resource.fields = {f: MagicMock() for f in fields} - mock_data = {f.name: MagicMock() for f in fields} - - manager = ResourceManager(mock_resource, mock_client) - - manager.refresh_data(mock_data) - - for field in mock_resource.fields: - field.set.assert_called_once_with(mock_data[field.name]) - - def test_debug_mode(self, monkeypatch): - """ - Tests that the client object keeps track of responses and errors when debug mode is enabled. - """ - class MockException(Exception): - """ - A mock exception to ensure that the exception raised is the expected one. - """ - pass - - def mock_raise(exception): - raise exception - - mock_get_response = MockGETResponse(200) - - monkeypatch.setattr(requests, "get", lambda url, headers: mock_get_response) - monkeypatch.setattr(requests, "post", lambda url, headers, data: mock_raise(MockException)) - - client = api_client.APIClient(debug=True) - - assert client.DEBUG is True - assert client.errors == [] - assert client.responses == [] - - client.get("") - assert len(client.responses) == 1 - assert client.responses[0] == mock_get_response - - with pytest.raises(MockException): - client.post("", {}) - - assert len(client.errors) == 1 - - -class TestJob: - def test_create_created(self, monkeypatch): - """ - Tests a successful Job creatioin with a mock POST response. Asserts that all fields on - the Job instance have been set correctly and match the mock data. 
- """ - monkeypatch.setattr(requests, "post", lambda url, headers, data: MockPOSTResponse(201)) - job = Job() - job.manager.create(params={}) - - keys_to_check = SAMPLE_JOB_CREATE_RESPONSE.keys() - for key in keys_to_check: - assert getattr(job, key).value == SAMPLE_JOB_CREATE_RESPONSE[key] - - def test_create_bad_request(self, monkeypatch): - """ - Tests that the correct error code is returned when a bad request is sent to the server. - """ - monkeypatch.setattr(requests, "post", lambda url, headers, data: MockPOSTResponse(400)) - job = Job() - - with pytest.raises(Exception): - job.manager.create(params={}) - assert len(job.manager.errors) == 1 - assert job.manager.errors[0]["status_code"] == 400 - assert job.manager.errors[0]["content"] == MockPOSTResponse(400).json() From 67f6321af9674a24d2eab7c3d83bb0ebbdaf8832 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 21 Feb 2020 12:53:31 -0500 Subject: [PATCH 140/335] Tests --- strawberryfields/engine.py | 2 +- tests/frontend/test_engine.py | 151 ++++++++++++++++++++++++++++++++-- 2 files changed, 146 insertions(+), 7 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 785fc8b7b..074cb4e5f 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -774,7 +774,7 @@ def create_job(self, circuit: str) -> Job: "Job creation failed: {}".format(self._format_error_message(response)) ) - def get_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: + def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: """Gets a list of jobs created by the user, optionally filtered by datetime. A maximum of the 100 most recent jobs are returned. diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index f5515c650..a298d2226 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -21,6 +21,7 @@ import strawberryfields as sf from strawberryfields import ops +from strawberryfields.backends.base import BaseBackend from strawberryfields.engine import ( Connection, InvalidJobOperationError, @@ -37,13 +38,151 @@ @pytest.fixture -def prog(): - """A simple program for testing purposes.""" - program = sf.Program(2) - with program.context as q: - # pylint: disable=expression-not-assigned +def eng(backend): + """Engine fixture.""" + return sf.LocalEngine(backend) + + +@pytest.fixture +def prog(backend): + """Program fixture.""" + prog = sf.Program(2) + with prog.context as q: ops.Dgate(0.5) | q[0] - return program + return prog + + +@pytest.fixture +def starship_engine(monkeypatch): + """ + Create a reusable StarshipEngine fixture without a real APIClient. 
+ """ + mock_api_client = MagicMock() + monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) + engine = StarshipEngine("chip0", polling_delay_seconds=0) + return engine + + +class TestEngine: + """Test basic engine functionality""" + + def test_load_backend(self): + """Backend can be correctly loaded via strings""" + eng = sf.LocalEngine("base") + assert isinstance(eng.backend, BaseBackend) + + def test_bad_backend(self): + """Backend must be a string or a BaseBackend instance.""" + with pytest.raises( + TypeError, match="backend must be a string or a BaseBackend instance" + ): + eng = sf.LocalEngine(0) + + +class TestEngineProgramInteraction: + """Test the Engine class and its interaction with Program instances.""" + + def test_history(self, eng, prog): + """Engine history.""" + # no programs have been run + assert not eng.run_progs + eng.run(prog) + # one program has been run + assert len(eng.run_progs) == 1 + assert eng.run_progs[-1] == prog # no compilation required with BaseBackend + + def test_reset(self, eng, prog): + """Running independent programs with an engine reset in between.""" + assert not eng.run_progs + eng.run(prog) + assert len(eng.run_progs) == 1 + + eng.reset() + assert not eng.run_progs + p2 = sf.Program(3) + with p2.context as q: + ops.Rgate(1.0) | q[2] + eng.run(p2) + assert len(eng.run_progs) == 1 + + def test_regref_mismatch(self, eng): + """Running incompatible programs sequentially gives an error.""" + p1 = sf.Program(3) + p2 = sf.Program(p1) + p1.locked = False + with p1.context as q: + ops.Del | q[0] + + with pytest.raises(RuntimeError, match="Register mismatch"): + eng.run([p1, p2]) + + def test_sequential_programs(self, eng): + """Running several program segments sequentially.""" + D = ops.Dgate(0.2) + p1 = sf.Program(3) + with p1.context as q: + D | q[1] + ops.Del | q[0] + assert not eng.run_progs + eng.run(p1) + assert len(eng.run_progs) == 1 + + # p2 succeeds p1 + p2 = sf.Program(p1) + with p2.context as q: + D | q[1] + eng.run(p2) + assert len(eng.run_progs) == 2 + + # p2 does not alter the register so it can be repeated + eng.run([p2] * 3) + assert len(eng.run_progs) == 5 + + eng.reset() + assert not eng.run_progs + + def test_print_applied(self, eng): + """Tests the printing of executed programs.""" + a = 0.23 + r = 0.1 + + def inspect(): + res = [] + print_fn = lambda x: res.append(x.__str__()) + eng.print_applied(print_fn) + return res + + p1 = sf.Program(2) + with p1.context as q: + ops.Dgate(a) | q[1] + ops.Sgate(r) | q[1] + + eng.run(p1) + expected1 = [ + "Run 0:", + "Dgate({}, 0) | (q[1])".format(a), + "Sgate({}, 0) | (q[1])".format(r), + ] + assert inspect() == expected1 + + # run the program again + eng.reset() + eng.run(p1) + assert inspect() == expected1 + + # apply more commands to the same backend + p2 = sf.Program(2) + with p2.context as q: + ops.Rgate(r) | q[1] + + eng.run(p2) + expected2 = expected1 + ["Run 1:", "Rgate({}) | (q[1])".format(r)] + assert inspect() == expected2 + + # reapply history + eng.reset() + eng.run([p1, p2]) + assert inspect() == expected2 @pytest.fixture From 7271f758ddb237b35a90e716ea3fe56491ea06fd Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 21 Feb 2020 14:45:26 -0500 Subject: [PATCH 141/335] Add magic methods, polish docs, cleanup --- doc/code/sf_api_client.rst | 12 ------- doc/index.rst | 1 - strawberryfields/engine.py | 62 ++++++++++++++++++++++++++++------- tests/frontend/test_engine.py | 15 --------- 4 files changed, 51 insertions(+), 39 deletions(-) delete mode 100644 
doc/code/sf_api_client.rst diff --git a/doc/code/sf_api_client.rst b/doc/code/sf_api_client.rst deleted file mode 100644 index 3de1ba33a..000000000 --- a/doc/code/sf_api_client.rst +++ /dev/null @@ -1,12 +0,0 @@ -sf.api_client -============= - -.. currentmodule:: strawberryfields.api_client - -.. warning:: - - Unless you are a Strawberry Fields developer, you likely do not need - to access this module directly. - -.. automodapi:: strawberryfields.api_client - :no-heading: diff --git a/doc/index.rst b/doc/index.rst index ffe425455..c2a8b438f 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -174,7 +174,6 @@ Strawberry Fields is **free** and **open source**, released under the Apache Lic code/sf_apps code/sf_ops code/sf_utils - code/sf_api_client code/sf_backends code/sf_circuitspecs code/sf_circuitdrawer diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 074cb4e5f..f8b0667eb 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -569,12 +569,6 @@ class JobStatus(enum.Enum): COMPLETE = "complete" FAILED = "failed" - def __repr__(self) -> str: - return self.value - - def __str__(self) -> str: - return self.value - @property def is_terminal(self) -> bool: """Checks if this status represents a final and immutable state. @@ -667,6 +661,14 @@ def cancel(self): ) self._connection.cancel_job(self.id) + def __repr__(self): + return "<{}: id={}, status={}>".format( + self.__class__.__name__, self.id, self.status.value + ) + + def __str__(self): + return self.__repr__() + class RequestMethod(enum.Enum): """Defines the valid request methods for messages sent to the remote platform.""" @@ -678,11 +680,33 @@ class RequestMethod(enum.Enum): class Connection: """Manages remote connections to the remote job execution platform and exposes - advanced job operations. + various job operations. For basic usage, it is not necessary to manually instantiate this object; the user is encouraged to use the higher-level interface provided by :class:`~StarshipEngine`. + **Example:** + + The following example instantiates a :class:`~Connection` for a given API + authentication token, tests the connection, and makes requests for a single or + multiple jobs. + + .. code-block:: python + + connection = Connection(token="abc") + + # Ping the remote server + success = connection.ping() + # True if successful, or False if cannot connect or not authenticated + + # Get all jobs submitted for this token + jobs = connection.get_all_jobs() + jobs # [, ...] 
+ + # Get a specific job by ID + job = connection.get_job("59a1c0b1-c6a7-4f9b-ae37-0ac5eec9c413") + job # + Args: token (str): the API authentication token host (str): the hostname of the remote platform @@ -904,6 +928,14 @@ def _format_error_message(response: requests.Response) -> str: body.get("status_code", ""), body.get("code", ""), body.get("detail", "") ) + def __repr__(self): + return "<{}: token={}, host={}>".format( + self.__class__.__name__, self.token, self.host + ) + + def __str__(self): + return self.__repr__() + class StarshipEngine: """A quantum program executor engine that that provides a simple interface for @@ -921,7 +953,7 @@ class StarshipEngine: # Run a job synchronously result = engine.run(program, shots=1) # (Engine blocks until job is complete) - result # [[0, 1, 0, 2, 1, 0, 0, 0]] + result # [[0 1 0 2 1 0 0 0]] # Run a job synchronously, but cancel it before it is completed result = engine.run(program, shots=1) @@ -929,12 +961,12 @@ class StarshipEngine: # Run a job asynchronously job = engine.run_async(program, shots=1) - job.status # "queued" + job.status # job.result # InvalidJobOperationError # (After some time...) job.refresh() - job.status # "complete" - job.result # [[0, 1, 0, 2, 1, 0, 0, 0]] + job.status # + job.result # [[0 1 0 2 1 0 0 0]] Args: target (str): the target backend @@ -1030,6 +1062,14 @@ def run_async(self, program: Program, shots: int = 1) -> Job: bb._target["options"] = {"shots": shots} return self._connection.create_job(bb.serialize()) + def __repr__(self): + return "<{}: target={}, connection={}>".format( + self.__class__.__name__, self.target, self.connection + ) + + def __str__(self): + return self.__repr__() + class Engine(LocalEngine): """dummy""" diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index a298d2226..e05f53d08 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -52,17 +52,6 @@ def prog(backend): return prog -@pytest.fixture -def starship_engine(monkeypatch): - """ - Create a reusable StarshipEngine fixture without a real APIClient. - """ - mock_api_client = MagicMock() - monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) - engine = StarshipEngine("chip0", polling_delay_seconds=0) - return engine - - class TestEngine: """Test basic engine functionality""" @@ -450,10 +439,6 @@ def test_run_complete(self, connection, prog, monkeypatch): with pytest.raises(AttributeError): _ = result.state - def test_run_cancelled(self): - """Tests a manual cancellation of synchronous job execution.""" - # TODO - def test_run_async(self, connection, prog, monkeypatch): """Tests a successful asynchronous job execution.""" id_, result_expected = "123", np.array([[1, 2], [3, 4]], dtype=np.int8) From d203d89369dd526f9704100ae3eb0c21ac37a279 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 21 Feb 2020 15:02:28 -0500 Subject: [PATCH 142/335] Connection.create_job() accepts Program directly instead of serialized blackbird --- strawberryfields/engine.py | 35 ++++++++++++++++++++++------------- tests/frontend/test_engine.py | 8 ++++---- 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index f8b0667eb..97cab3534 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -688,8 +688,8 @@ class Connection: **Example:** The following example instantiates a :class:`~Connection` for a given API - authentication token, tests the connection, and makes requests for a single or - multiple jobs. 
+ authentication token, tests the connection, submits a new job, and makes requests + for a single or multiple existing jobs. .. code-block:: python @@ -699,6 +699,10 @@ class Connection: success = connection.ping() # True if successful, or False if cannot connect or not authenticated + # Submit a new job + job = connection.create_job("chip2", program, shots=123) + job # + # Get all jobs submitted for this token jobs = connection.get_all_jobs() jobs # [, ...] @@ -778,15 +782,25 @@ def base_url(self) -> str: "s" if self.use_ssl else "", self.host, self.port ) - def create_job(self, circuit: str) -> Job: + def create_job(self, target: str, program: Program, shots: int) -> Job: """Creates a job with the given circuit. Args: - circuit (str): the serialized Blackbird program + target (str): the target device + program (strawberryfields.Program): the quantum circuit + shots (int): the number of shots Returns: strawberryfields.engine.Job: the created job """ + # Serialize a blackbird circuit for network transmission + bb = to_blackbird(program) + # pylint: disable=protected-access + bb._target["name"] = target + # pylint: disable=protected-access + bb._target["options"] = {"shots": shots} + circuit = bb.serialize() + response = self._post("/jobs", data=json.dumps({"circuit": circuit})) if response.status_code == 201: return Job( @@ -969,7 +983,7 @@ class StarshipEngine: job.result # [[0 1 0 2 1 0 0 0]] Args: - target (str): the target backend + target (str): the target device connection (strawberryfields.engine.Connection): a connection to the remote job execution platform """ @@ -999,10 +1013,10 @@ def __init__(self, target: str, connection: Connection = None): @property def target(self) -> str: - """The target backend used by the engine. + """The target device used by the engine. 
Returns: - str: the target backend used by the engine + str: the target device used by the engine """ return self._target @@ -1055,12 +1069,7 @@ def run_async(self, program: Program, shots: int = 1) -> Job: Returns: strawberryfields.engine.Job: the created remote job """ - bb = to_blackbird(program) - # pylint: disable=protected-access - bb._target["name"] = self.target - # pylint: disable=protected-access - bb._target["options"] = {"shots": shots} - return self._connection.create_job(bb.serialize()) + return self._connection.create_job(self.target, program, shots) def __repr__(self): return "<{}: target={}, connection={}>".format( diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index e05f53d08..e78d99587 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -272,7 +272,7 @@ def test_init(self): assert connection.base_url == "https://host:123" - def test_create_job(self, connection, monkeypatch): + def test_create_job(self, prog, connection, monkeypatch): """Tests a successful job creation flow.""" id_, status = "123", JobStatus.QUEUED @@ -282,17 +282,17 @@ def test_create_job(self, connection, monkeypatch): mock_return(mock_response(201, {"id": id_, "status": status})), ) - job = connection.create_job("circuit") + job = connection.create_job("chip2", prog, 1) assert job.id == id_ assert job.status == status - def test_create_job_error(self, connection, monkeypatch): + def test_create_job_error(self, prog, connection, monkeypatch): """Tests a failed job creation flow.""" monkeypatch.setattr(Connection, "_post", mock_return(mock_response(400, {}))) with pytest.raises(RequestFailedError): - connection.create_job("circuit") + connection.create_job("chip2", prog, 1) def test_get_all_jobs(self, connection, monkeypatch): """Tests a successful job list request.""" From e44a1b7d5d56fd0d4cb203cb67e246cddd3686a0 Mon Sep 17 00:00:00 2001 From: antalszava Date: Fri, 21 Feb 2020 15:39:09 -0500 Subject: [PATCH 143/335] Unit tests for the parts making up load_config --- strawberryfields/configuration.py | 52 +++---- tests/frontend/test_configuration.py | 220 +++++++++++++++------------ 2 files changed, 149 insertions(+), 123 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 910b2df58..6b43011fe 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -60,20 +60,20 @@ def load_config(filename="config.toml", **kwargs): config = create_config_object(**kwargs) - config_file = look_for_config_file(filename=filename) + parsed_config = look_for_config_in_file(filename=filename) - if config_file is not None: - config = update_config_from_config_file(config, config_file) + if parsed_config is not None: + update_with_other_config(config, other_config=parsed_config) else: log.info("No Strawberry Fields configuration file found.") - config = update_config_from_environmental_variables(config) + update_from_environmental_variables(config) return config -def create_config_object(**kwargs): - print('kwargs in create_config_object', kwargs) - authentication_token = kwargs.get("authentication_token", "") +def create_config_object(authentication_token="", **kwargs): + """ + contains the recognized options for configuration.""" hostname = kwargs.get("hostname", "localhost") use_ssl = kwargs.get("use_ssl", True) port = kwargs.get("port", 443) @@ -90,18 +90,7 @@ def create_config_object(**kwargs): } return config -def update_config_from_config_file(config, config_file): - - # Here an 
example for sectionconfig is api - for section, sectionconfig in config.items(): - for key in sectionconfig: - if key in config_file[section]: - # Update from configuration file - config[section][key] = config_file[section][key] - - return config - -def look_for_config_file(filename="config.toml"): +def look_for_config_in_file(filename="config.toml"): # Search the current directory, the directory under environment # variable SF_CONF, and default user config directory, in that order. current_dir = os.getcwd() @@ -112,15 +101,24 @@ def look_for_config_file(filename="config.toml"): for directory in directories: filepath = os.path.join(directory, filename) try: - config_file = load_config_file(filepath) + parsed_config = parse_config_file(filepath) break except FileNotFoundError: - config_file = None + parsed_config = None # TODO: maybe we need a merge here? - return config_file + return parsed_config + +def update_with_other_config(config, other_config): + + # Here an example for sectionconfig is api + for section, sectionconfig in config.items(): + for key in sectionconfig: + if key in other_config[section]: + # Update from configuration file + config[section][key] = other_config[section][key] -def update_config_from_environmental_variables(config): +def update_from_environmental_variables(config): for section, sectionconfig in config.items(): env_prefix = "SF_{}_".format(section.upper()) for key in sectionconfig: @@ -128,14 +126,14 @@ def update_config_from_environmental_variables(config): if env in os.environ: config[section][key] = parse_environment_variable(env, os.environ[env]) -def load_config_file(filepath): +def parse_config_file(filepath): """Load a configuration file. Args: filepath (str): path to the configuration file """ with open(filepath, "r") as f: - config_file = toml.load(f) - return config_file + config_from_file = toml.load(f) + return config_from_file -#configuration = load_config() +configuration = load_config() diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 8816e7dae..2cbe1e937 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -25,6 +25,7 @@ pytestmark = pytest.mark.frontend logging.getLogger().setLevel(1) +authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" TEST_FILE = """\ [api] @@ -53,22 +54,33 @@ } -class TestConfiguration: - """Tests for the configuration class""" +class TestCreteConfigObject: + def test_empty_config_object(self): + config = conf.create_config_object(authentication_token="", + hostname="", + use_ssl="", + debug="", + port="") - def test_create_config_object(self): + assert all(value=="" for value in config["api"].values()) + def test_config_object_with_authentication_token(self): assert conf.create_config_object(authentication_token="071cdcce-9241-4965-93af-4a4dbc739135") == EXPECTED_CONFIG - def test_load_config_file(self, tmpdir, monkeypatch): +class TestConfiguration: + """Tests for the configuration class""" + + def test_parse_config_file(self, tmpdir, monkeypatch): filename = tmpdir.join("config.toml") with open(filename, "w") as f: f.write(TEST_FILE) - config_file = conf.load_config_file(filepath=filename) + config_file = conf.parse_config_file(filepath=filename) assert config_file == EXPECTED_CONFIG +class TestLookForConfigInFile: + def test_loading_current_directory(self, tmpdir, monkeypatch): """Test that the default configuration file is loaded from the current directory, if found.""" @@ -76,8 +88,8 @@ def 
test_loading_current_directory(self, tmpdir, monkeypatch): with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: tmpdir) - m.setattr(conf, "load_config_file", lambda filepath: filepath) - config_file = conf.look_for_config_file(filename=filename) + m.setattr(conf, "parse_config_file", lambda filepath: filepath) + config_file = conf.look_for_config_in_file(filename=filename) assert config_file == tmpdir.join(filename) @@ -96,12 +108,12 @@ def raise_wrapper(ex): with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: "NoConfigFileHere") m.setattr(os.environ, "get", lambda x, y: tmpdir if x=="SF_CONF" else "NoConfigFileHere") - m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) + m.setattr(conf, "parse_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) # Need to mock the module specific function # m.setattr(conf, "user_config_dir", lambda *args: "NotTheFileName") # os.environ["SF_CONF"] = lambda: FileNotFoundError - config_file = conf.look_for_config_file(filename=filename) + config_file = conf.look_for_config_in_file(filename=filename) assert config_file == tmpdir.join("config.toml") def test_loading_user_config_dir(self, tmpdir, monkeypatch): @@ -121,13 +133,13 @@ def raise_wrapper(ex): m.setattr(os, "getcwd", lambda: "NoConfigFileHere") m.setattr(os.environ, "get", lambda *args: "NoConfigFileHere") m.setattr(conf, "user_config_dir", lambda x, *args: tmpdir if x=="strawberryfields" else "NoConfigFileHere") - m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) + m.setattr(conf, "parse_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) - config_file = conf.look_for_config_file(filename=filename) + config_file = conf.look_for_config_in_file(filename=filename) assert config_file == tmpdir.join("config.toml") def test_no_config_file_found_returns_none(self, tmpdir, monkeypatch): - """Test that the the look_for_config_file returns None if the + """Test that the the look_for_config_in_file returns None if the configuration file is nowhere to be found. 
This is a test case for when there is no configuration file: @@ -144,80 +156,110 @@ def raise_wrapper(ex): m.setattr(os, "getcwd", lambda: "NoConfigFileHere") m.setattr(os.environ, "get", lambda *args: "NoConfigFileHere") m.setattr(conf, "user_config_dir", lambda *args: "NoConfigFileHere") - m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) + m.setattr(conf, "parse_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) - config_file = conf.look_for_config_file(filename=filename) + config_file = conf.look_for_config_in_file(filename=filename) assert config_file is None - def test_loading_absolute_path(self, tmpdir, monkeypatch): - """Test that the default configuration file can be loaded - via an absolute path.""" - # TODO: Some state seems to be left hereThis test does not work if - # there is already a configuration file in place - # {'api': {'authentication_token': '071cdcce-9241-4965-93af-4a4dbc739135', - # 'hostname': 'localhost', 'use_ssl': True, 'port': '443', 'debug': False}} - # {'api': {'authentication_token': '071cdcce-9241-4965-93af-4a4dbc739135', - # 'hostname': 'localhost', 'use_ssl': True, 'debug': False, 'port': 443}} - - # config._config seems to output a string at times - filename = os.path.abspath(tmpdir.join("config.toml")) - - with open(filename, "w") as f: - f.write(TEST_FILE) - - os.environ["SF_CONF"] = "" - config = conf.Configuration(name=str(filename)) - - assert config._config == EXPECTED_CONFIG - assert config.path == filename - - def test_not_found_warning(self, caplog): - """Test that a warning is raised if no configuration file found.""" - - conf.Configuration(name="noconfig") - assert "No Strawberry Fields configuration file found." 
in caplog.text - - def test_save(self, tmpdir): - """Test saving a configuration file.""" - filename = str(tmpdir.join("test_config.toml")) - config = conf.Configuration() - - # make a change - config._config["api"]["hostname"] = "https://6.4.2.4" - config.save(filename) - - result = toml.load(filename) - assert config._config == result - - def test_attribute_loading(self): - """Test attributes automatically get the correct section key""" - config = conf.Configuration() - assert config.api == config._config["api"] - - def test_failed_attribute_loading(self): - """Test an exception is raised if key does not exist""" - config = conf.Configuration() - with pytest.raises( - conf.ConfigurationError, match="Unknown Strawberry Fields configuration section" - ): - config.test - - def test_env_vars_take_precedence(self, tmpdir): - """Test that if a configuration file and an environment - variable is set, that the environment variable takes - precedence.""" - filename = tmpdir.join("config.toml") - - with open(filename, "w") as f: - f.write(TEST_FILE) - - host = "https://6.4.2.4" - - os.environ["SF_API_HOSTNAME"] = host - config = conf.Configuration(str(filename)) - - assert config.api["hostname"] == host + class TestUpdateWithOtherConfig: + + def test_update_entire_config(self): + config = conf.create_config_object() + assert config["api"]["authentication_token"] == "" + + conf.update_with_other_config(config, EXPECTED_CONFIG) + assert config == EXPECTED_CONFIG + + ONLY_AUTH_CONFIG = { + "api": { + "authentication_token": "PlaceHolder", + } + } + + ONLY_HOST_CONFIG = { + "api": { + "hostname": "PlaceHolder", + } + } + + ONLY_SSL_CONFIG = { + "api": { + "use_ssl": "PlaceHolder", + } + } + + ONLY_DEBUG_CONFIG = { + "api": { + "debug": "PlaceHolder", + } + } + + ONLY_PORT_CONFIG = { + "api": {"port": "PlaceHolder"} + } + + @pytest.mark.parametrize("specific_key, config_to_update_with", [("authentication_token",ONLY_AUTH_CONFIG), + ("hostname",ONLY_HOST_CONFIG), + ("use_ssl",ONLY_SSL_CONFIG), + ("debug",ONLY_DEBUG_CONFIG), + ("port",ONLY_PORT_CONFIG)]) + def test_update_only_one_item_in_section(self, specific_key, config_to_update_with): + config = conf.create_config_object() + assert config["api"][specific_key] != "PlaceHolder" + + conf.update_with_other_config(config, config_to_update_with) + assert config["api"][specific_key] == "PlaceHolder" + assert all(v != "PlaceHolder" for k, v in config["api"].items() if k != specific_key) + +environmental_variables = [ + "SF_API_AUTHENTICATION_TOKEN", + "SF_API_HOSTNAME", + "SF_API_USE_SSL", + "SF_API_DEBUG", + "SF_API_PORT" + ] + +class TestUpdateFromEnvironmentalVariables: + + def test_all_environmental_variables_defined(self): + + for key in environmental_variables: + os.environ[key] = "PlaceHolder" + + config = conf.create_config_object() + assert not any(v == "PlaceHolder" for k, v in config["api"].items()) + + conf.update_from_environmental_variables(config) + assert all(v == "PlaceHolder" for k, v in config["api"].items()) + + # Tear-down + for key in environmental_variables: + del os.environ[key] + assert key not in os.environ + + environmental_variables_with_keys = [ + ("SF_API_AUTHENTICATION_TOKEN","authentication_token"), + ("SF_API_HOSTNAME","hostname"), + ("SF_API_USE_SSL","use_ssl"), + ("SF_API_DEBUG","debug"), + ("SF_API_PORT","port") + ] + + @pytest.mark.parametrize("specific_env_var, specific_key", environmental_variables_with_keys) + def test_one_environmental_variables_defined(self, specific_env_var, specific_key): + 
os.environ[specific_env_var] = "PlaceHolder" + + config = conf.create_config_object() + assert not any(v == "PlaceHolder" for k, v in config["api"].items()) + + conf.update_from_environmental_variables(config) + assert config["api"][specific_key] == "PlaceHolder" + assert all(v != "PlaceHolder" for k, v in config["api"].items() if k != specific_key) + + # Tear-down + del os.environ[specific_env_var] + assert specific_env_var not in os.environ def test_parse_environment_variable(self, monkeypatch): monkeypatch.setattr(conf, "BOOLEAN_KEYS", ("some_boolean",)) @@ -236,17 +278,3 @@ def test_parse_environment_variable(self, monkeypatch): something_else = MagicMock() assert conf.parse_environment_variable("not_a_boolean", something_else) == something_else - def test_update_config_with_limited_config_file(self, tmpdir, monkeypatch): - """ - This test asserts that the given a config file that only provides a single - value, the rest of the configuration values are filled in using defaults. - """ - filename = tmpdir.join("config.toml") - - with open(filename, "w") as f: - f.write(TEST_FILE_ONE_VALUE) - - config = conf.Configuration(str(filename)) - assert config.api["hostname"] == conf.DEFAULT_CONFIG["api"]["hostname"] - assert config.api["use_ssl"] == conf.DEFAULT_CONFIG["api"]["use_ssl"] - assert config.api["authentication_token"] == "071cdcce-9241-4965-93af-4a4dbc739135" From a04131eecf1294d2c1b74cca623d8327e56f7091 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 21 Feb 2020 16:39:48 -0500 Subject: [PATCH 144/335] Fix Result.samples to have correct dims of (shots, modes) --- strawberryfields/engine.py | 12 +++--------- tests/frontend/test_engine.py | 4 ++-- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 97cab3534..accc92f5e 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -89,9 +89,6 @@ def __init__(self, samples, is_stateful=True): self._state = None self._is_stateful = is_stateful - # ``samples`` arrives as a list of arrays, need to convert here to a multidimensional array - if len(np.shape(samples)) > 1: - samples = np.stack(samples, 1) self._samples = samples @property @@ -159,7 +156,7 @@ def __init__(self, backend, backend_options=None): self.backend_options = backend_options.copy() # dict is mutable #: List[Program]: list of Programs that have been run self.run_progs = [] - #: List[List[Number]]: latest measurement results, shape == (modes, shots) + #: List[List[Number]]: latest measurement results, shape == (shots, modes) self.samples = None if isinstance(backend, str): @@ -400,7 +397,7 @@ def _broadcast_nones(val, dim): prev = p if self.samples is not None: - return Result(self.samples.copy()) + return Result(np.array(self.samples).T) class LocalEngine(BaseEngine): @@ -882,10 +879,7 @@ def get_job_result(self, job_id: str) -> Result: buf.seek(0) samples = np.load(buf) - # NOTE To maintain consistency with other components for now, transpose - # the received result array from (shots, modes) to (modes, shots), - # which allows us to keep the logic in `Result.samples` unchanged - return Result(samples.T, is_stateful=False) + return Result(samples, is_stateful=False) raise RequestFailedError(self._format_error_message(response)) def cancel_job(self, job_id: str): diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index e78d99587..4e0a5d057 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -434,7 +434,7 @@ def 
test_run_complete(self, connection, prog, monkeypatch): engine = StarshipEngine("chip2", connection=connection) result = engine.run(prog) - assert np.array_equal(result.samples.T, result_expected) + assert np.array_equal(result.samples, result_expected) with pytest.raises(AttributeError): _ = result.state @@ -464,7 +464,7 @@ def test_run_async(self, connection, prog, monkeypatch): job.refresh() assert job.status == JobStatus.COMPLETE - assert np.array_equal(job.result.samples.T, result_expected) + assert np.array_equal(job.result.samples, result_expected) with pytest.raises(AttributeError): _ = job.result.state From e0afae135364f2bcfd291bc561fe84271aa1ae0e Mon Sep 17 00:00:00 2001 From: antalszava Date: Fri, 21 Feb 2020 16:57:05 -0500 Subject: [PATCH 145/335] Unit test for load_config; removing MagicMock from parse_environment_variable test; adding teardown logic for env vars; correcting typo --- strawberryfields/configuration.py | 6 +-- tests/frontend/test_configuration.py | 63 ++++++++++++++++++++-------- 2 files changed, 48 insertions(+), 21 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 6b43011fe..43ae1be36 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -1,4 +1,4 @@ -# Copyright 2019 Xanadu Quantum Technologies Inc. +# Copyright 2019-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -67,7 +67,7 @@ def load_config(filename="config.toml", **kwargs): else: log.info("No Strawberry Fields configuration file found.") - update_from_environmental_variables(config) + update_from_environment_variables(config) return config @@ -118,7 +118,7 @@ def update_with_other_config(config, other_config): # Update from configuration file config[section][key] = other_config[section][key] -def update_from_environmental_variables(config): +def update_from_environment_variables(config): for section, sectionconfig in config.items(): env_prefix = "SF_{}_".format(section.upper()) for key in sectionconfig: diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 2cbe1e937..036a85edf 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -1,4 +1,4 @@ -# Copyright 2019 Xanadu Quantum Technologies Inc. +# Copyright 2019-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,8 +18,6 @@ import toml -from unittest.mock import MagicMock - from strawberryfields import configuration as conf pytestmark = pytest.mark.frontend @@ -79,6 +77,29 @@ def test_parse_config_file(self, tmpdir, monkeypatch): assert config_file == EXPECTED_CONFIG +class TestLoadConfig: + + def test_not_found_warning(self, caplog): + """Test that a warning is raised if no configuration file found.""" + + conf.load_config(filename='NotAFileName') + assert "No Strawberry Fields configuration file found." 
in caplog.text + + def test_check_call_order(self, monkeypatch): + + def mock_look_for_config_in_file(*args, **kwargs): + call_history.append(2) + return "NotNone" + + call_history = [] + with monkeypatch.context() as m: + m.setattr(conf, "create_config_object", lambda *args: call_history.append(1)) + m.setattr(conf, "look_for_config_in_file", mock_look_for_config_in_file) + m.setattr(conf, "update_with_other_config", lambda *args, **kwargs: call_history.append(3)) + m.setattr(conf, "update_from_environment_variables", lambda *args: call_history.append(4)) + conf.load_config() + assert call_history == [1,2,3,4] + class TestLookForConfigInFile: def test_loading_current_directory(self, tmpdir, monkeypatch): @@ -95,7 +116,7 @@ def test_loading_current_directory(self, tmpdir, monkeypatch): def test_loading_env_variable(self, tmpdir, monkeypatch): """Test that the correct configuration file is found using the correct - environmental variable. + environment variable. This is a test case for when there is no configuration file in the current directory.""" @@ -122,7 +143,7 @@ def test_loading_user_config_dir(self, tmpdir, monkeypatch): This is a test case for when there is no configuration file: -in the current directory or - -in the directory contained in the corresponding environmental + -in the directory contained in the corresponding environment variable.""" filename = "config.toml" @@ -144,7 +165,7 @@ def test_no_config_file_found_returns_none(self, tmpdir, monkeypatch): This is a test case for when there is no configuration file: -in the current directory or - -in the directory contained in the corresponding environmental + -in the directory contained in the corresponding environment variable -in the user_config_dir directory of Strawberry Fields.""" filename = "config.toml" @@ -212,7 +233,7 @@ def test_update_only_one_item_in_section(self, specific_key, config_to_update_wi assert config["api"][specific_key] == "PlaceHolder" assert all(v != "PlaceHolder" for k, v in config["api"].items() if k != specific_key) -environmental_variables = [ +environment_variables = [ "SF_API_AUTHENTICATION_TOKEN", "SF_API_HOSTNAME", "SF_API_USE_SSL", @@ -222,23 +243,23 @@ def test_update_only_one_item_in_section(self, specific_key, config_to_update_wi class TestUpdateFromEnvironmentalVariables: - def test_all_environmental_variables_defined(self): + def test_all_environment_variables_defined(self): - for key in environmental_variables: + for key in environment_variables: os.environ[key] = "PlaceHolder" config = conf.create_config_object() assert not any(v == "PlaceHolder" for k, v in config["api"].items()) - conf.update_from_environmental_variables(config) + conf.update_from_environment_variables(config) assert all(v == "PlaceHolder" for k, v in config["api"].items()) - + def test_one_environment_variable_defined(self, specific_env_var, specific_key): # Tear-down - for key in environmental_variables: + for key in environment_variables: del os.environ[key] assert key not in os.environ - environmental_variables_with_keys = [ + environment_variables_with_keys = [ ("SF_API_AUTHENTICATION_TOKEN","authentication_token"), ("SF_API_HOSTNAME","hostname"), ("SF_API_USE_SSL","use_ssl"), @@ -246,15 +267,22 @@ def test_all_environmental_variables_defined(self): ("SF_API_PORT","port") ] - @pytest.mark.parametrize("specific_env_var, specific_key", environmental_variables_with_keys) - def test_one_environmental_variables_defined(self, specific_env_var, specific_key): + @pytest.mark.parametrize("specific_env_var, specific_key", 
environment_variables_with_keys) + def test_one_environment_variable_defined(self, specific_env_var, specific_key): + # Making sure that no environment variable was defined previously + for key in environment_variables: + if key in os.environ: + del os.environ[key] + assert key not in os.environ + os.environ[specific_env_var] = "PlaceHolder" config = conf.create_config_object() assert not any(v == "PlaceHolder" for k, v in config["api"].items()) - conf.update_from_environmental_variables(config) + conf.update_from_environment_variables(config) assert config["api"][specific_key] == "PlaceHolder" + assert all(v != "PlaceHolder" for k, v in config["api"].items() if k != specific_key) # Tear-down @@ -275,6 +303,5 @@ def test_parse_environment_variable(self, monkeypatch): assert conf.parse_environment_variable("some_boolean", "0") is False assert conf.parse_environment_variable("some_boolean", 0) is False - something_else = MagicMock() - assert conf.parse_environment_variable("not_a_boolean", something_else) == something_else + assert conf.parse_environment_variable("not_a_boolean","something_else") == "something_else" From 3ca6a5bf3acd05172b728ab343e18064ea6918cd Mon Sep 17 00:00:00 2001 From: antalszava Date: Fri, 21 Feb 2020 17:07:12 -0500 Subject: [PATCH 146/335] Marking xfail API Client tests (being refactored in another PR) --- tests/frontend/test_api_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/frontend/test_api_client.py b/tests/frontend/test_api_client.py index edd1e9602..61e262de4 100644 --- a/tests/frontend/test_api_client.py +++ b/tests/frontend/test_api_client.py @@ -132,7 +132,7 @@ def json(self): def raise_for_status(self): raise requests.exceptions.HTTPError() - +@pytest.mark.xfail class TestAPIClient: def test_init_default_client(self): """ @@ -206,7 +206,7 @@ def test_join_path(self, client): """ assert client.join_path("jobs") == "{client.BASE_URL}/jobs".format(client=client) - +@pytest.mark.xfail class TestResourceManager: def test_init(self): """ From abdc0afe85319a430e87a176c467a749bb5c2768 Mon Sep 17 00:00:00 2001 From: antalszava Date: Fri, 21 Feb 2020 17:07:44 -0500 Subject: [PATCH 147/335] Marking xfail API Client tests (being refactored in another PR) --- tests/frontend/test_api_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/frontend/test_api_client.py b/tests/frontend/test_api_client.py index 61e262de4..7228f6b44 100644 --- a/tests/frontend/test_api_client.py +++ b/tests/frontend/test_api_client.py @@ -386,7 +386,7 @@ def mock_raise(exception): assert len(client.errors) == 1 - +@pytest.mark.xfail class TestJob: def test_create_created(self, monkeypatch): """ From 2995cd986c535e71e067dc02434ee7fdb67d7a2d Mon Sep 17 00:00:00 2001 From: antalszava Date: Fri, 21 Feb 2020 17:16:20 -0500 Subject: [PATCH 148/335] Adding xfail mark on another API Client test --- tests/frontend/test_engine.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 4c1f14b81..613c61734 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -317,6 +317,7 @@ def test_run(self, starship_engine, monkeypatch): compile_options={} ) + @pytest.mark.xfail def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): """ This is an integration test that tests and actual program being submitted to a mock API, and From 07701729f95a509b8c2e2916fc4ae0de9fde01bf Mon Sep 17 00:00:00 2001 From: antalszava Date: Sun, 23 Feb 2020 23:22:02 
-0500 Subject: [PATCH 149/335] Docstrings, reorganizing parse_environment_variables function, adding DEFAULT_CONFIG as module attribute --- strawberryfields/configuration.py | 149 +++++++++++++++++++++------ tests/frontend/test_configuration.py | 12 +-- 2 files changed, 124 insertions(+), 37 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 43ae1be36..4a8d409a3 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -33,31 +33,22 @@ "debug": False} } -BOOLEAN_KEYS = ("debug", "use_ssl") - - -def parse_environment_variable(key, value): - trues = (True, "true", "True", "TRUE", "1", 1) - falses = (False, "false", "False", "FALSE", "0", 0) - - if key in BOOLEAN_KEYS: - if value in trues: - return True - elif value in falses: - return False - else: - raise ValueError("Boolean could not be parsed") - else: - return value - class ConfigurationError(Exception): """Exception used for configuration errors""" -# This function will be used by the Connection object +# TODO: somehow store the latest configuration path that was used def load_config(filename="config.toml", **kwargs): + """Load configuration from keyword arguments, configuration file or + environment variables. + + Args: + filename (str): the name of the configuration file to look for + + Keyword arguments: + """ config = create_config_object(**kwargs) parsed_config = look_for_config_in_file(filename=filename) @@ -72,8 +63,27 @@ def load_config(filename="config.toml", **kwargs): return config def create_config_object(authentication_token="", **kwargs): + """Create a configuration object that stores configuration related data + organized into sections. + + Currently API related configuration options are defined. This function + takes into consideration only pre-defined options. + + If called without passing any keyword arguments, then a default + configuration object is created. + + Keyword arguments: + authentication_token (str): the token to be used for user + authentication + hostname (str): the name of the host to connect to + use_ssl (bool): specifies if requests should be sent using SSL + port (int): the port to be used when connecting to the remote service + debug (bool): determines if the debugging mode is requested + + Returns: + dict of str: (dict of str: Union[str, bool, int]): the configuration + object """ - contains the recognized options for configuration.""" hostname = kwargs.get("hostname", "localhost") use_ssl = kwargs.get("use_ssl", True) port = kwargs.get("port", 443) @@ -84,13 +94,30 @@ def create_config_object(authentication_token="", **kwargs): "authentication_token": authentication_token, "hostname": hostname, "use_ssl": use_ssl, - "debug": debug, - "port": port + "port": port, + "debug": debug } } return config def look_for_config_in_file(filename="config.toml"): + """Looks for the first configuration file to be found at certain paths. + + .. note:: + + The following directories are checked (in the following order): + -The current working directory + -The directory specified by the environment variable SF_CONF (if specified) + -The user configuration directory (if specified) + + Keyword arguments: + filename (str): the configuration file to look for + + Returns: + dict of str: (dict of str: Union[str, bool, int]) or None: the + configuration object that was loaded + """ + # Search the current directory, the directory under environment # variable SF_CONF, and default user config directory, in that order. 
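    # Editor's note (not part of the original patch): a minimal sketch of how the
    # search order described above plays out, using hypothetical paths and assuming
    # SF_CONF is set and the user configuration directory resolves to the usual
    # appdirs location (~/.config/strawberryfields on Linux):
    #
    #     1. ./config.toml                           (current working directory)
    #     2. $SF_CONF/config.toml                    (directory named by SF_CONF)
    #     3. ~/.config/strawberryfields/config.toml  (user configuration directory)
    #
    # The first file that can be opened wins; if none is found, None is returned.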
current_dir = os.getcwd() @@ -101,24 +128,66 @@ def look_for_config_in_file(filename="config.toml"): for directory in directories: filepath = os.path.join(directory, filename) try: - parsed_config = parse_config_file(filepath) + parsed_config = load_config_file(filepath) break except FileNotFoundError: parsed_config = None - # TODO: maybe we need a merge here? return parsed_config +def load_config_file(filepath): + """Load a configuration from a TOML formatted file. + + Args: + filepath (str): path to the configuration file + + Returns: + dict of str: (dict of str: Union[str, bool, int]): the configuration + object that was loaded + """ + with open(filepath, "r") as f: + config_from_file = toml.load(f) + return config_from_file + def update_with_other_config(config, other_config): + """Updates the current configuration object with another one. - # Here an example for sectionconfig is api + Args: + config (dict of str: (dict of str: Union[str, bool, int])): the + configuration to be updated + other_config (dict of str: (dict of str: Union[str, bool, int])): the + configuration used for updating + + Returns: + dict of str: (dict of str: Union[str, bool, int])): the updated + configuration + """ + # Here an example for sectionconfig is API for section, sectionconfig in config.items(): for key in sectionconfig: if key in other_config[section]: - # Update from configuration file config[section][key] = other_config[section][key] def update_from_environment_variables(config): + """Updates the current configuration object from data stored in environment + variables. + + .. note:: + + Currently the following environment variables are checked: + -SF_API_AUTHENTICATION_TOKEN + -SF_API_HOSTNAME + -SF_API_USE_SSL + -SF_API_DEBUG + -SF_API_PORT + + Args: + config (dict of str: (dict of str: Union[str, bool, int])): the + configuration to be updated + Returns: + dict of str: (dict of str: Union[str, bool, int])): the updated + configuration + """ for section, sectionconfig in config.items(): env_prefix = "SF_{}_".format(section.upper()) for key in sectionconfig: @@ -126,14 +195,32 @@ def update_from_environment_variables(config): if env in os.environ: config[section][key] = parse_environment_variable(env, os.environ[env]) -def parse_config_file(filepath): - """Load a configuration file. + +BOOLEAN_KEYS = ("debug", "use_ssl") + +def parse_environment_variable(key, value): + """Parse a value stored in an environment variable. 
Args: - filepath (str): path to the configuration file + key (str): the name of the environment variable + value (Union[str, bool, int]): the value obtained from the environment + variable + + Returns: + [str, bool, int]: the parsed value """ - with open(filepath, "r") as f: - config_from_file = toml.load(f) - return config_from_file + trues = (True, "true", "True", "TRUE", "1", 1) + falses = (False, "false", "False", "FALSE", "0", 0) + + if key in BOOLEAN_KEYS: + if value in trues: + return True + elif value in falses: + return False + else: + raise ValueError("Boolean could not be parsed") + else: + return value +DEFAULT_CONFIG = create_config_object() configuration = load_config() diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 036a85edf..148914ff3 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -67,13 +67,13 @@ def test_config_object_with_authentication_token(self): class TestConfiguration: """Tests for the configuration class""" - def test_parse_config_file(self, tmpdir, monkeypatch): + def test_load_config_file(self, tmpdir, monkeypatch): filename = tmpdir.join("config.toml") with open(filename, "w") as f: f.write(TEST_FILE) - config_file = conf.parse_config_file(filepath=filename) + config_file = conf.load_config_file(filepath=filename) assert config_file == EXPECTED_CONFIG @@ -109,7 +109,7 @@ def test_loading_current_directory(self, tmpdir, monkeypatch): with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: tmpdir) - m.setattr(conf, "parse_config_file", lambda filepath: filepath) + m.setattr(conf, "load_config_file", lambda filepath: filepath) config_file = conf.look_for_config_in_file(filename=filename) assert config_file == tmpdir.join(filename) @@ -129,7 +129,7 @@ def raise_wrapper(ex): with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: "NoConfigFileHere") m.setattr(os.environ, "get", lambda x, y: tmpdir if x=="SF_CONF" else "NoConfigFileHere") - m.setattr(conf, "parse_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) + m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) # Need to mock the module specific function # m.setattr(conf, "user_config_dir", lambda *args: "NotTheFileName") @@ -154,7 +154,7 @@ def raise_wrapper(ex): m.setattr(os, "getcwd", lambda: "NoConfigFileHere") m.setattr(os.environ, "get", lambda *args: "NoConfigFileHere") m.setattr(conf, "user_config_dir", lambda x, *args: tmpdir if x=="strawberryfields" else "NoConfigFileHere") - m.setattr(conf, "parse_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) + m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) config_file = conf.look_for_config_in_file(filename=filename) assert config_file == tmpdir.join("config.toml") @@ -177,7 +177,7 @@ def raise_wrapper(ex): m.setattr(os, "getcwd", lambda: "NoConfigFileHere") m.setattr(os.environ, "get", lambda *args: "NoConfigFileHere") m.setattr(conf, "user_config_dir", lambda *args: "NoConfigFileHere") - m.setattr(conf, "parse_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) + m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else 
filepath) config_file = conf.look_for_config_in_file(filename=filename) From e7eb96a6c5a06b72bf6806c5837b3be020cb80d1 Mon Sep 17 00:00:00 2001 From: antalszava Date: Sun, 23 Feb 2020 23:30:15 -0500 Subject: [PATCH 150/335] Add create config object test with every keyword argument --- tests/frontend/test_configuration.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 148914ff3..ede5e2499 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -51,9 +51,21 @@ } } +OTHER_EXPECTED_CONFIG = { + "api": { + "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", + "hostname": "SomeHost", + "use_ssl": False, + "debug": True, + "port": 56, + } +} class TestCreteConfigObject: + """Test the creation of a configuration object""" + def test_empty_config_object(self): + """Test that an empty configuration object can be created.""" config = conf.create_config_object(authentication_token="", hostname="", use_ssl="", @@ -61,9 +73,21 @@ def test_empty_config_object(self): port="") assert all(value=="" for value in config["api"].values()) + def test_config_object_with_authentication_token(self): + """Test that passing only the authentication token creates the expected + configuration object.""" assert conf.create_config_object(authentication_token="071cdcce-9241-4965-93af-4a4dbc739135") == EXPECTED_CONFIG + def test_config_object_every_keyword_argument(self): + """Test that passing only the authentication token creates the expected + configuration object.""" + assert conf.create_config_object(authentication_token="071cdcce-9241-4965-93af-4a4dbc739135", + hostname="SomeHost", + use_ssl=False, + debug=True, + port=56) == OTHER_EXPECTED_CONFIG + class TestConfiguration: """Tests for the configuration class""" From e074ca50659a160fdeefd61855ac94ca129242cf Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 21 Feb 2020 16:39:48 -0500 Subject: [PATCH 151/335] Fix Result.samples to have correct dims of (shots, modes) --- strawberryfields/engine.py | 12 +++--------- tests/frontend/test_engine.py | 4 ++-- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 97cab3534..accc92f5e 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -89,9 +89,6 @@ def __init__(self, samples, is_stateful=True): self._state = None self._is_stateful = is_stateful - # ``samples`` arrives as a list of arrays, need to convert here to a multidimensional array - if len(np.shape(samples)) > 1: - samples = np.stack(samples, 1) self._samples = samples @property @@ -159,7 +156,7 @@ def __init__(self, backend, backend_options=None): self.backend_options = backend_options.copy() # dict is mutable #: List[Program]: list of Programs that have been run self.run_progs = [] - #: List[List[Number]]: latest measurement results, shape == (modes, shots) + #: List[List[Number]]: latest measurement results, shape == (shots, modes) self.samples = None if isinstance(backend, str): @@ -400,7 +397,7 @@ def _broadcast_nones(val, dim): prev = p if self.samples is not None: - return Result(self.samples.copy()) + return Result(np.array(self.samples).T) class LocalEngine(BaseEngine): @@ -882,10 +879,7 @@ def get_job_result(self, job_id: str) -> Result: buf.seek(0) samples = np.load(buf) - # NOTE To maintain consistency with other components for now, transpose - # the received result array from (shots, modes) to (modes, 
shots), - # which allows us to keep the logic in `Result.samples` unchanged - return Result(samples.T, is_stateful=False) + return Result(samples, is_stateful=False) raise RequestFailedError(self._format_error_message(response)) def cancel_job(self, job_id: str): diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index e78d99587..4e0a5d057 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -434,7 +434,7 @@ def test_run_complete(self, connection, prog, monkeypatch): engine = StarshipEngine("chip2", connection=connection) result = engine.run(prog) - assert np.array_equal(result.samples.T, result_expected) + assert np.array_equal(result.samples, result_expected) with pytest.raises(AttributeError): _ = result.state @@ -464,7 +464,7 @@ def test_run_async(self, connection, prog, monkeypatch): job.refresh() assert job.status == JobStatus.COMPLETE - assert np.array_equal(job.result.samples.T, result_expected) + assert np.array_equal(job.result.samples, result_expected) with pytest.raises(AttributeError): _ = job.result.state From 7bc48ca6d9203886852172cd1f9f08424890989d Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 15:03:38 -0500 Subject: [PATCH 152/335] Adding load_config tests using each units separately; change logic for kwargs update; add logic for integer parsing from env vars; add tests --- strawberryfields/configuration.py | 35 +++- tests/frontend/test_configuration.py | 240 ++++++++++++++++++--------- 2 files changed, 188 insertions(+), 87 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 4a8d409a3..6c7b66527 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -43,13 +43,30 @@ def load_config(filename="config.toml", **kwargs): """Load configuration from keyword arguments, configuration file or environment variables. - Args: - filename (str): the name of the configuration file to look for + .. note:: + + The configuration object (that is a nested dictionary) would be created based + on the following (order defines the importance, going from most + important to least important): + + 1. keyword arguments passed to ``load_config`` + 2. data contained in environmental variables (if any) + 3. 
data contained in a configuration file (if exists) Keyword arguments: + filename (str): the name of the configuration file to look for + authentication_token (str): the token to be used for user + authentication + hostname (str): the name of the host to connect to + use_ssl (bool): specifies if requests should be sent using SSL + port (int): the port to be used when connecting to the remote service + debug (bool): determines if the debugging mode is requested + Returns: + dict of str: (dict of str: Union[str, bool, int]): the configuration + object """ - config = create_config_object(**kwargs) + config = create_config_object() parsed_config = look_for_config_in_file(filename=filename) @@ -60,6 +77,9 @@ def load_config(filename="config.toml", **kwargs): update_from_environment_variables(config) + config_from_keyword_arguments = {"api": kwargs} + update_with_other_config(config, other_config=config_from_keyword_arguments) + return config def create_config_object(authentication_token="", **kwargs): @@ -193,10 +213,11 @@ def update_from_environment_variables(config): for key in sectionconfig: env = env_prefix + key.upper() if env in os.environ: - config[section][key] = parse_environment_variable(env, os.environ[env]) + config[section][key] = parse_environment_variable(key, os.environ[env]) BOOLEAN_KEYS = ("debug", "use_ssl") +INTEGER_KEYS = ("port") def parse_environment_variable(key, value): """Parse a value stored in an environment variable. @@ -219,8 +240,10 @@ def parse_environment_variable(key, value): return False else: raise ValueError("Boolean could not be parsed") - else: - return value + elif key in INTEGER_KEYS: + return int(value) + + return value DEFAULT_CONFIG = create_config_object() configuration = load_config() diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index ede5e2499..3bb232d09 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -31,8 +31,8 @@ authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" hostname = "localhost" use_ssl = true -debug = false port = 443 +debug = false """ TEST_FILE_ONE_VALUE = """\ @@ -46,22 +46,110 @@ "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", "hostname": "localhost", "use_ssl": True, - "debug": False, "port": 443, + "debug": False, } } OTHER_EXPECTED_CONFIG = { "api": { - "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", + "authentication_token": "SomeAuth", "hostname": "SomeHost", "use_ssl": False, - "debug": True, "port": 56, + "debug": True, } } -class TestCreteConfigObject: +environment_variables = [ + "SF_API_AUTHENTICATION_TOKEN", + "SF_API_HOSTNAME", + "SF_API_USE_SSL", + "SF_API_DEBUG", + "SF_API_PORT" + ] + +def tear_down_all_env_var_defs(): + """Making sure that no environment variables are defined after.""" + for key in environment_variables: + if key in os.environ: + del os.environ[key] + assert key not in os.environ + +class TestLoadConfig: + + def test_not_found_warning(self, caplog): + """Test that a warning is raised if no configuration file found.""" + + conf.load_config(filename='NotAFileName') + assert "No Strawberry Fields configuration file found." 
in caplog.text + + def test_keywords_take_precedence_over_everything(self, monkeypatch, tmpdir): + """Test that the keyword arguments passed to load_config take + precedence over data in environment variables or data in a + configuration file.""" + + filename = tmpdir.join("config.toml") + + with open(filename, "w") as f: + f.write(TEST_FILE) + + os.environ["SF_API_AUTHENTICATION_TOKEN"] = "NotOurAuth" + os.environ["SF_API_HOSTNAME"] = "NotOurHost" + os.environ["SF_API_USE_SSL"] = "True" + os.environ["SF_API_DEBUG"] = "False" + os.environ["SF_API_PORT"] = "42" + + with monkeypatch.context() as m: + m.setattr(os, "getcwd", lambda: tmpdir) + configuration = conf.load_config(authentication_token="SomeAuth", + hostname="SomeHost", + use_ssl=False, + debug=True, + port=56 + ) + + assert configuration == OTHER_EXPECTED_CONFIG + + def test_environment_variables_take_precedence_over_conf_file(self, monkeypatch, tmpdir): + """Test that the data in environment variables precedence over data in + a configuration file.""" + + filename = tmpdir.join("config.toml") + + with open(filename, "w") as f: + f.write(TEST_FILE) + + os.environ["SF_API_AUTHENTICATION_TOKEN"] = "SomeAuth" + os.environ["SF_API_HOSTNAME"] = "SomeHost" + os.environ["SF_API_USE_SSL"] = "False" + os.environ["SF_API_DEBUG"] = "True" + os.environ["SF_API_PORT"] = "56" + + with monkeypatch.context() as m: + m.setattr(os, "getcwd", lambda: tmpdir) + configuration = conf.load_config() + + assert configuration == OTHER_EXPECTED_CONFIG + + tear_down_all_env_var_defs() + + def test_conf_file_loads_well(self, monkeypatch, tmpdir): + """Test that the data in environment variables precedence over data in + a configuration file.""" + + filename = tmpdir.join("config.toml") + + with open(filename, "w") as f: + f.write(TEST_FILE) + + with monkeypatch.context() as m: + m.setattr(os, "getcwd", lambda: tmpdir) + configuration = conf.load_config() + + assert configuration == EXPECTED_CONFIG + +class TestCreateConfigObject: """Test the creation of a configuration object""" def test_empty_config_object(self): @@ -80,50 +168,13 @@ def test_config_object_with_authentication_token(self): assert conf.create_config_object(authentication_token="071cdcce-9241-4965-93af-4a4dbc739135") == EXPECTED_CONFIG def test_config_object_every_keyword_argument(self): - """Test that passing only the authentication token creates the expected + """Test that passing every keyword argument creates the expected configuration object.""" - assert conf.create_config_object(authentication_token="071cdcce-9241-4965-93af-4a4dbc739135", + assert conf.create_config_object(authentication_token="SomeAuth", hostname="SomeHost", use_ssl=False, debug=True, port=56) == OTHER_EXPECTED_CONFIG - -class TestConfiguration: - """Tests for the configuration class""" - - def test_load_config_file(self, tmpdir, monkeypatch): - filename = tmpdir.join("config.toml") - - with open(filename, "w") as f: - f.write(TEST_FILE) - - config_file = conf.load_config_file(filepath=filename) - - assert config_file == EXPECTED_CONFIG - -class TestLoadConfig: - - def test_not_found_warning(self, caplog): - """Test that a warning is raised if no configuration file found.""" - - conf.load_config(filename='NotAFileName') - assert "No Strawberry Fields configuration file found." 
in caplog.text - - def test_check_call_order(self, monkeypatch): - - def mock_look_for_config_in_file(*args, **kwargs): - call_history.append(2) - return "NotNone" - - call_history = [] - with monkeypatch.context() as m: - m.setattr(conf, "create_config_object", lambda *args: call_history.append(1)) - m.setattr(conf, "look_for_config_in_file", mock_look_for_config_in_file) - m.setattr(conf, "update_with_other_config", lambda *args, **kwargs: call_history.append(3)) - m.setattr(conf, "update_from_environment_variables", lambda *args: call_history.append(4)) - conf.load_config() - assert call_history == [1,2,3,4] - class TestLookForConfigInFile: def test_loading_current_directory(self, tmpdir, monkeypatch): @@ -207,7 +258,21 @@ def raise_wrapper(ex): assert config_file is None - class TestUpdateWithOtherConfig: +class TestLoadConfiguration: + """Tests for the configuration class""" + + def test_load_config_file(self, tmpdir, monkeypatch): + filename = tmpdir.join("config.toml") + + with open(filename, "w") as f: + f.write(TEST_FILE) + + config_file = conf.load_config_file(filepath=filename) + + assert config_file == EXPECTED_CONFIG + + +class TestUpdateWithOtherConfig: def test_update_entire_config(self): config = conf.create_config_object() @@ -257,63 +322,71 @@ def test_update_only_one_item_in_section(self, specific_key, config_to_update_wi assert config["api"][specific_key] == "PlaceHolder" assert all(v != "PlaceHolder" for k, v in config["api"].items() if k != specific_key) -environment_variables = [ - "SF_API_AUTHENTICATION_TOKEN", - "SF_API_HOSTNAME", - "SF_API_USE_SSL", - "SF_API_DEBUG", - "SF_API_PORT" - ] + + +value_mapping = [ + ("SF_API_AUTHENTICATION_TOKEN","SomeAuth"), + ("SF_API_HOSTNAME","SomeHost"), + ("SF_API_USE_SSL","False"), + ("SF_API_PORT","56"), + ("SF_API_DEBUG","True") + ] + +parsed_values_mapping = { + "SF_API_AUTHENTICATION_TOKEN": "SomeAuth", + "SF_API_HOSTNAME": "SomeHost", + "SF_API_USE_SSL": False, + "SF_API_PORT": 56, + "SF_API_DEBUG": True, + } class TestUpdateFromEnvironmentalVariables: def test_all_environment_variables_defined(self): - for key in environment_variables: - os.environ[key] = "PlaceHolder" + for key, value in value_mapping: + os.environ[key] = value config = conf.create_config_object() - assert not any(v == "PlaceHolder" for k, v in config["api"].items()) + for v, parsed_value in zip(config["api"].values(), parsed_values_mapping.values()): + assert v != parsed_value conf.update_from_environment_variables(config) - assert all(v == "PlaceHolder" for k, v in config["api"].items()) - def test_one_environment_variable_defined(self, specific_env_var, specific_key): - # Tear-down - for key in environment_variables: - del os.environ[key] - assert key not in os.environ + for v, parsed_value in zip(config["api"].values(), parsed_values_mapping.values()): + assert v == parsed_value + + tear_down_all_env_var_defs() - environment_variables_with_keys = [ - ("SF_API_AUTHENTICATION_TOKEN","authentication_token"), - ("SF_API_HOSTNAME","hostname"), - ("SF_API_USE_SSL","use_ssl"), - ("SF_API_DEBUG","debug"), - ("SF_API_PORT","port") + environment_variables_with_keys_and_values = [ + ("SF_API_AUTHENTICATION_TOKEN","authentication_token","SomeAuth"), + ("SF_API_HOSTNAME","hostname","SomeHost"), + ("SF_API_USE_SSL","use_ssl","False"), + ("SF_API_PORT","port", "56"), + ("SF_API_DEBUG","debug","True") ] - @pytest.mark.parametrize("specific_env_var, specific_key", environment_variables_with_keys) - def test_one_environment_variable_defined(self, specific_env_var, 
specific_key): - # Making sure that no environment variable was defined previously - for key in environment_variables: - if key in os.environ: - del os.environ[key] - assert key not in os.environ + @pytest.mark.parametrize("env_var, key, value", environment_variables_with_keys_and_values) + def test_one_environment_variable_defined(self, env_var, key, value): - os.environ[specific_env_var] = "PlaceHolder" + tear_down_all_env_var_defs() + os.environ[env_var] = value config = conf.create_config_object() - assert not any(v == "PlaceHolder" for k, v in config["api"].items()) + for v, parsed_value in zip(config["api"].values(), parsed_values_mapping.values()): + assert v != parsed_value conf.update_from_environment_variables(config) - assert config["api"][specific_key] == "PlaceHolder" + assert config["api"][key] == parsed_values_mapping[env_var] - assert all(v != "PlaceHolder" for k, v in config["api"].items() if k != specific_key) + for v, (key, parsed_value) in zip(config["api"].values(), parsed_values_mapping.items()): + if key != env_var: + assert v != parsed_value # Tear-down - del os.environ[specific_env_var] - assert specific_env_var not in os.environ + del os.environ[env_var] + assert env_var not in os.environ - def test_parse_environment_variable(self, monkeypatch): + def test_parse_environment_variable_boolean(self, monkeypatch): monkeypatch.setattr(conf, "BOOLEAN_KEYS", ("some_boolean",)) assert conf.parse_environment_variable("some_boolean", "true") is True assert conf.parse_environment_variable("some_boolean", "True") is True @@ -329,3 +402,8 @@ def test_parse_environment_variable(self, monkeypatch): assert conf.parse_environment_variable("not_a_boolean","something_else") == "something_else" + def test_parse_environment_variable_integer(self, monkeypatch): + monkeypatch.setattr(conf, "INTEGER_KEYS", ("some_integer",)) + assert conf.parse_environment_variable("some_integer", "123") == 123 + assert conf.parse_environment_variable("not_an_integer","something_else") == "something_else" + From fc5619adc2cdf0675a11c17a0914bb88bf83c90c Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 15:13:25 -0500 Subject: [PATCH 153/335] Test docstrings --- tests/frontend/test_configuration.py | 106 +++++++++++++++------------ 1 file changed, 59 insertions(+), 47 deletions(-) diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 3bb232d09..ba323ac7c 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -77,6 +77,7 @@ def tear_down_all_env_var_defs(): assert key not in os.environ class TestLoadConfig: + """Tests for the load_config function.""" def test_not_found_warning(self, caplog): """Test that a warning is raised if no configuration file found.""" @@ -176,6 +177,7 @@ def test_config_object_every_keyword_argument(self): debug=True, port=56) == OTHER_EXPECTED_CONFIG class TestLookForConfigInFile: + """Tests for the look_for_config_in_file function.""" def test_loading_current_directory(self, tmpdir, monkeypatch): """Test that the default configuration file is loaded from the current @@ -258,10 +260,8 @@ def raise_wrapper(ex): assert config_file is None -class TestLoadConfiguration: - """Tests for the configuration class""" - def test_load_config_file(self, tmpdir, monkeypatch): + """Tests that configuration is loaded correctly from a TOML file.""" filename = tmpdir.join("config.toml") with open(filename, "w") as f: @@ -271,57 +271,59 @@ def test_load_config_file(self, tmpdir, monkeypatch): assert 
config_file == EXPECTED_CONFIG - class TestUpdateWithOtherConfig: + """Tests for the update_with_other_config function.""" - def test_update_entire_config(self): - config = conf.create_config_object() - assert config["api"]["authentication_token"] == "" - - conf.update_with_other_config(config, EXPECTED_CONFIG) - assert config == EXPECTED_CONFIG - - ONLY_AUTH_CONFIG = { - "api": { - "authentication_token": "PlaceHolder", - } - } - - ONLY_HOST_CONFIG = { - "api": { - "hostname": "PlaceHolder", - } - } - - ONLY_SSL_CONFIG = { - "api": { - "use_ssl": "PlaceHolder", - } - } + def test_update_entire_config(self): + """Tests that the entire configuration object is updated.""" - ONLY_DEBUG_CONFIG = { - "api": { - "debug": "PlaceHolder", - } - } + config = conf.create_config_object() + assert config["api"]["authentication_token"] == "" + + conf.update_with_other_config(config, EXPECTED_CONFIG) + assert config == EXPECTED_CONFIG + + ONLY_AUTH_CONFIG = { + "api": { + "authentication_token": "PlaceHolder", + } + } + + ONLY_HOST_CONFIG = { + "api": { + "hostname": "PlaceHolder", + } + } + + ONLY_SSL_CONFIG = { + "api": { + "use_ssl": "PlaceHolder", + } + } - ONLY_PORT_CONFIG = { - "api": {"port": "PlaceHolder"} - } + ONLY_DEBUG_CONFIG = { + "api": { + "debug": "PlaceHolder", + } + } - @pytest.mark.parametrize("specific_key, config_to_update_with", [("authentication_token",ONLY_AUTH_CONFIG), - ("hostname",ONLY_HOST_CONFIG), - ("use_ssl",ONLY_SSL_CONFIG), - ("debug",ONLY_DEBUG_CONFIG), - ("port",ONLY_PORT_CONFIG)]) - def test_update_only_one_item_in_section(self, specific_key, config_to_update_with): - config = conf.create_config_object() - assert config["api"][specific_key] != "PlaceHolder" + ONLY_PORT_CONFIG = { + "api": {"port": "PlaceHolder"} + } - conf.update_with_other_config(config, config_to_update_with) - assert config["api"][specific_key] == "PlaceHolder" - assert all(v != "PlaceHolder" for k, v in config["api"].items() if k != specific_key) + @pytest.mark.parametrize("specific_key, config_to_update_with", [("authentication_token",ONLY_AUTH_CONFIG), + ("hostname",ONLY_HOST_CONFIG), + ("use_ssl",ONLY_SSL_CONFIG), + ("debug",ONLY_DEBUG_CONFIG), + ("port",ONLY_PORT_CONFIG)]) + def test_update_only_one_item_in_section(self, specific_key, config_to_update_with): + """Tests that only one item is updated in the configuration object is updated.""" + config = conf.create_config_object() + assert config["api"][specific_key] != "PlaceHolder" + conf.update_with_other_config(config, config_to_update_with) + assert config["api"][specific_key] == "PlaceHolder" + assert all(v != "PlaceHolder" for k, v in config["api"].items() if k != specific_key) value_mapping = [ @@ -341,8 +343,11 @@ def test_update_only_one_item_in_section(self, specific_key, config_to_update_wi } class TestUpdateFromEnvironmentalVariables: + """Tests for the update_from_environment_variables function.""" def test_all_environment_variables_defined(self): + """Tests that the configuration object is updated correctly when all + the environment variables are defined.""" for key, value in value_mapping: os.environ[key] = value @@ -367,6 +372,8 @@ def test_all_environment_variables_defined(self): @pytest.mark.parametrize("env_var, key, value", environment_variables_with_keys_and_values) def test_one_environment_variable_defined(self, env_var, key, value): + """Tests that the configuration object is updated correctly when only + one environment variable is defined.""" tear_down_all_env_var_defs() os.environ[env_var] = value @@ -387,6 +394,8 @@ 
def test_one_environment_variable_defined(self, env_var, key, value): assert env_var not in os.environ def test_parse_environment_variable_boolean(self, monkeypatch): + """Tests that boolean values can be parsed correctly from environment + variables.""" monkeypatch.setattr(conf, "BOOLEAN_KEYS", ("some_boolean",)) assert conf.parse_environment_variable("some_boolean", "true") is True assert conf.parse_environment_variable("some_boolean", "True") is True @@ -403,6 +412,9 @@ def test_parse_environment_variable_boolean(self, monkeypatch): assert conf.parse_environment_variable("not_a_boolean","something_else") == "something_else" def test_parse_environment_variable_integer(self, monkeypatch): + """Tests that integer values can be parsed correctly from environment + variables.""" + monkeypatch.setattr(conf, "INTEGER_KEYS", ("some_integer",)) assert conf.parse_environment_variable("some_integer", "123") == 123 assert conf.parse_environment_variable("not_an_integer","something_else") == "something_else" From 9109a468bb618707f45276754b6eae07fd1cbddb Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 15:18:07 -0500 Subject: [PATCH 154/335] Default config refactor --- strawberryfields/configuration.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 6c7b66527..2f2f9cf49 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -24,16 +24,6 @@ log.getLogger() -DEFAULT_CONFIG = { - "api": { - "authentication_token": "", - "hostname": "localhost", - "use_ssl": True, - "port": 443, - "debug": False} -} - - class ConfigurationError(Exception): """Exception used for configuration errors""" From 653f566c5c9e9516e16dc9f6e3b497904ab1a8f7 Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 15:26:23 -0500 Subject: [PATCH 155/335] Add absolute path test --- strawberryfields/configuration.py | 2 +- tests/frontend/test_configuration.py | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 2f2f9cf49..ec11a7c62 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -146,7 +146,7 @@ def look_for_config_in_file(filename="config.toml"): return parsed_config def load_config_file(filepath): - """Load a configuration from a TOML formatted file. + """Load a configuration object from a TOML formatted file. 
Args: filepath (str): path to the configuration file diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index ba323ac7c..7ba946ecc 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -271,6 +271,21 @@ def test_load_config_file(self, tmpdir, monkeypatch): assert config_file == EXPECTED_CONFIG + def test_loading_absolute_path(self, tmpdir, monkeypatch): + """Test that the default configuration file can be loaded + via an absolute path.""" + filename = os.path.abspath(tmpdir.join("config.toml")) + + + with open(filename, "w") as f: + f.write(TEST_FILE) + + + os.environ["SF_CONF"] = "" + config_file = conf.load_config_file(filepath=filename) + + assert config_file == EXPECTED_CONFIG + class TestUpdateWithOtherConfig: """Tests for the update_with_other_config function.""" From 6f50208d9a0cfc0022fd96d2fd29e14cbd0c0570 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Mon, 24 Feb 2020 15:36:40 -0500 Subject: [PATCH 156/335] Tests --- strawberryfields/engine.py | 8 ++++++-- tests/frontend/test_engine.py | 6 +++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index accc92f5e..e196b98a0 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -89,6 +89,9 @@ def __init__(self, samples, is_stateful=True): self._state = None self._is_stateful = is_stateful + # ``samples`` arrives as a list of arrays, need to convert here to a multidimensional array + if len(np.shape(samples)) > 1: + samples = np.stack(samples, 1) self._samples = samples @property @@ -397,7 +400,7 @@ def _broadcast_nones(val, dim): prev = p if self.samples is not None: - return Result(np.array(self.samples).T) + return Result(self.samples.copy()) class LocalEngine(BaseEngine): @@ -809,6 +812,7 @@ def create_job(self, target: str, program: Program, shots: int) -> Job: "Job creation failed: {}".format(self._format_error_message(response)) ) + # TODO this is not deployed on the platform side yet def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: """Gets a list of jobs created by the user, optionally filtered by datetime. 
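    # Editor's note (not part of the original patch): a small sketch of the sample
    # shape convention restored earlier in this commit, assuming ``samples`` arrives
    # as a list of per-shot arrays:
    #
    #     import numpy as np
    #     samples = [np.array([1, 0]), np.array([0, 0]), np.array([2, 1])]  # 3 shots, 2 modes
    #     np.stack(samples, 1).shape  # (2, 3), i.e. (modes, shots)
    #
    # Under this convention ``Result.samples`` is indexed as ``samples[mode][shot]``.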
@@ -821,7 +825,7 @@ def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: Returns: List[strawberryfields.engine.Job]: the jobs """ - response = self._get("/jobs?page[size]={}".format(self.MAX_JOBS_REQUESTED)) + response = self._get("/jobs?size={}".format(self.MAX_JOBS_REQUESTED)) if response.status_code == 200: return [ Job(id_=info["id"], status=info["status"], connection=self) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 4e0a5d057..6fdc6c36f 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -375,7 +375,7 @@ def test_get_job_result(self, connection, monkeypatch): result = connection.get_job_result("123") - assert np.array_equal(result.samples, result_samples) + assert np.array_equal(result.samples.T, result_samples) def test_get_job_result_error(self, connection, monkeypatch): """Tests a failed job result request.""" @@ -434,7 +434,7 @@ def test_run_complete(self, connection, prog, monkeypatch): engine = StarshipEngine("chip2", connection=connection) result = engine.run(prog) - assert np.array_equal(result.samples, result_expected) + assert np.array_equal(result.samples.T, result_expected) with pytest.raises(AttributeError): _ = result.state @@ -464,7 +464,7 @@ def test_run_async(self, connection, prog, monkeypatch): job.refresh() assert job.status == JobStatus.COMPLETE - assert np.array_equal(job.result.samples, result_expected) + assert np.array_equal(job.result.samples.T, result_expected) with pytest.raises(AttributeError): _ = job.result.state From 3111492059f27f768dc867d03ab33a2a4236de9f Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Mon, 24 Feb 2020 15:50:44 -0500 Subject: [PATCH 157/335] Tests --- strawberryfields/engine.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index d9055db39..edae71939 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -89,6 +89,9 @@ def __init__(self, samples, is_stateful=True): self._state = None self._is_stateful = is_stateful + # ``samples`` arrives as a list of arrays, need to convert here to a multidimensional array + if len(np.shape(samples)) > 1: + samples = np.stack(samples, 1) self._samples = samples @property @@ -397,7 +400,7 @@ def _broadcast_nones(val, dim): prev = p if self.samples is not None: - return Result(np.array(self.samples).T) + return Result(self.samples.copy()) class LocalEngine(BaseEngine): @@ -879,7 +882,6 @@ def get_job_result(self, job_id: str) -> Result: buf.write(response.content) buf.seek(0) samples = np.load(buf) - return Result(samples, is_stateful=False) raise RequestFailedError(self._format_error_message(response)) From ffa9c44d759c3e7d7c89915d7e2c76a3c9c12cde Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Mon, 24 Feb 2020 15:57:11 -0500 Subject: [PATCH 158/335] Tests --- tests/frontend/test_engine.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index c2b2c04fc..6fdc6c36f 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -434,7 +434,7 @@ def test_run_complete(self, connection, prog, monkeypatch): engine = StarshipEngine("chip2", connection=connection) result = engine.run(prog) - assert np.array_equal(result.samples, result_expected) + assert np.array_equal(result.samples.T, result_expected) with pytest.raises(AttributeError): _ = result.state @@ -464,7 +464,7 @@ def test_run_async(self, connection, prog, 
monkeypatch): job.refresh() assert job.status == JobStatus.COMPLETE - assert np.array_equal(job.result.samples, result_expected) + assert np.array_equal(job.result.samples.T, result_expected) with pytest.raises(AttributeError): _ = job.result.state From 984c5a5ebeea139022a0680f757e9b50ab27bd27 Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 16:04:04 -0500 Subject: [PATCH 159/335] Saving path, linting, isort --- strawberryfields/configuration.py | 40 +++++++++++++++++-------------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index ec11a7c62..d5ff65b5a 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -15,8 +15,8 @@ This module contains the :class:`Configuration` class, which is used to load, store, save, and modify configuration options for Strawberry Fields. """ -import os import logging as log +import os import toml from appdirs import user_config_dir @@ -28,7 +28,6 @@ class ConfigurationError(Exception): """Exception used for configuration errors""" -# TODO: somehow store the latest configuration path that was used def load_config(filename="config.toml", **kwargs): """Load configuration from keyword arguments, configuration file or environment variables. @@ -58,7 +57,7 @@ def load_config(filename="config.toml", **kwargs): """ config = create_config_object() - parsed_config = look_for_config_in_file(filename=filename) + parsed_config, _ = look_for_config_in_file(filename=filename) if parsed_config is not None: update_with_other_config(config, other_config=parsed_config) @@ -116,9 +115,10 @@ def look_for_config_in_file(filename="config.toml"): .. note:: The following directories are checked (in the following order): - -The current working directory - -The directory specified by the environment variable SF_CONF (if specified) - -The user configuration directory (if specified) + + * The current working directory + * The directory specified by the environment variable SF_CONF (if specified) + * The user configuration directory (if specified) Keyword arguments: filename (str): the configuration file to look for @@ -141,9 +141,9 @@ def look_for_config_in_file(filename="config.toml"): parsed_config = load_config_file(filepath) break except FileNotFoundError: - parsed_config = None + parsed_config, filepath = None, None - return parsed_config + return parsed_config, filepath def load_config_file(filepath): """Load a configuration object from a TOML formatted file. @@ -185,18 +185,19 @@ def update_from_environment_variables(config): .. 
note:: Currently the following environment variables are checked: - -SF_API_AUTHENTICATION_TOKEN - -SF_API_HOSTNAME - -SF_API_USE_SSL - -SF_API_DEBUG - -SF_API_PORT + + * SF_API_AUTHENTICATION_TOKEN + * SF_API_HOSTNAME + * SF_API_USE_SSL + * SF_API_DEBUG + * SF_API_PORT Args: config (dict of str: (dict of str: Union[str, bool, int])): the configuration to be updated Returns: dict of str: (dict of str: Union[str, bool, int])): the updated - configuration + configuration """ for section, sectionconfig in config.items(): env_prefix = "SF_{}_".format(section.upper()) @@ -226,14 +227,17 @@ def parse_environment_variable(key, value): if key in BOOLEAN_KEYS: if value in trues: return True - elif value in falses: + + if value in falses: return False - else: - raise ValueError("Boolean could not be parsed") - elif key in INTEGER_KEYS: + + raise ValueError("Boolean could not be parsed") + + if key in INTEGER_KEYS: return int(value) return value DEFAULT_CONFIG = create_config_object() configuration = load_config() +config_file_path = look_for_config_in_file()[1] From b90ca00aac5658662a7f32466a5ecd0645ec5c29 Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 16:26:35 -0500 Subject: [PATCH 160/335] Modify dict formatting in docstring --- strawberryfields/configuration.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index d5ff65b5a..bf88b75ad 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -52,7 +52,7 @@ def load_config(filename="config.toml", **kwargs): debug (bool): determines if the debugging mode is requested Returns: - dict of str: (dict of str: Union[str, bool, int]): the configuration + dict[str, dict[str, Union[str, bool, int]]]: the configuration object """ config = create_config_object() @@ -90,7 +90,7 @@ def create_config_object(authentication_token="", **kwargs): debug (bool): determines if the debugging mode is requested Returns: - dict of str: (dict of str: Union[str, bool, int]): the configuration + dict[str, dict[str, Union[str, bool, int]]]: the configuration object """ hostname = kwargs.get("hostname", "localhost") @@ -124,7 +124,7 @@ def look_for_config_in_file(filename="config.toml"): filename (str): the configuration file to look for Returns: - dict of str: (dict of str: Union[str, bool, int]) or None: the + dict[str, dict[str, Union[str, bool, int]]] or None: the configuration object that was loaded """ @@ -152,7 +152,7 @@ def load_config_file(filepath): filepath (str): path to the configuration file Returns: - dict of str: (dict of str: Union[str, bool, int]): the configuration + dict[str, dict[str, Union[str, bool, int]]]: the configuration object that was loaded """ with open(filepath, "r") as f: @@ -163,13 +163,13 @@ def update_with_other_config(config, other_config): """Updates the current configuration object with another one. 
Args: - config (dict of str: (dict of str: Union[str, bool, int])): the + config (dict[str, dict[str, Union[str, bool, int]]]): the configuration to be updated - other_config (dict of str: (dict of str: Union[str, bool, int])): the + other_config (dict[str, dict[str, Union[str, bool, int]]]): the configuration used for updating Returns: - dict of str: (dict of str: Union[str, bool, int])): the updated + dict[str, dict[str, Union[str, bool, int]]]): the updated configuration """ # Here an example for sectionconfig is API @@ -193,10 +193,10 @@ def update_from_environment_variables(config): * SF_API_PORT Args: - config (dict of str: (dict of str: Union[str, bool, int])): the + config (dict[str, dict[str, Union[str, bool, int]]]): the configuration to be updated Returns: - dict of str: (dict of str: Union[str, bool, int])): the updated + dict[str, dict[str, Union[str, bool, int]]]): the updated configuration """ for section, sectionconfig in config.items(): From 0e82fa1a5e334b743657950072ea70b268e30d66 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Mon, 24 Feb 2020 16:27:38 -0500 Subject: [PATCH 161/335] Dimensions --- starship | 4 ++-- strawberryfields/engine.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/starship b/starship index b97905859..d0f04f641 100755 --- a/starship +++ b/starship @@ -59,6 +59,6 @@ if __name__ == "__main__": if result and result.samples is not None: if args.output: with open(args.output, "w") as file: - file.write(str(result.samples)) + file.write(str(result.samples.T)) else: - sys.stdout.write(str(result.samples)) + sys.stdout.write(str(result.samples.T)) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index edae71939..0fa24edc2 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -159,7 +159,7 @@ def __init__(self, backend, backend_options=None): self.backend_options = backend_options.copy() # dict is mutable #: List[Program]: list of Programs that have been run self.run_progs = [] - #: List[List[Number]]: latest measurement results, shape == (shots, modes) + #: List[List[Number]]: latest measurement results, shape == (modes, shots) self.samples = None if isinstance(backend, str): From 2622e5ab57c303fa615ef72b88f79c64cb8ecf6c Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Mon, 24 Feb 2020 16:47:21 -0500 Subject: [PATCH 162/335] Linting, comments, cleanup --- strawberryfields/engine.py | 22 +++++++++++----------- tests/frontend/test_engine.py | 10 ++++++---- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 0fa24edc2..81af7215f 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -1,4 +1,4 @@ -# Copyright 2019 Xanadu Quantum Technologies Inc. +# Copyright 2019-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -812,7 +812,6 @@ def create_job(self, target: str, program: Program, shots: int) -> Job: "Job creation failed: {}".format(self._format_error_message(response)) ) - # TODO this is not deployed on the platform side yet def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: """Gets a list of jobs created by the user, optionally filtered by datetime. 
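    # Editor's note (not part of the original patch): for concreteness, the nested
    # dictionary described by the ``dict[str, dict[str, Union[str, bool, int]]]``
    # annotations in the configuration docstrings above has this shape when filled
    # with the defaults used throughout this series (a sketch, not the output of
    # any particular call):
    #
    #     {"api": {"authentication_token": "", "hostname": "localhost",
    #              "use_ssl": True, "port": 443, "debug": False}}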
@@ -825,15 +824,16 @@ def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: Returns: List[strawberryfields.engine.Job]: the jobs """ - response = self._get("/jobs?size={}".format(self.MAX_JOBS_REQUESTED)) - if response.status_code == 200: - return [ - Job(id_=info["id"], status=info["status"], connection=self) - for info in response.json()["data"] - if datetime.strptime(info["created_at"], self.JOB_TIMESTAMP_FORMAT) - > after - ] - raise RequestFailedError(self._format_error_message(response)) + # response = self._get("/jobs?size={}".format(self.MAX_JOBS_REQUESTED)) + # if response.status_code == 200: + # return [ + # Job(id_=info["id"], status=info["status"], connection=self) + # for info in response.json()["data"] + # if datetime.strptime(info["created_at"], self.JOB_TIMESTAMP_FORMAT) + # > after + # ] + # raise RequestFailedError(self._format_error_message(response)) + raise NotImplementedError("This feature is not yet implemented") def get_job(self, job_id: str) -> Job: """Gets a job. diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 6fdc6c36f..5c673c6a4 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -1,4 +1,4 @@ -# Copyright 2019 Xanadu Quantum Technologies Inc. +# Copyright 2019-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -34,7 +34,7 @@ pytestmark = pytest.mark.frontend -# pylint: disable=redefined-outer-name,no-self-use +# pylint: disable=redefined-outer-name,no-self-use,bad-continuation,expression-not-assigned,pointless-statement @pytest.fixture @@ -44,7 +44,7 @@ def eng(backend): @pytest.fixture -def prog(backend): +def prog(): """Program fixture.""" prog = sf.Program(2) with prog.context as q: @@ -65,7 +65,7 @@ def test_bad_backend(self): with pytest.raises( TypeError, match="backend must be a string or a BaseBackend instance" ): - eng = sf.LocalEngine(0) + _ = sf.LocalEngine(0) class TestEngineProgramInteraction: @@ -294,6 +294,7 @@ def test_create_job_error(self, prog, connection, monkeypatch): with pytest.raises(RequestFailedError): connection.create_job("chip2", prog, 1) + @pytest.mark.skip(reason="method not yet implemented") def test_get_all_jobs(self, connection, monkeypatch): """Tests a successful job list request.""" jobs = [ @@ -312,6 +313,7 @@ def test_get_all_jobs(self, connection, monkeypatch): assert [job.id for job in jobs] == [str(i) for i in range(5, 10)] + @pytest.mark.skip(reason="method not yet implemented") def test_get_all_jobs_error(self, connection, monkeypatch): """Tests a failed job list request.""" monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) From 18a5d33f342530d87b3c83b48634c39cb327d773 Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 18:08:09 -0500 Subject: [PATCH 163/335] Update tests/frontend/test_configuration.py Co-Authored-By: Nathan Killoran --- tests/frontend/test_configuration.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 7ba946ecc..f4c93e01f 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -113,7 +113,7 @@ def test_keywords_take_precedence_over_everything(self, monkeypatch, tmpdir): assert configuration == OTHER_EXPECTED_CONFIG def test_environment_variables_take_precedence_over_conf_file(self, monkeypatch, tmpdir): - """Test 
that the data in environment variables precedence over data in + """Test that the data in environment variables take precedence over data in a configuration file.""" filename = tmpdir.join("config.toml") @@ -433,4 +433,3 @@ def test_parse_environment_variable_integer(self, monkeypatch): monkeypatch.setattr(conf, "INTEGER_KEYS", ("some_integer",)) assert conf.parse_environment_variable("some_integer", "123") == 123 assert conf.parse_environment_variable("not_an_integer","something_else") == "something_else" - From 42071b000ba372316341e6a35845ec4fdb781e19 Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 18:18:41 -0500 Subject: [PATCH 164/335] Modifying docstrings based on comments --- tests/frontend/test_configuration.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 7ba946ecc..e7db01f47 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -136,8 +136,8 @@ def test_environment_variables_take_precedence_over_conf_file(self, monkeypatch, tear_down_all_env_var_defs() def test_conf_file_loads_well(self, monkeypatch, tmpdir): - """Test that the data in environment variables precedence over data in - a configuration file.""" + """Test that the load_config function loads a configuration from a TOML + file correctly.""" filename = tmpdir.join("config.toml") @@ -193,7 +193,7 @@ def test_loading_current_directory(self, tmpdir, monkeypatch): def test_loading_env_variable(self, tmpdir, monkeypatch): """Test that the correct configuration file is found using the correct - environment variable. + environment variable (SF_CONF). This is a test case for when there is no configuration file in the current directory.""" @@ -207,10 +207,8 @@ def raise_wrapper(ex): m.setattr(os, "getcwd", lambda: "NoConfigFileHere") m.setattr(os.environ, "get", lambda x, y: tmpdir if x=="SF_CONF" else "NoConfigFileHere") m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) - - # Need to mock the module specific function # m.setattr(conf, "user_config_dir", lambda *args: "NotTheFileName") - # os.environ["SF_CONF"] = lambda: FileNotFoundError + config_file = conf.look_for_config_in_file(filename=filename) assert config_file == tmpdir.join("config.toml") From c2e79d460269ffcf98549c0fb84f97d0c969b9b1 Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 18:32:58 -0500 Subject: [PATCH 165/335] Modifying tests such that look_for_config_in_file returns a tuple; adding user_config_dir mocking logic to test --- tests/frontend/test_configuration.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 254766d4e..9d555946d 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -187,7 +187,7 @@ def test_loading_current_directory(self, tmpdir, monkeypatch): with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: tmpdir) m.setattr(conf, "load_config_file", lambda filepath: filepath) - config_file = conf.look_for_config_in_file(filename=filename) + config_file, _ = conf.look_for_config_in_file(filename=filename) assert config_file == tmpdir.join(filename) @@ -207,9 +207,9 @@ def raise_wrapper(ex): m.setattr(os, "getcwd", lambda: "NoConfigFileHere") m.setattr(os.environ, "get", lambda x, y: tmpdir if x=="SF_CONF" else 
"NoConfigFileHere") m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) - # m.setattr(conf, "user_config_dir", lambda *args: "NotTheFileName") + m.setattr(conf, "user_config_dir", lambda *args: "NotTheFileName") - config_file = conf.look_for_config_in_file(filename=filename) + config_file, _ = conf.look_for_config_in_file(filename=filename) assert config_file == tmpdir.join("config.toml") def test_loading_user_config_dir(self, tmpdir, monkeypatch): @@ -231,7 +231,7 @@ def raise_wrapper(ex): m.setattr(conf, "user_config_dir", lambda x, *args: tmpdir if x=="strawberryfields" else "NoConfigFileHere") m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) - config_file = conf.look_for_config_in_file(filename=filename) + config_file, _ = conf.look_for_config_in_file(filename=filename) assert config_file == tmpdir.join("config.toml") def test_no_config_file_found_returns_none(self, tmpdir, monkeypatch): @@ -256,7 +256,7 @@ def raise_wrapper(ex): config_file = conf.look_for_config_in_file(filename=filename) - assert config_file is None + assert config_file == (None, None) def test_load_config_file(self, tmpdir, monkeypatch): """Tests that configuration is loaded correctly from a TOML file.""" From b912be1cd9e743fd3b8d94dec58eb47540ced192 Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 18:34:02 -0500 Subject: [PATCH 166/335] Update tests/frontend/test_configuration.py Co-Authored-By: Nathan Killoran --- tests/frontend/test_configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 9d555946d..eb7a14b33 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -235,7 +235,7 @@ def raise_wrapper(ex): assert config_file == tmpdir.join("config.toml") def test_no_config_file_found_returns_none(self, tmpdir, monkeypatch): - """Test that the the look_for_config_in_file returns None if the + """Test that the look_for_config_in_file returns None if the configuration file is nowhere to be found. 
This is a test case for when there is no configuration file: From 2cef06742e1ba9034e0e23ebaf20b0c921d8c6b9 Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 18:38:54 -0500 Subject: [PATCH 167/335] Updating test data --- tests/frontend/test_configuration.py | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 9d555946d..4c93525ca 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -297,31 +297,23 @@ def test_update_entire_config(self): assert config == EXPECTED_CONFIG ONLY_AUTH_CONFIG = { - "api": { - "authentication_token": "PlaceHolder", - } - } + "api": {"authentication_token": "PlaceHolder"} + } ONLY_HOST_CONFIG = { - "api": { - "hostname": "PlaceHolder", - } - } + "api": {"hostname": "PlaceHolder",} + } ONLY_SSL_CONFIG = { - "api": { - "use_ssl": "PlaceHolder", - } + "api": {"use_ssl": "PlaceHolder"} } ONLY_DEBUG_CONFIG = { - "api": { - "debug": "PlaceHolder", - } + "api": {"debug": "PlaceHolder"} } ONLY_PORT_CONFIG = { - "api": {"port": "PlaceHolder"} + "api": {"port": "PlaceHolder"} } @pytest.mark.parametrize("specific_key, config_to_update_with", [("authentication_token",ONLY_AUTH_CONFIG), @@ -338,7 +330,6 @@ def test_update_only_one_item_in_section(self, specific_key, config_to_update_wi assert config["api"][specific_key] == "PlaceHolder" assert all(v != "PlaceHolder" for k, v in config["api"].items() if k != specific_key) - value_mapping = [ ("SF_API_AUTHENTICATION_TOKEN","SomeAuth"), ("SF_API_HOSTNAME","SomeHost"), From 39538d332d5d1bda6e42aa80ae3db012cf3c5133 Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 18:39:01 -0500 Subject: [PATCH 168/335] Update tests/frontend/test_configuration.py Co-Authored-By: Nathan Killoran --- tests/frontend/test_configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index eb7a14b33..701a435ec 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -330,7 +330,7 @@ def test_update_entire_config(self): ("debug",ONLY_DEBUG_CONFIG), ("port",ONLY_PORT_CONFIG)]) def test_update_only_one_item_in_section(self, specific_key, config_to_update_with): - """Tests that only one item is updated in the configuration object is updated.""" + """Tests that only one item in the configuration object is updated.""" config = conf.create_config_object() assert config["api"][specific_key] != "PlaceHolder" From b5e822908dbf4567f9630c075bc5e132a16511ea Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 18:46:17 -0500 Subject: [PATCH 169/335] Update strawberryfields/configuration.py Co-Authored-By: Nathan Killoran --- strawberryfields/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index bf88b75ad..d091e87dc 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -34,7 +34,7 @@ def load_config(filename="config.toml", **kwargs): .. 
note:: - The configuration object (that is a nested dictionary) would be created based + The configuration object (a nested dictionary) will be created based on the following (order defines the importance, going from most important to least important): From 47e483a1b735dd4e33d400158afbbf3d521d02ac Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 18:47:29 -0500 Subject: [PATCH 170/335] Update strawberryfields/configuration.py Co-Authored-By: Nathan Killoran --- strawberryfields/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index d091e87dc..b300e6f43 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -75,7 +75,7 @@ def create_config_object(authentication_token="", **kwargs): """Create a configuration object that stores configuration related data organized into sections. - Currently API related configuration options are defined. This function + Currently API-related configuration options are defined. This function takes into consideration only pre-defined options. If called without passing any keyword arguments, then a default From f8e2c29677c00a4f2588837a2ecaf92821c7b09c Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 19:10:59 -0500 Subject: [PATCH 171/335] Update strawberryfields/configuration.py Co-Authored-By: Nathan Killoran --- strawberryfields/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index b300e6f43..a5f0ed7e8 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -184,7 +184,7 @@ def update_from_environment_variables(config): .. note:: - Currently the following environment variables are checked: + The following environment variables are checked: * SF_API_AUTHENTICATION_TOKEN * SF_API_HOSTNAME From 03e2e046f546802008c11bb1ea118ebd448c98b1 Mon Sep 17 00:00:00 2001 From: antalszava Date: Mon, 24 Feb 2020 19:11:08 -0500 Subject: [PATCH 172/335] Docstring and comment --- strawberryfields/configuration.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index b300e6f43..84d8487f3 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -75,8 +75,8 @@ def create_config_object(authentication_token="", **kwargs): """Create a configuration object that stores configuration related data organized into sections. - Currently API-related configuration options are defined. This function - takes into consideration only pre-defined options. + The configuration object contains API-related configuration options. This + function takes into consideration only pre-defined options. If called without passing any keyword arguments, then a default configuration object is created. 
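# For reference, a standalone sketch (not the library code itself) of the nested
# dictionary that create_config_object produces when called with no keyword
# arguments, using the defaults that appear elsewhere in this patch series:
DEFAULT_API_CONFIG = {
    "api": {
        "authentication_token": "",
        "hostname": "localhost",
        "use_ssl": True,
        "port": 443,
        "debug": False,
    }
}
# Keyword arguments such as create_config_object(authentication_token="...", port=56)
# simply override the corresponding entries in the "api" section.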
@@ -172,7 +172,7 @@ def update_with_other_config(config, other_config): dict[str, dict[str, Union[str, bool, int]]]): the updated configuration """ - # Here an example for sectionconfig is API + # Here an example for section is API for section, sectionconfig in config.items(): for key in sectionconfig: if key in other_config[section]: From 1f04d5cd5726e30cf47e554f8390ad9437c6307c Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 12:47:41 -0500 Subject: [PATCH 173/335] Docstrings, renaming to load_config_file_if_found, adding defaults to module level attributes --- strawberryfields/configuration.py | 35 ++++++++++++---------------- tests/frontend/test_configuration.py | 12 +++++----- 2 files changed, 21 insertions(+), 26 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 1240ae160..7c5298cae 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -23,6 +23,8 @@ log.getLogger() +BOOLEAN_KEYS = {"debug": False, "use_ssl": True} +INTEGER_KEYS = {"port": 443} class ConfigurationError(Exception): """Exception used for configuration errors""" @@ -34,9 +36,9 @@ def load_config(filename="config.toml", **kwargs): .. note:: - The configuration object (a nested dictionary) will be created based - on the following (order defines the importance, going from most - important to least important): + The configuration dictionary will be created based on the following + (order defines the importance, going from most important to least + important): 1. keyword arguments passed to ``load_config`` 2. data contained in environmental variables (if any) @@ -53,11 +55,10 @@ def load_config(filename="config.toml", **kwargs): Returns: dict[str, dict[str, Union[str, bool, int]]]: the configuration - object """ config = create_config_object() - parsed_config, _ = look_for_config_in_file(filename=filename) + parsed_config, _ = load_config_file_if_found(filename=filename) if parsed_config is not None: update_with_other_config(config, other_config=parsed_config) @@ -94,9 +95,9 @@ def create_config_object(authentication_token="", **kwargs): object """ hostname = kwargs.get("hostname", "localhost") - use_ssl = kwargs.get("use_ssl", True) - port = kwargs.get("port", 443) - debug = kwargs.get("debug", False) + use_ssl = kwargs.get("use_ssl", BOOLEAN_KEYS["use_ssl"]) + port = kwargs.get("port", INTEGER_KEYS["port"]) + debug = kwargs.get("debug", BOOLEAN_KEYS["debug"]) config = { "api": { @@ -109,8 +110,9 @@ def create_config_object(authentication_token="", **kwargs): } return config -def look_for_config_in_file(filename="config.toml"): - """Looks for the first configuration file to be found at certain paths. +def load_config_file_if_found(filename="config.toml"): + """Loads the first configuration file found from the defined configuration + directories. .. note:: @@ -124,12 +126,9 @@ def look_for_config_in_file(filename="config.toml"): filename (str): the configuration file to look for Returns: - dict[str, dict[str, Union[str, bool, int]]] or None: the - configuration object that was loaded + (Union[dict[str, dict[str, Union[str, bool, int]]], None], str): the + configuration object that was loaded and the path to the file """ - - # Search the current directory, the directory under environment - # variable SF_CONF, and default user config directory, in that order. 
current_dir = os.getcwd() sf_env_config_dir = os.environ.get("SF_CONF", "") sf_user_config_dir = user_config_dir("strawberryfields", "Xanadu") @@ -206,10 +205,6 @@ def update_from_environment_variables(config): if env in os.environ: config[section][key] = parse_environment_variable(key, os.environ[env]) - -BOOLEAN_KEYS = ("debug", "use_ssl") -INTEGER_KEYS = ("port") - def parse_environment_variable(key, value): """Parse a value stored in an environment variable. @@ -240,4 +235,4 @@ def parse_environment_variable(key, value): DEFAULT_CONFIG = create_config_object() configuration = load_config() -config_file_path = look_for_config_in_file()[1] +config_file_path = load_config_file_if_found()[1] diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 4580950d1..3d70e4dbd 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -177,7 +177,7 @@ def test_config_object_every_keyword_argument(self): debug=True, port=56) == OTHER_EXPECTED_CONFIG class TestLookForConfigInFile: - """Tests for the look_for_config_in_file function.""" + """Tests for the load_config_file_if_found function.""" def test_loading_current_directory(self, tmpdir, monkeypatch): """Test that the default configuration file is loaded from the current @@ -187,7 +187,7 @@ def test_loading_current_directory(self, tmpdir, monkeypatch): with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: tmpdir) m.setattr(conf, "load_config_file", lambda filepath: filepath) - config_file, _ = conf.look_for_config_in_file(filename=filename) + config_file, _ = conf.load_config_file_if_found(filename=filename) assert config_file == tmpdir.join(filename) @@ -209,7 +209,7 @@ def raise_wrapper(ex): m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) m.setattr(conf, "user_config_dir", lambda *args: "NotTheFileName") - config_file, _ = conf.look_for_config_in_file(filename=filename) + config_file, _ = conf.load_config_file_if_found(filename=filename) assert config_file == tmpdir.join("config.toml") def test_loading_user_config_dir(self, tmpdir, monkeypatch): @@ -231,11 +231,11 @@ def raise_wrapper(ex): m.setattr(conf, "user_config_dir", lambda x, *args: tmpdir if x=="strawberryfields" else "NoConfigFileHere") m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) - config_file, _ = conf.look_for_config_in_file(filename=filename) + config_file, _ = conf.load_config_file_if_found(filename=filename) assert config_file == tmpdir.join("config.toml") def test_no_config_file_found_returns_none(self, tmpdir, monkeypatch): - """Test that the look_for_config_in_file returns None if the + """Test that the load_config_file_if_found returns None if the configuration file is nowhere to be found. 
This is a test case for when there is no configuration file: @@ -254,7 +254,7 @@ def raise_wrapper(ex): m.setattr(conf, "user_config_dir", lambda *args: "NoConfigFileHere") m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) - config_file = conf.look_for_config_in_file(filename=filename) + config_file = conf.load_config_file_if_found(filename=filename) assert config_file == (None, None) From b82c60f1cbc2d9b0c3c377321f86b93e7ffe4d7c Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 13:44:04 -0500 Subject: [PATCH 174/335] Change function to get_config_filepath and adjust tests as well accordingly; change load_config using it --- strawberryfields/configuration.py | 41 ++++++++--------- tests/frontend/test_configuration.py | 68 +++++++++++++++++----------- 2 files changed, 62 insertions(+), 47 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 7c5298cae..567824adc 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -58,17 +58,18 @@ def load_config(filename="config.toml", **kwargs): """ config = create_config_object() - parsed_config, _ = load_config_file_if_found(filename=filename) + config_filepath = get_config_filepath(filename=filename) - if parsed_config is not None: - update_with_other_config(config, other_config=parsed_config) + if config_filepath is not None: + loaded_config = load_config_file(config_filepath) + update_config(config, other_config=loaded_config) else: log.info("No Strawberry Fields configuration file found.") update_from_environment_variables(config) config_from_keyword_arguments = {"api": kwargs} - update_with_other_config(config, other_config=config_from_keyword_arguments) + update_config(config, other_config=config_from_keyword_arguments) return config @@ -110,9 +111,9 @@ def create_config_object(authentication_token="", **kwargs): } return config -def load_config_file_if_found(filename="config.toml"): - """Loads the first configuration file found from the defined configuration - directories. +def get_config_filepath(filename="config.toml"): + """Get the filepath of the first configuration file found from the defined + configuration directories (if any). .. note:: @@ -126,8 +127,8 @@ def load_config_file_if_found(filename="config.toml"): filename (str): the configuration file to look for Returns: - (Union[dict[str, dict[str, Union[str, bool, int]]], None], str): the - configuration object that was loaded and the path to the file + Union[str, None]: the filepath to the configuration file or None, if + no file was found """ current_dir = os.getcwd() sf_env_config_dir = os.environ.get("SF_CONF", "") @@ -136,13 +137,10 @@ def load_config_file_if_found(filename="config.toml"): directories = [current_dir, sf_env_config_dir, sf_user_config_dir] for directory in directories: filepath = os.path.join(directory, filename) - try: - parsed_config = load_config_file(filepath) - break - except FileNotFoundError: - parsed_config, filepath = None, None + if os.path.exists(filepath): + return filepath - return parsed_config, filepath + return None def load_config_file(filepath): """Load a configuration object from a TOML formatted file. @@ -158,9 +156,12 @@ def load_config_file(filepath): config_from_file = toml.load(f) return config_from_file -def update_with_other_config(config, other_config): +def update_config(config, other_config): """Updates the current configuration object with another one. 
+ This function assumes that other_config is a valid configuration + dictionary. + Args: config (dict[str, dict[str, Union[str, bool, int]]]): the configuration to be updated @@ -172,10 +173,8 @@ def update_with_other_config(config, other_config): configuration """ # Here an example for section is API - for section, sectionconfig in config.items(): - for key in sectionconfig: - if key in other_config[section]: - config[section][key] = other_config[section][key] + for section in config.keys(): + config[section].update(other_config[section]) def update_from_environment_variables(config): """Updates the current configuration object from data stored in environment @@ -235,4 +234,4 @@ def parse_environment_variable(key, value): DEFAULT_CONFIG = create_config_object() configuration = load_config() -config_file_path = load_config_file_if_found()[1] +config_filepath = get_config_filepath() diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 3d70e4dbd..e33a93175 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -176,22 +176,26 @@ def test_config_object_every_keyword_argument(self): use_ssl=False, debug=True, port=56) == OTHER_EXPECTED_CONFIG -class TestLookForConfigInFile: - """Tests for the load_config_file_if_found function.""" +class TestGetConfigFilepath: + """Tests for the get_config_filepath function.""" - def test_loading_current_directory(self, tmpdir, monkeypatch): + def test_current_directory(self, tmpdir, monkeypatch): """Test that the default configuration file is loaded from the current directory, if found.""" filename = "config.toml" + path_to_write_file = tmpdir.join(filename) + + with open(path_to_write_file, "w") as f: + f.write(TEST_FILE) + with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: tmpdir) - m.setattr(conf, "load_config_file", lambda filepath: filepath) - config_file, _ = conf.load_config_file_if_found(filename=filename) + config_filepath = conf.get_config_filepath(filename=filename) - assert config_file == tmpdir.join(filename) + assert config_filepath == tmpdir.join(filename) - def test_loading_env_variable(self, tmpdir, monkeypatch): + def test_env_variable(self, tmpdir, monkeypatch): """Test that the correct configuration file is found using the correct environment variable (SF_CONF). @@ -200,19 +204,24 @@ def test_loading_env_variable(self, tmpdir, monkeypatch): filename = "config.toml" + path_to_write_file = tmpdir.join(filename) + + with open(path_to_write_file, "w") as f: + f.write(TEST_FILE) + def raise_wrapper(ex): raise ex with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: "NoConfigFileHere") m.setattr(os.environ, "get", lambda x, y: tmpdir if x=="SF_CONF" else "NoConfigFileHere") - m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) m.setattr(conf, "user_config_dir", lambda *args: "NotTheFileName") - config_file, _ = conf.load_config_file_if_found(filename=filename) - assert config_file == tmpdir.join("config.toml") + config_filepath = conf.get_config_filepath(filename=filename) - def test_loading_user_config_dir(self, tmpdir, monkeypatch): + assert config_filepath == tmpdir.join("config.toml") + + def test_user_config_dir(self, tmpdir, monkeypatch): """Test that the correct configuration file is found using the correct argument to the user_config_dir function. 
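# The tests in this file lean heavily on pytest's monkeypatch fixture. A small
# self-contained illustration of the pattern (separate from the suite here and
# assuming only pytest): every setattr/setenv made inside the context block is
# undone automatically when the block exits.
import os

def test_monkeypatch_context_sketch(monkeypatch, tmpdir):
    with monkeypatch.context() as m:
        m.setenv("SF_CONF", str(tmpdir))              # temporary environment variable
        m.setattr(os, "getcwd", lambda: str(tmpdir))  # temporary function replacement
        assert os.getcwd() == str(tmpdir)
    # outside the context both the environment variable and os.getcwd are restored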
@@ -222,6 +231,11 @@ def test_loading_user_config_dir(self, tmpdir, monkeypatch): variable.""" filename = "config.toml" + path_to_write_file = tmpdir.join(filename) + + with open(path_to_write_file, "w") as f: + f.write(TEST_FILE) + def raise_wrapper(ex): raise ex @@ -229,13 +243,13 @@ def raise_wrapper(ex): m.setattr(os, "getcwd", lambda: "NoConfigFileHere") m.setattr(os.environ, "get", lambda *args: "NoConfigFileHere") m.setattr(conf, "user_config_dir", lambda x, *args: tmpdir if x=="strawberryfields" else "NoConfigFileHere") - m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) - config_file, _ = conf.load_config_file_if_found(filename=filename) - assert config_file == tmpdir.join("config.toml") + config_filepath = conf.get_config_filepath(filename=filename) + + assert config_filepath == tmpdir.join("config.toml") def test_no_config_file_found_returns_none(self, tmpdir, monkeypatch): - """Test that the load_config_file_if_found returns None if the + """Test that the get_config_filepath returns None if the configuration file is nowhere to be found. This is a test case for when there is no configuration file: @@ -252,11 +266,13 @@ def raise_wrapper(ex): m.setattr(os, "getcwd", lambda: "NoConfigFileHere") m.setattr(os.environ, "get", lambda *args: "NoConfigFileHere") m.setattr(conf, "user_config_dir", lambda *args: "NoConfigFileHere") - m.setattr(conf, "load_config_file", lambda filepath: raise_wrapper(FileNotFoundError()) if "NoConfigFileHere" in filepath else filepath) - config_file = conf.load_config_file_if_found(filename=filename) + config_filepath = conf.get_config_filepath(filename=filename) + + assert config_filepath is None - assert config_file == (None, None) +class TestLoadConfigFile: + """Tests the load_config_file function.""" def test_load_config_file(self, tmpdir, monkeypatch): """Tests that configuration is loaded correctly from a TOML file.""" @@ -265,9 +281,9 @@ def test_load_config_file(self, tmpdir, monkeypatch): with open(filename, "w") as f: f.write(TEST_FILE) - config_file = conf.load_config_file(filepath=filename) + loaded_config = conf.load_config_file(filepath=filename) - assert config_file == EXPECTED_CONFIG + assert loaded_config == EXPECTED_CONFIG def test_loading_absolute_path(self, tmpdir, monkeypatch): """Test that the default configuration file can be loaded @@ -280,12 +296,12 @@ def test_loading_absolute_path(self, tmpdir, monkeypatch): os.environ["SF_CONF"] = "" - config_file = conf.load_config_file(filepath=filename) + loaded_config = conf.load_config_file(filepath=filename) - assert config_file == EXPECTED_CONFIG + assert loaded_config == EXPECTED_CONFIG -class TestUpdateWithOtherConfig: - """Tests for the update_with_other_config function.""" +class TestUpdateConfig: + """Tests for the update_config function.""" def test_update_entire_config(self): """Tests that the entire configuration object is updated.""" @@ -293,7 +309,7 @@ def test_update_entire_config(self): config = conf.create_config_object() assert config["api"]["authentication_token"] == "" - conf.update_with_other_config(config, EXPECTED_CONFIG) + conf.update_config(config, EXPECTED_CONFIG) assert config == EXPECTED_CONFIG ONLY_AUTH_CONFIG = { @@ -326,7 +342,7 @@ def test_update_only_one_item_in_section(self, specific_key, config_to_update_wi config = conf.create_config_object() assert config["api"][specific_key] != "PlaceHolder" - conf.update_with_other_config(config, config_to_update_with) + 
conf.update_config(config, config_to_update_with) assert config["api"][specific_key] == "PlaceHolder" assert all(v != "PlaceHolder" for k, v in config["api"].items() if k != specific_key) From 544b60c4938875953f52ca5247d8441a24365435 Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 13:46:42 -0500 Subject: [PATCH 175/335] Renaming create_config --- strawberryfields/configuration.py | 6 +++--- tests/frontend/test_configuration.py | 14 +++++++------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 567824adc..7d58b8bbb 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -56,7 +56,7 @@ def load_config(filename="config.toml", **kwargs): Returns: dict[str, dict[str, Union[str, bool, int]]]: the configuration """ - config = create_config_object() + config = create_config() config_filepath = get_config_filepath(filename=filename) @@ -73,7 +73,7 @@ def load_config(filename="config.toml", **kwargs): return config -def create_config_object(authentication_token="", **kwargs): +def create_config(authentication_token="", **kwargs): """Create a configuration object that stores configuration related data organized into sections. @@ -232,6 +232,6 @@ def parse_environment_variable(key, value): return value -DEFAULT_CONFIG = create_config_object() +DEFAULT_CONFIG = create_config() configuration = load_config() config_filepath = get_config_filepath() diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index e33a93175..078aa5971 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -155,7 +155,7 @@ class TestCreateConfigObject: def test_empty_config_object(self): """Test that an empty configuration object can be created.""" - config = conf.create_config_object(authentication_token="", + config = conf.create_config(authentication_token="", hostname="", use_ssl="", debug="", @@ -166,12 +166,12 @@ def test_empty_config_object(self): def test_config_object_with_authentication_token(self): """Test that passing only the authentication token creates the expected configuration object.""" - assert conf.create_config_object(authentication_token="071cdcce-9241-4965-93af-4a4dbc739135") == EXPECTED_CONFIG + assert conf.create_config(authentication_token="071cdcce-9241-4965-93af-4a4dbc739135") == EXPECTED_CONFIG def test_config_object_every_keyword_argument(self): """Test that passing every keyword argument creates the expected configuration object.""" - assert conf.create_config_object(authentication_token="SomeAuth", + assert conf.create_config(authentication_token="SomeAuth", hostname="SomeHost", use_ssl=False, debug=True, @@ -306,7 +306,7 @@ class TestUpdateConfig: def test_update_entire_config(self): """Tests that the entire configuration object is updated.""" - config = conf.create_config_object() + config = conf.create_config() assert config["api"]["authentication_token"] == "" conf.update_config(config, EXPECTED_CONFIG) @@ -339,7 +339,7 @@ def test_update_entire_config(self): ("port",ONLY_PORT_CONFIG)]) def test_update_only_one_item_in_section(self, specific_key, config_to_update_with): """Tests that only one item in the configuration object is updated.""" - config = conf.create_config_object() + config = conf.create_config() assert config["api"][specific_key] != "PlaceHolder" conf.update_config(config, config_to_update_with) @@ -372,7 +372,7 @@ def test_all_environment_variables_defined(self): 
for key, value in value_mapping: os.environ[key] = value - config = conf.create_config_object() + config = conf.create_config() for v, parsed_value in zip(config["api"].values(), parsed_values_mapping.values()): assert v != parsed_value @@ -398,7 +398,7 @@ def test_one_environment_variable_defined(self, env_var, key, value): tear_down_all_env_var_defs() os.environ[env_var] = value - config = conf.create_config_object() + config = conf.create_config() for v, parsed_value in zip(config["api"].values(), parsed_values_mapping.values()): assert v != parsed_value From 228a0c45dc794e4956dcec70e9b4fe81182c8ba4 Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 14:08:58 -0500 Subject: [PATCH 176/335] Modifying tests to use setenv from monkeypatching in tests --- tests/frontend/test_configuration.py | 93 ++++++++++++---------------- 1 file changed, 41 insertions(+), 52 deletions(-) diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 078aa5971..97b8738e2 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -1,3 +1,4 @@ + # Copyright 2019-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -69,13 +70,6 @@ "SF_API_PORT" ] -def tear_down_all_env_var_defs(): - """Making sure that no environment variables are defined after.""" - for key in environment_variables: - if key in os.environ: - del os.environ[key] - assert key not in os.environ - class TestLoadConfig: """Tests for the load_config function.""" @@ -95,13 +89,13 @@ def test_keywords_take_precedence_over_everything(self, monkeypatch, tmpdir): with open(filename, "w") as f: f.write(TEST_FILE) - os.environ["SF_API_AUTHENTICATION_TOKEN"] = "NotOurAuth" - os.environ["SF_API_HOSTNAME"] = "NotOurHost" - os.environ["SF_API_USE_SSL"] = "True" - os.environ["SF_API_DEBUG"] = "False" - os.environ["SF_API_PORT"] = "42" - with monkeypatch.context() as m: + m.setenv("SF_API_AUTHENTICATION_TOKEN", "NotOurAuth") + m.setenv("SF_API_HOSTNAME", "NotOurHost") + m.setenv("SF_API_USE_SSL", "True") + m.setenv("SF_API_DEBUG", "False") + m.setenv("SF_API_PORT", "42") + m.setattr(os, "getcwd", lambda: tmpdir) configuration = conf.load_config(authentication_token="SomeAuth", hostname="SomeHost", @@ -121,20 +115,19 @@ def test_environment_variables_take_precedence_over_conf_file(self, monkeypatch, with open(filename, "w") as f: f.write(TEST_FILE) - os.environ["SF_API_AUTHENTICATION_TOKEN"] = "SomeAuth" - os.environ["SF_API_HOSTNAME"] = "SomeHost" - os.environ["SF_API_USE_SSL"] = "False" - os.environ["SF_API_DEBUG"] = "True" - os.environ["SF_API_PORT"] = "56" - with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: tmpdir) + + m.setenv("SF_API_AUTHENTICATION_TOKEN", "SomeAuth") + m.setenv("SF_API_HOSTNAME", "SomeHost") + m.setenv("SF_API_USE_SSL", "False") + m.setenv("SF_API_DEBUG", "True") + m.setenv("SF_API_PORT", "56") + configuration = conf.load_config() assert configuration == OTHER_EXPECTED_CONFIG - tear_down_all_env_var_defs() - def test_conf_file_loads_well(self, monkeypatch, tmpdir): """Test that the load_config function loads a configuration from a TOML file correctly.""" @@ -214,7 +207,7 @@ def raise_wrapper(ex): with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: "NoConfigFileHere") - m.setattr(os.environ, "get", lambda x, y: tmpdir if x=="SF_CONF" else "NoConfigFileHere") + m.setenv("SF_CONF", tmpdir) m.setattr(conf, "user_config_dir", lambda *args: "NotTheFileName") config_filepath = 
conf.get_config_filepath(filename=filename) @@ -241,7 +234,7 @@ def raise_wrapper(ex): with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: "NoConfigFileHere") - m.setattr(os.environ, "get", lambda *args: "NoConfigFileHere") + m.setenv("SF_CONF", "NoConfigFileHere") m.setattr(conf, "user_config_dir", lambda x, *args: tmpdir if x=="strawberryfields" else "NoConfigFileHere") config_filepath = conf.get_config_filepath(filename=filename) @@ -264,7 +257,7 @@ def raise_wrapper(ex): with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: "NoConfigFileHere") - m.setattr(os.environ, "get", lambda *args: "NoConfigFileHere") + m.setenv("SF_CONF", "NoConfigFileHere") m.setattr(conf, "user_config_dir", lambda *args: "NoConfigFileHere") config_filepath = conf.get_config_filepath(filename=filename) @@ -294,9 +287,9 @@ def test_loading_absolute_path(self, tmpdir, monkeypatch): with open(filename, "w") as f: f.write(TEST_FILE) - - os.environ["SF_CONF"] = "" - loaded_config = conf.load_config_file(filepath=filename) + with monkeypatch.context() as m: + m.setenv("SF_CONF", "") + loaded_config = conf.load_config_file(filepath=filename) assert loaded_config == EXPECTED_CONFIG @@ -365,22 +358,22 @@ def test_update_only_one_item_in_section(self, specific_key, config_to_update_wi class TestUpdateFromEnvironmentalVariables: """Tests for the update_from_environment_variables function.""" - def test_all_environment_variables_defined(self): + def test_all_environment_variables_defined(self, monkeypatch): """Tests that the configuration object is updated correctly when all the environment variables are defined.""" - for key, value in value_mapping: - os.environ[key] = value + with monkeypatch.context() as m: + for env_var, value in value_mapping: + m.setenv(env_var, value) - config = conf.create_config() - for v, parsed_value in zip(config["api"].values(), parsed_values_mapping.values()): - assert v != parsed_value + config = conf.create_config() + for v, parsed_value in zip(config["api"].values(), parsed_values_mapping.values()): + assert v != parsed_value - conf.update_from_environment_variables(config) - for v, parsed_value in zip(config["api"].values(), parsed_values_mapping.values()): - assert v == parsed_value + conf.update_from_environment_variables(config) + for v, parsed_value in zip(config["api"].values(), parsed_values_mapping.values()): + assert v == parsed_value - tear_down_all_env_var_defs() environment_variables_with_keys_and_values = [ ("SF_API_AUTHENTICATION_TOKEN","authentication_token","SomeAuth"), @@ -391,27 +384,23 @@ def test_all_environment_variables_defined(self): ] @pytest.mark.parametrize("env_var, key, value", environment_variables_with_keys_and_values) - def test_one_environment_variable_defined(self, env_var, key, value): + def test_one_environment_variable_defined(self, env_var, key, value, monkeypatch): """Tests that the configuration object is updated correctly when only one environment variable is defined.""" - tear_down_all_env_var_defs() - os.environ[env_var] = value - - config = conf.create_config() - for v, parsed_value in zip(config["api"].values(), parsed_values_mapping.values()): - assert v != parsed_value - - conf.update_from_environment_variables(config) - assert config["api"][key] == parsed_values_mapping[env_var] + with monkeypatch.context() as m: + m.setenv(env_var, value) - for v, (key, parsed_value) in zip(config["api"].values(), parsed_values_mapping.items()): - if key != env_var: + config = conf.create_config() + for v, parsed_value in 
zip(config["api"].values(), parsed_values_mapping.values()): assert v != parsed_value - # Tear-down - del os.environ[env_var] - assert env_var not in os.environ + conf.update_from_environment_variables(config) + assert config["api"][key] == parsed_values_mapping[env_var] + + for v, (key, parsed_value) in zip(config["api"].values(), parsed_values_mapping.items()): + if key != env_var: + assert v != parsed_value def test_parse_environment_variable_boolean(self, monkeypatch): """Tests that boolean values can be parsed correctly from environment From c33c9da6c5dd73528ef7ac97670c2d914d34107a Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Tue, 25 Feb 2020 15:42:28 -0500 Subject: [PATCH 177/335] Small change for readability --- strawberryfields/engine.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 81af7215f..252943652 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -926,7 +926,8 @@ def _request( self, method: RequestMethod, path: str, headers: Dict[str, str] = None, **kwargs ) -> requests.Response: headers = {} if headers is None else headers - return getattr(requests, method.value)( + request = getattr(requests, method.value) + return request( urljoin(self.base_url, path), headers={"Authorization": self.token, **headers}, **kwargs From 957536e9fa004201848bb40805045481fc1a73dc Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Tue, 25 Feb 2020 16:01:54 -0500 Subject: [PATCH 178/335] Rename 'terminal' to 'final' --- strawberryfields/engine.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 252943652..ed7b88bd6 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -570,14 +570,14 @@ class JobStatus(enum.Enum): FAILED = "failed" @property - def is_terminal(self) -> bool: + def is_final(self) -> bool: """Checks if this status represents a final and immutable state. This method is primarily used to determine if an operation is valid for a given status. Returns: - bool: ``True`` if the job status is terminal, and ``False`` otherwise + bool: ``True`` if the job status is final, and ``False`` otherwise """ return self in (JobStatus.CANCELLED, JobStatus.COMPLETE, JobStatus.FAILED) @@ -642,7 +642,7 @@ def refresh(self): Only an open or queued job can be refreshed; an exception is raised otherwise. """ - if self.status.is_terminal: + if self.status.is_final: raise InvalidJobOperationError( "A {} job cannot be refreshed".format(self.status.value) ) @@ -655,7 +655,7 @@ def cancel(self): Only an open or queued job can be cancelled; an exception is raised otherwise. 
""" - if self.status.is_terminal: + if self.status.is_final: raise InvalidJobOperationError( "A {} job cannot be cancelled".format(self.status.value) ) From 58eacf2854a4a472d9a2bfa87a1dd76703d43eab Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Tue, 25 Feb 2020 16:33:36 -0500 Subject: [PATCH 179/335] Simplify request functions --- strawberryfields/engine.py | 85 +++++++++++------------------------ tests/frontend/test_engine.py | 43 +++++++++--------- 2 files changed, 47 insertions(+), 81 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index ed7b88bd6..7cf4097dd 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -670,14 +670,6 @@ def __str__(self): return self.__repr__() -class RequestMethod(enum.Enum): - """Defines the valid request methods for messages sent to the remote platform.""" - - GET = "get" - POST = "post" - PATCH = "patch" - - class Connection: """Manages remote connections to the remote job execution platform and exposes various job operations. @@ -735,6 +727,11 @@ def __init__( self._port = port self._use_ssl = use_ssl + self._base_url = "http{}://{}:{}".format( + "s" if self.use_ssl else "", self.host, self.port + ) + self._headers = {"Authorization": self.token} + @property def token(self) -> str: """The API authentication token. @@ -771,17 +768,6 @@ def use_ssl(self) -> bool: """ return self._use_ssl - @property - def base_url(self) -> str: - """The base URL used for the connection. - - Returns: - str: the base URL - """ - return "http{}://{}:{}".format( - "s" if self.use_ssl else "", self.host, self.port - ) - def create_job(self, target: str, program: Program, shots: int) -> Job: """Creates a job with the given circuit. @@ -801,7 +787,12 @@ def create_job(self, target: str, program: Program, shots: int) -> Job: bb._target["options"] = {"shots": shots} circuit = bb.serialize() - response = self._post("/jobs", data=json.dumps({"circuit": circuit})) + path = "/jobs" + response = requests.post( + self._url(path), + headers=self._headers, + data=json.dumps({"circuit": circuit}), + ) if response.status_code == 201: return Job( id_=response.json()["id"], @@ -824,15 +815,6 @@ def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: Returns: List[strawberryfields.engine.Job]: the jobs """ - # response = self._get("/jobs?size={}".format(self.MAX_JOBS_REQUESTED)) - # if response.status_code == 200: - # return [ - # Job(id_=info["id"], status=info["status"], connection=self) - # for info in response.json()["data"] - # if datetime.strptime(info["created_at"], self.JOB_TIMESTAMP_FORMAT) - # > after - # ] - # raise RequestFailedError(self._format_error_message(response)) raise NotImplementedError("This feature is not yet implemented") def get_job(self, job_id: str) -> Job: @@ -844,7 +826,8 @@ def get_job(self, job_id: str) -> Job: Returns: strawberryfields.engine.Job: the job """ - response = self._get("/jobs/{}".format(job_id)) + path = "/jobs/{}".format(job_id) + response = requests.get(self._url(path), headers=self._headers) if response.status_code == 200: return Job( id_=response.json()["id"], @@ -873,8 +856,9 @@ def get_job_result(self, job_id: str) -> Result: Returns: strawberryfields.engine.Result: the job result """ - response = self._get( - "/jobs/{}/result".format(job_id), {"Accept": "application/x-numpy"}, + path = "/jobs/{}/result".format(job_id) + response = requests.get( + self._url(path), headers={"Accept": "application/x-numpy", **self._headers}, ) if response.status_code == 200: # Read the 
numpy binary data in the payload into memory @@ -891,8 +875,11 @@ def cancel_job(self, job_id: str): Args: job_id (str): the job ID """ - response = self._patch( - "/jobs/{}".format(job_id), data={"status", JobStatus.CANCELLED.value} + path = "/jobs/{}".format(job_id) + response = requests.patch( + self._url(path), + headers=self._headers, + data={"status", JobStatus.CANCELLED.value}, ) if response.status_code == 204: return @@ -904,34 +891,12 @@ def ping(self) -> bool: Returns: bool: ``True`` if the connection is successful, and ``False`` otherwise """ - response = self._get("/healthz") + path = "/healthz" + response = requests.get(self._url(path), headers=self._headers) return response.status_code == 200 - def _get( - self, path: str, headers: Dict[str, str] = None, **kwargs - ) -> requests.Response: - return self._request(RequestMethod.GET, path, headers, **kwargs) - - def _post( - self, path: str, headers: Dict[str, str] = None, **kwargs - ) -> requests.Response: - return self._request(RequestMethod.POST, path, headers, **kwargs) - - def _patch( - self, path: str, headers: Dict[str, str] = None, **kwargs - ) -> requests.Response: - return self._request(RequestMethod.PATCH, path, headers, **kwargs) - - def _request( - self, method: RequestMethod, path: str, headers: Dict[str, str] = None, **kwargs - ) -> requests.Response: - headers = {} if headers is None else headers - request = getattr(requests, method.value) - return request( - urljoin(self.base_url, path), - headers={"Authorization": self.token, **headers}, - **kwargs - ) + def _url(self, path: str) -> str: + return self._base_url + path @staticmethod def _format_error_message(response: requests.Response) -> str: diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 5c673c6a4..5d68283c6 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -18,6 +18,7 @@ import numpy as np import pytest +import requests import strawberryfields as sf from strawberryfields import ops @@ -240,7 +241,7 @@ def incomplete_job_raises_on_result_access(self): with pytest.raises(AttributeError): _ = job.result - def terminal_job_raises_on_refresh(self): + def final_job_raises_on_refresh(self): """Tests that `job.refresh()` raises an error for a complete, failed, or cancelled job.""" job = Job("abc", status=JobStatus.COMPLETE, connection=Connection) @@ -248,7 +249,7 @@ def terminal_job_raises_on_refresh(self): with pytest.raises(InvalidJobOperationError): job.refresh() - def terminal_job_raises_on_cancel(self): + def final_job_raises_on_cancel(self): """Tests that `job.cancel()` raises an error for a complete, failed, or aleady cancelled job.""" job = Job("abc", status=JobStatus.COMPLETE, connection=Connection) @@ -270,15 +271,15 @@ def test_init(self): assert connection.port == port assert connection.use_ssl == use_ssl - assert connection.base_url == "https://host:123" + assert connection._url("/abc") == "https://host:123/abc" def test_create_job(self, prog, connection, monkeypatch): """Tests a successful job creation flow.""" id_, status = "123", JobStatus.QUEUED monkeypatch.setattr( - Connection, - "_post", + requests, + "post", mock_return(mock_response(201, {"id": id_, "status": status})), ) @@ -289,7 +290,7 @@ def test_create_job(self, prog, connection, monkeypatch): def test_create_job_error(self, prog, connection, monkeypatch): """Tests a failed job creation flow.""" - monkeypatch.setattr(Connection, "_post", mock_return(mock_response(400, {}))) + monkeypatch.setattr(requests, "post", 
mock_return(mock_response(400, {}))) with pytest.raises(RequestFailedError): connection.create_job("chip2", prog, 1) @@ -306,7 +307,7 @@ def test_get_all_jobs(self, connection, monkeypatch): for i in range(1, 10) ] monkeypatch.setattr( - Connection, "_get", mock_return(mock_response(200, {"data": jobs})), + requests, "get", mock_return(mock_response(200, {"data": jobs})), ) jobs = connection.get_all_jobs(after=datetime(2020, 1, 5)) @@ -316,7 +317,7 @@ def test_get_all_jobs(self, connection, monkeypatch): @pytest.mark.skip(reason="method not yet implemented") def test_get_all_jobs_error(self, connection, monkeypatch): """Tests a failed job list request.""" - monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) + monkeypatch.setattr(requests, "get", mock_return(mock_response(404, {}))) with pytest.raises(RequestFailedError): connection.get_all_jobs() @@ -326,8 +327,8 @@ def test_get_job(self, connection, monkeypatch): id_, status = "123", JobStatus.COMPLETE monkeypatch.setattr( - Connection, - "_get", + requests, + "get", mock_return(mock_response(200, {"id": id_, "status": status.value})), ) @@ -338,7 +339,7 @@ def test_get_job(self, connection, monkeypatch): def test_get_job_error(self, connection, monkeypatch): """Tests a failed job request.""" - monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) + monkeypatch.setattr(requests, "get", mock_return(mock_response(404, {}))) with pytest.raises(RequestFailedError): connection.get_job("123") @@ -348,8 +349,8 @@ def test_get_job_status(self, connection, monkeypatch): id_, status = "123", JobStatus.COMPLETE monkeypatch.setattr( - Connection, - "_get", + requests, + "get", mock_return(mock_response(200, {"id": id_, "status": status.value})), ) @@ -357,7 +358,7 @@ def test_get_job_status(self, connection, monkeypatch): def test_get_job_status_error(self, connection, monkeypatch): """Tests a failed job status request.""" - monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) + monkeypatch.setattr(requests, "get", mock_return(mock_response(404, {}))) with pytest.raises(RequestFailedError): connection.get_job_status("123") @@ -370,8 +371,8 @@ def test_get_job_result(self, connection, monkeypatch): np.save(buf, result_samples) buf.seek(0) monkeypatch.setattr( - Connection, - "_get", + requests, + "get", mock_return(mock_response(200, binary_body=buf.getvalue())), ) @@ -381,34 +382,34 @@ def test_get_job_result(self, connection, monkeypatch): def test_get_job_result_error(self, connection, monkeypatch): """Tests a failed job result request.""" - monkeypatch.setattr(Connection, "_get", mock_return(mock_response(404, {}))) + monkeypatch.setattr(requests, "get", mock_return(mock_response(404, {}))) with pytest.raises(RequestFailedError): connection.get_job_result("123") def test_cancel_job(self, connection, monkeypatch): """Tests a successful job cancellation request.""" - monkeypatch.setattr(Connection, "_patch", mock_return(mock_response(204, {}))) + monkeypatch.setattr(requests, "patch", mock_return(mock_response(204, {}))) # A successful cancellation does not raise an exception connection.cancel_job("123") def test_cancel_job_error(self, connection, monkeypatch): """Tests a successful job cancellation request.""" - monkeypatch.setattr(Connection, "_patch", mock_return(mock_response(404, {}))) + monkeypatch.setattr(requests, "patch", mock_return(mock_response(404, {}))) with pytest.raises(RequestFailedError): connection.cancel_job("123") def test_ping_success(self, connection, 
monkeypatch): """Tests a successful ping to the remote host.""" - monkeypatch.setattr(Connection, "_get", mock_return(mock_response(200, {}))) + monkeypatch.setattr(requests, "get", mock_return(mock_response(200, {}))) assert connection.ping() def test_ping_failure(self, connection, monkeypatch): """Tests a failed ping to the remote host.""" - monkeypatch.setattr(Connection, "_get", mock_return(mock_response(500, {}))) + monkeypatch.setattr(requests, "get", mock_return(mock_response(500, {}))) assert not connection.ping() From 2a6134958ebc350a5ae8310b4c78d7604c5188a0 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Tue, 25 Feb 2020 16:49:12 -0500 Subject: [PATCH 180/335] Rename 'complete' to 'completed' where relevant --- strawberryfields/engine.py | 18 +++++++++--------- tests/frontend/test_engine.py | 20 ++++++++++---------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 7cf4097dd..4fc224cb6 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -566,7 +566,7 @@ class JobStatus(enum.Enum): OPEN = "open" QUEUED = "queued" CANCELLED = "cancelled" - COMPLETE = "complete" + COMPLETED = "complete" FAILED = "failed" @property @@ -579,7 +579,7 @@ def is_final(self) -> bool: Returns: bool: ``True`` if the job status is final, and ``False`` otherwise """ - return self in (JobStatus.CANCELLED, JobStatus.COMPLETE, JobStatus.FAILED) + return self in (JobStatus.CANCELLED, JobStatus.COMPLETED, JobStatus.FAILED) class Job: @@ -623,15 +623,15 @@ def status(self) -> JobStatus: def result(self) -> Result: """The job result. - This is only defined for complete jobs, and raises an exception for any other + This is only defined for completed jobs, and raises an exception for any other status. Returns: strawberryfields.engine.Result: the job result """ - if self.status != JobStatus.COMPLETE: + if self.status != JobStatus.COMPLETED: raise AttributeError( - "The result is undefined for jobs that are not complete " + "The result is undefined for jobs that are not completed " "(current status: {})".format(self.status.value) ) return self._result @@ -647,7 +647,7 @@ def refresh(self): "A {} job cannot be refreshed".format(self.status.value) ) self._status = self._connection.get_job_status(self.id) - if self._status == JobStatus.COMPLETE: + if self._status == JobStatus.COMPLETED: self._result = self._connection.get_job_result(self.id) def cancel(self): @@ -929,7 +929,7 @@ class StarshipEngine: # Run a job synchronously result = engine.run(program, shots=1) - # (Engine blocks until job is complete) + # (Engine blocks until job is completed) result # [[0 1 0 2 1 0 0 0]] # Run a job synchronously, but cancel it before it is completed @@ -942,7 +942,7 @@ class StarshipEngine: job.result # InvalidJobOperationError # (After some time...) 
job.refresh() - job.status # + job.status # job.result # [[0 1 0 2 1 0 0 0]] Args: @@ -1011,7 +1011,7 @@ def run(self, program: Program, shots: int = 1) -> Optional[Result]: try: while True: job.refresh() - if job.status == JobStatus.COMPLETE: + if job.status == JobStatus.COMPLETED: return job.result if job.status == JobStatus.FAILED: raise JobFailedError("The computation failed; please try again.") diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 5d68283c6..11b3087b2 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -202,7 +202,7 @@ def mock_response(status_code, json_body=None, binary_body=None): class MockServer: """A mock platform server that fakes a processing delay by counting requests.""" - REQUESTS_BEFORE_COMPLETE = 3 + REQUESTS_BEFORE_COMPLETED = 3 def __init__(self): self.request_count = 0 @@ -213,8 +213,8 @@ def get_job_status(self, _id): """ self.request_count += 1 return ( - JobStatus.COMPLETE - if self.request_count >= self.REQUESTS_BEFORE_COMPLETE + JobStatus.COMPLETED + if self.request_count >= self.REQUESTS_BEFORE_COMPLETED else JobStatus.QUEUED ) @@ -244,7 +244,7 @@ def incomplete_job_raises_on_result_access(self): def final_job_raises_on_refresh(self): """Tests that `job.refresh()` raises an error for a complete, failed, or cancelled job.""" - job = Job("abc", status=JobStatus.COMPLETE, connection=Connection) + job = Job("abc", status=JobStatus.COMPLETED, connection=Connection) with pytest.raises(InvalidJobOperationError): job.refresh() @@ -252,7 +252,7 @@ def final_job_raises_on_refresh(self): def final_job_raises_on_cancel(self): """Tests that `job.cancel()` raises an error for a complete, failed, or aleady cancelled job.""" - job = Job("abc", status=JobStatus.COMPLETE, connection=Connection) + job = Job("abc", status=JobStatus.COMPLETED, connection=Connection) with pytest.raises(InvalidJobOperationError): job.cancel() @@ -301,7 +301,7 @@ def test_get_all_jobs(self, connection, monkeypatch): jobs = [ { "id": str(i), - "status": JobStatus.COMPLETE, + "status": JobStatus.COMPLETED, "created_at": "2020-01-{:02d}T12:34:56.123456Z".format(i), } for i in range(1, 10) @@ -324,7 +324,7 @@ def test_get_all_jobs_error(self, connection, monkeypatch): def test_get_job(self, connection, monkeypatch): """Tests a successful job request.""" - id_, status = "123", JobStatus.COMPLETE + id_, status = "123", JobStatus.COMPLETED monkeypatch.setattr( requests, @@ -346,7 +346,7 @@ def test_get_job_error(self, connection, monkeypatch): def test_get_job_status(self, connection, monkeypatch): """Tests a successful job status request.""" - id_, status = "123", JobStatus.COMPLETE + id_, status = "123", JobStatus.COMPLETED monkeypatch.setattr( requests, @@ -463,10 +463,10 @@ def test_run_async(self, connection, prog, monkeypatch): job = engine.run_async(prog) assert job.status == JobStatus.OPEN - for _ in range(server.REQUESTS_BEFORE_COMPLETE): + for _ in range(server.REQUESTS_BEFORE_COMPLETED): job.refresh() - assert job.status == JobStatus.COMPLETE + assert job.status == JobStatus.COMPLETED assert np.array_equal(job.result.samples.T, result_expected) with pytest.raises(AttributeError): From 4711ed41da7b1f5b0977bfb004dd1b74a1f3b655 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Tue, 25 Feb 2020 17:19:27 -0500 Subject: [PATCH 181/335] Use MockResponse class --- tests/frontend/test_engine.py | 52 +++++++++++++++++++---------------- 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/tests/frontend/test_engine.py 
b/tests/frontend/test_engine.py index 11b3087b2..792c3efd6 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -14,7 +14,6 @@ r"""Unit tests for engine.py""" from datetime import datetime import io -from unittest.mock import MagicMock import numpy as np import pytest @@ -188,15 +187,20 @@ def mock_return(return_value): return lambda *args, **kwargs: return_value -def mock_response(status_code, json_body=None, binary_body=None): - """A helper function for creating a mock response with a JSON or binary body.""" - response = MagicMock() - response.status_code = status_code - if json_body: - response.json.return_value = json_body - if binary_body: - response.content = binary_body - return response +class MockResponse: + """A mock response with a JSON or binary body.""" + + def __init__(self, status_code, json_body=None, binary_body=None): + self.status_code = status_code + self.json_body = json_body + self.binary_body = binary_body + + def json(self): + return self.json_body + + @property + def content(self): + return self.binary_body class MockServer: @@ -280,7 +284,7 @@ def test_create_job(self, prog, connection, monkeypatch): monkeypatch.setattr( requests, "post", - mock_return(mock_response(201, {"id": id_, "status": status})), + mock_return(MockResponse(201, {"id": id_, "status": status})), ) job = connection.create_job("chip2", prog, 1) @@ -290,7 +294,7 @@ def test_create_job(self, prog, connection, monkeypatch): def test_create_job_error(self, prog, connection, monkeypatch): """Tests a failed job creation flow.""" - monkeypatch.setattr(requests, "post", mock_return(mock_response(400, {}))) + monkeypatch.setattr(requests, "post", mock_return(MockResponse(400, {}))) with pytest.raises(RequestFailedError): connection.create_job("chip2", prog, 1) @@ -307,7 +311,7 @@ def test_get_all_jobs(self, connection, monkeypatch): for i in range(1, 10) ] monkeypatch.setattr( - requests, "get", mock_return(mock_response(200, {"data": jobs})), + requests, "get", mock_return(MockResponse(200, {"data": jobs})), ) jobs = connection.get_all_jobs(after=datetime(2020, 1, 5)) @@ -317,7 +321,7 @@ def test_get_all_jobs(self, connection, monkeypatch): @pytest.mark.skip(reason="method not yet implemented") def test_get_all_jobs_error(self, connection, monkeypatch): """Tests a failed job list request.""" - monkeypatch.setattr(requests, "get", mock_return(mock_response(404, {}))) + monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) with pytest.raises(RequestFailedError): connection.get_all_jobs() @@ -329,7 +333,7 @@ def test_get_job(self, connection, monkeypatch): monkeypatch.setattr( requests, "get", - mock_return(mock_response(200, {"id": id_, "status": status.value})), + mock_return(MockResponse(200, {"id": id_, "status": status.value})), ) job = connection.get_job(id_) @@ -339,7 +343,7 @@ def test_get_job(self, connection, monkeypatch): def test_get_job_error(self, connection, monkeypatch): """Tests a failed job request.""" - monkeypatch.setattr(requests, "get", mock_return(mock_response(404, {}))) + monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) with pytest.raises(RequestFailedError): connection.get_job("123") @@ -351,14 +355,14 @@ def test_get_job_status(self, connection, monkeypatch): monkeypatch.setattr( requests, "get", - mock_return(mock_response(200, {"id": id_, "status": status.value})), + mock_return(MockResponse(200, {"id": id_, "status": status.value})), ) assert connection.get_job_status(id_) == status def 
test_get_job_status_error(self, connection, monkeypatch): """Tests a failed job status request.""" - monkeypatch.setattr(requests, "get", mock_return(mock_response(404, {}))) + monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) with pytest.raises(RequestFailedError): connection.get_job_status("123") @@ -373,7 +377,7 @@ def test_get_job_result(self, connection, monkeypatch): monkeypatch.setattr( requests, "get", - mock_return(mock_response(200, binary_body=buf.getvalue())), + mock_return(MockResponse(200, binary_body=buf.getvalue())), ) result = connection.get_job_result("123") @@ -382,34 +386,34 @@ def test_get_job_result(self, connection, monkeypatch): def test_get_job_result_error(self, connection, monkeypatch): """Tests a failed job result request.""" - monkeypatch.setattr(requests, "get", mock_return(mock_response(404, {}))) + monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) with pytest.raises(RequestFailedError): connection.get_job_result("123") def test_cancel_job(self, connection, monkeypatch): """Tests a successful job cancellation request.""" - monkeypatch.setattr(requests, "patch", mock_return(mock_response(204, {}))) + monkeypatch.setattr(requests, "patch", mock_return(MockResponse(204, {}))) # A successful cancellation does not raise an exception connection.cancel_job("123") def test_cancel_job_error(self, connection, monkeypatch): """Tests a successful job cancellation request.""" - monkeypatch.setattr(requests, "patch", mock_return(mock_response(404, {}))) + monkeypatch.setattr(requests, "patch", mock_return(MockResponse(404, {}))) with pytest.raises(RequestFailedError): connection.cancel_job("123") def test_ping_success(self, connection, monkeypatch): """Tests a successful ping to the remote host.""" - monkeypatch.setattr(requests, "get", mock_return(mock_response(200, {}))) + monkeypatch.setattr(requests, "get", mock_return(MockResponse(200, {}))) assert connection.ping() def test_ping_failure(self, connection, monkeypatch): """Tests a failed ping to the remote host.""" - monkeypatch.setattr(requests, "get", mock_return(mock_response(500, {}))) + monkeypatch.setattr(requests, "get", mock_return(MockResponse(500, {}))) assert not connection.ping() From 4787515ebdbf35c0dfa63f28a40f9bd0d433e726 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Tue, 25 Feb 2020 17:29:48 -0500 Subject: [PATCH 182/335] Update tests/frontend/test_engine.py Co-Authored-By: antalszava --- tests/frontend/test_engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 792c3efd6..f8d9a44db 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -399,7 +399,7 @@ def test_cancel_job(self, connection, monkeypatch): connection.cancel_job("123") def test_cancel_job_error(self, connection, monkeypatch): - """Tests a successful job cancellation request.""" + """Tests a failed job cancellation request.""" monkeypatch.setattr(requests, "patch", mock_return(MockResponse(404, {}))) with pytest.raises(RequestFailedError): From 4614d830ec854600c3c506fc79f886022245ffcf Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 20:02:27 -0500 Subject: [PATCH 183/335] Adding configuration options and environment variables to module docstring, inserting refs to these in function docstrings, correcting Kwargs section naming in docstrings --- strawberryfields/configuration.py | 57 ++++++++++++++++++------------- 1 file changed, 33 insertions(+), 24 deletions(-) 
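Note: the module docstring updated in the diff below documents the precedence that ``load_config`` applies — keyword arguments override environment variables, which in turn override values read from a configuration file. A minimal sketch of that ordering (an illustration only, not part of the patch, and assuming no conflicting ``config.toml`` is picked up from the search path):

.. code-block:: python

    import os
    import strawberryfields.configuration as conf

    # an environment variable is picked up by load_config
    os.environ["SF_API_HOSTNAME"] = "env-host"
    assert conf.load_config()["api"]["hostname"] == "env-host"

    # a keyword argument takes precedence over the environment variable
    assert conf.load_config(hostname="kwarg-host")["api"]["hostname"] == "kwarg-host"

    # clean up so other code is unaffected
    del os.environ["SF_API_HOSTNAME"]

The test suite touched later in this series (e.g. ``test_keywords_take_precedence_over_everything``) exercises the same ordering by monkeypatching the environment.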
diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 7d58b8bbb..3f1bce78e 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -14,6 +14,31 @@ r""" This module contains the :class:`Configuration` class, which is used to load, store, save, and modify configuration options for Strawberry Fields. + +Configuration options +********************* + +.. note:: + The following configuration options are taken into consideration: + + * **authentication_token (str)** (*required*): the token used for user authentication + * **hostname (str)** (*optional*): the name of the host to connect to + * **use_ssl (bool)** (*optional*): specifies if requests should be sent using SSL + * **port (int)** (*optional*): the port to be used when connecting to the remote service + * **debug (bool)** (*optional*): determines if the debugging mode is requested + +Environment variables +********************* + +.. note:: + + When loading the configuration, the following environment variables are checked: + + * SF_API_AUTHENTICATION_TOKEN + * SF_API_HOSTNAME + * SF_API_USE_SSL + * SF_API_DEBUG + * SF_API_PORT """ import logging as log import os @@ -44,14 +69,11 @@ def load_config(filename="config.toml", **kwargs): 2. data contained in environmental variables (if any) 3. data contained in a configuration file (if exists) - Keyword arguments: + Kwargs: filename (str): the name of the configuration file to look for - authentication_token (str): the token to be used for user - authentication - hostname (str): the name of the host to connect to - use_ssl (bool): specifies if requests should be sent using SSL - port (int): the port to be used when connecting to the remote service - debug (bool): determines if the debugging mode is requested + + Furthermore configuration options as detailed in + :mod:`strawberryfields.configuration` Returns: dict[str, dict[str, Union[str, bool, int]]]: the configuration @@ -83,13 +105,8 @@ def create_config(authentication_token="", **kwargs): If called without passing any keyword arguments, then a default configuration object is created. - Keyword arguments: - authentication_token (str): the token to be used for user - authentication - hostname (str): the name of the host to connect to - use_ssl (bool): specifies if requests should be sent using SSL - port (int): the port to be used when connecting to the remote service - debug (bool): determines if the debugging mode is requested + Kwargs: + Configuration options as detailed in :mod:`strawberryfields.configuration` Returns: dict[str, dict[str, Union[str, bool, int]]]: the configuration @@ -123,7 +140,7 @@ def get_config_filepath(filename="config.toml"): * The directory specified by the environment variable SF_CONF (if specified) * The user configuration directory (if specified) - Keyword arguments: + Kwargs: filename (str): the configuration file to look for Returns: @@ -180,15 +197,7 @@ def update_from_environment_variables(config): """Updates the current configuration object from data stored in environment variables. - .. 
note:: - - The following environment variables are checked: - - * SF_API_AUTHENTICATION_TOKEN - * SF_API_HOSTNAME - * SF_API_USE_SSL - * SF_API_DEBUG - * SF_API_PORT + The list of environment variables can be found at :mod:`strawberryfields.configuration` Args: config (dict[str, dict[str, Union[str, bool, int]]]): the From 09a3b3d8c5af3fd6602c3de3ce6183cf999fb457 Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 20:14:26 -0500 Subject: [PATCH 184/335] Removing redundant return None statement --- strawberryfields/configuration.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 3f1bce78e..80ef6dbf4 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -157,8 +157,6 @@ def get_config_filepath(filename="config.toml"): if os.path.exists(filepath): return filepath - return None - def load_config_file(filepath): """Load a configuration object from a TOML formatted file. From ef68ab50ed7371e7d281ef9de2ee98b3a8dbd91c Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 21:16:59 -0500 Subject: [PATCH 185/335] Remove update_config function, add keep_valid_options function and add tests --- strawberryfields/configuration.py | 27 +++++------ tests/frontend/test_configuration.py | 72 ++++++++++------------------ 2 files changed, 37 insertions(+), 62 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 80ef6dbf4..54645c5bd 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -84,14 +84,15 @@ def load_config(filename="config.toml", **kwargs): if config_filepath is not None: loaded_config = load_config_file(config_filepath) - update_config(config, other_config=loaded_config) + valid_api_options = keep_valid_options(loaded_config["api"]) + config["api"].update(valid_api_options) else: log.info("No Strawberry Fields configuration file found.") update_from_environment_variables(config) - config_from_keyword_arguments = {"api": kwargs} - update_config(config, other_config=config_from_keyword_arguments) + valid_kwargs_config = keep_valid_options(kwargs) + config["api"].update(valid_kwargs_config) return config @@ -171,25 +172,18 @@ def load_config_file(filepath): config_from_file = toml.load(f) return config_from_file -def update_config(config, other_config): - """Updates the current configuration object with another one. - - This function assumes that other_config is a valid configuration - dictionary. +def keep_valid_options(sectionconfig): + """Filters the valid options in a section of a configuration dictionary. 
Args: - config (dict[str, dict[str, Union[str, bool, int]]]): the - configuration to be updated - other_config (dict[str, dict[str, Union[str, bool, int]]]): the - configuration used for updating + sectionconfig (dict[str, Union[str, bool, int]]): the section of the + configuration to check Returns: - dict[str, dict[str, Union[str, bool, int]]]): the updated + dict[str, Union[str, bool, int]]: the keep section of the configuration """ - # Here an example for section is API - for section in config.keys(): - config[section].update(other_config[section]) + return {k: v for k, v in sectionconfig.items() if k in VALID_KEYS} def update_from_environment_variables(config): """Updates the current configuration object from data stored in environment @@ -239,6 +233,7 @@ def parse_environment_variable(key, value): return value +VALID_KEYS = set(create_config()["api"].keys()) DEFAULT_CONFIG = create_config() configuration = load_config() config_filepath = get_config_filepath() diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 97b8738e2..046840ef9 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -1,4 +1,3 @@ - # Copyright 2019-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -293,51 +292,32 @@ def test_loading_absolute_path(self, tmpdir, monkeypatch): assert loaded_config == EXPECTED_CONFIG -class TestUpdateConfig: - """Tests for the update_config function.""" - - def test_update_entire_config(self): - """Tests that the entire configuration object is updated.""" - - config = conf.create_config() - assert config["api"]["authentication_token"] == "" - - conf.update_config(config, EXPECTED_CONFIG) - assert config == EXPECTED_CONFIG - - ONLY_AUTH_CONFIG = { - "api": {"authentication_token": "PlaceHolder"} - } - - ONLY_HOST_CONFIG = { - "api": {"hostname": "PlaceHolder",} - } - - ONLY_SSL_CONFIG = { - "api": {"use_ssl": "PlaceHolder"} - } - - ONLY_DEBUG_CONFIG = { - "api": {"debug": "PlaceHolder"} - } - - ONLY_PORT_CONFIG = { - "api": {"port": "PlaceHolder"} - } - - @pytest.mark.parametrize("specific_key, config_to_update_with", [("authentication_token",ONLY_AUTH_CONFIG), - ("hostname",ONLY_HOST_CONFIG), - ("use_ssl",ONLY_SSL_CONFIG), - ("debug",ONLY_DEBUG_CONFIG), - ("port",ONLY_PORT_CONFIG)]) - def test_update_only_one_item_in_section(self, specific_key, config_to_update_with): - """Tests that only one item in the configuration object is updated.""" - config = conf.create_config() - assert config["api"][specific_key] != "PlaceHolder" - - conf.update_config(config, config_to_update_with) - assert config["api"][specific_key] == "PlaceHolder" - assert all(v != "PlaceHolder" for k, v in config["api"].items() if k != specific_key) +class TestKeepValidOptions: + + def test_only_invalid_options(self): + section_config_with_invalid_options = {'NotValid1': 1, + 'NotValid2': 2, + 'NotValid3': 3 + } + assert conf.keep_valid_options(section_config_with_invalid_options) == {} + + def test_valid_and_invalid_options(self): + section_config_with_invalid_options = { 'authentication_token': 'MyToken', + 'NotValid1': 1, + 'NotValid2': 2, + 'NotValid3': 3 + } + assert conf.keep_valid_options(section_config_with_invalid_options) == {'authentication_token': 'MyToken'} + + def test_only_valid_options(self): + section_config_only_valid = { + "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", + "hostname": "localhost", + "use_ssl": True, + "port": 443, + "debug": False, 
+ } + assert conf.keep_valid_options(section_config_only_valid) == EXPECTED_CONFIG["api"] value_mapping = [ ("SF_API_AUTHENTICATION_TOKEN","SomeAuth"), From 3ca025e7f0bd16b5a06eff917ff7916b7e390d96 Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 21:40:16 -0500 Subject: [PATCH 186/335] Addig DEFAULT_CONFIG_SPEC --- strawberryfields/configuration.py | 24 +++++++++++++++++------- tests/frontend/test_configuration.py | 7 ++----- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 54645c5bd..8e68f3561 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -48,8 +48,18 @@ log.getLogger() -BOOLEAN_KEYS = {"debug": False, "use_ssl": True} -INTEGER_KEYS = {"port": 443} +DEFAULT_CONFIG_SPEC = { + "api": { + "authentication_token": (str, ""), + "hostname": (str, "localhost"), + "use_ssl": (bool, True), + "port": (int, 443), + "debug": (bool, False), + } +} + +BOOLEAN_KEYS = {"debug", "use_ssl"} +INTEGER_KEYS = {"port"} class ConfigurationError(Exception): """Exception used for configuration errors""" @@ -114,9 +124,9 @@ def create_config(authentication_token="", **kwargs): object """ hostname = kwargs.get("hostname", "localhost") - use_ssl = kwargs.get("use_ssl", BOOLEAN_KEYS["use_ssl"]) - port = kwargs.get("port", INTEGER_KEYS["port"]) - debug = kwargs.get("debug", BOOLEAN_KEYS["debug"]) + use_ssl = kwargs.get("use_ssl", DEFAULT_CONFIG_SPEC["api"]["use_ssl"][1]) + port = kwargs.get("port", DEFAULT_CONFIG_SPEC["api"]["port"][1]) + debug = kwargs.get("debug",DEFAULT_CONFIG_SPEC["api"]["debug"][1]) config = { "api": { @@ -219,7 +229,7 @@ def parse_environment_variable(key, value): trues = (True, "true", "True", "TRUE", "1", 1) falses = (False, "false", "False", "FALSE", "0", 0) - if key in BOOLEAN_KEYS: + if DEFAULT_CONFIG_SPEC["api"][key][0] is bool: if value in trues: return True @@ -228,7 +238,7 @@ def parse_environment_variable(key, value): raise ValueError("Boolean could not be parsed") - if key in INTEGER_KEYS: + if DEFAULT_CONFIG_SPEC["api"][key][0] is int: return int(value) return value diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 046840ef9..16aaf309a 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -385,7 +385,7 @@ def test_one_environment_variable_defined(self, env_var, key, value, monkeypatch def test_parse_environment_variable_boolean(self, monkeypatch): """Tests that boolean values can be parsed correctly from environment variables.""" - monkeypatch.setattr(conf, "BOOLEAN_KEYS", ("some_boolean",)) + monkeypatch.setattr(conf, "DEFAULT_CONFIG_SPEC", {"api": {"some_boolean": (bool, True)}}) assert conf.parse_environment_variable("some_boolean", "true") is True assert conf.parse_environment_variable("some_boolean", "True") is True assert conf.parse_environment_variable("some_boolean", "TRUE") is True @@ -398,12 +398,9 @@ def test_parse_environment_variable_boolean(self, monkeypatch): assert conf.parse_environment_variable("some_boolean", "0") is False assert conf.parse_environment_variable("some_boolean", 0) is False - assert conf.parse_environment_variable("not_a_boolean","something_else") == "something_else" - def test_parse_environment_variable_integer(self, monkeypatch): """Tests that integer values can be parsed correctly from environment variables.""" - monkeypatch.setattr(conf, "INTEGER_KEYS", ("some_integer",)) + monkeypatch.setattr(conf, 
"DEFAULT_CONFIG_SPEC", {"api": {"some_integer": (int, 123)}}) assert conf.parse_environment_variable("some_integer", "123") == 123 - assert conf.parse_environment_variable("not_an_integer","something_else") == "something_else" From 37720155a8de7ae7c17f5c4cbe5a52af451bc98e Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 21:53:09 -0500 Subject: [PATCH 187/335] Removed debug option for now --- strawberryfields/configuration.py | 7 +------ tests/frontend/test_configuration.py | 8 -------- 2 files changed, 1 insertion(+), 14 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 8e68f3561..3cce2329a 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -25,7 +25,6 @@ * **hostname (str)** (*optional*): the name of the host to connect to * **use_ssl (bool)** (*optional*): specifies if requests should be sent using SSL * **port (int)** (*optional*): the port to be used when connecting to the remote service - * **debug (bool)** (*optional*): determines if the debugging mode is requested Environment variables ********************* @@ -37,7 +36,6 @@ * SF_API_AUTHENTICATION_TOKEN * SF_API_HOSTNAME * SF_API_USE_SSL - * SF_API_DEBUG * SF_API_PORT """ import logging as log @@ -54,11 +52,10 @@ "hostname": (str, "localhost"), "use_ssl": (bool, True), "port": (int, 443), - "debug": (bool, False), } } -BOOLEAN_KEYS = {"debug", "use_ssl"} +BOOLEAN_KEYS = {"use_ssl"} INTEGER_KEYS = {"port"} class ConfigurationError(Exception): @@ -126,7 +123,6 @@ def create_config(authentication_token="", **kwargs): hostname = kwargs.get("hostname", "localhost") use_ssl = kwargs.get("use_ssl", DEFAULT_CONFIG_SPEC["api"]["use_ssl"][1]) port = kwargs.get("port", DEFAULT_CONFIG_SPEC["api"]["port"][1]) - debug = kwargs.get("debug",DEFAULT_CONFIG_SPEC["api"]["debug"][1]) config = { "api": { @@ -134,7 +130,6 @@ def create_config(authentication_token="", **kwargs): "hostname": hostname, "use_ssl": use_ssl, "port": port, - "debug": debug } } return config diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 16aaf309a..c0eb45d12 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -32,7 +32,6 @@ hostname = "localhost" use_ssl = true port = 443 -debug = false """ TEST_FILE_ONE_VALUE = """\ @@ -47,7 +46,6 @@ "hostname": "localhost", "use_ssl": True, "port": 443, - "debug": False, } } @@ -57,7 +55,6 @@ "hostname": "SomeHost", "use_ssl": False, "port": 56, - "debug": True, } } @@ -99,7 +96,6 @@ def test_keywords_take_precedence_over_everything(self, monkeypatch, tmpdir): configuration = conf.load_config(authentication_token="SomeAuth", hostname="SomeHost", use_ssl=False, - debug=True, port=56 ) @@ -150,7 +146,6 @@ def test_empty_config_object(self): config = conf.create_config(authentication_token="", hostname="", use_ssl="", - debug="", port="") assert all(value=="" for value in config["api"].values()) @@ -166,7 +161,6 @@ def test_config_object_every_keyword_argument(self): assert conf.create_config(authentication_token="SomeAuth", hostname="SomeHost", use_ssl=False, - debug=True, port=56) == OTHER_EXPECTED_CONFIG class TestGetConfigFilepath: """Tests for the get_config_filepath function.""" @@ -315,7 +309,6 @@ def test_only_valid_options(self): "hostname": "localhost", "use_ssl": True, "port": 443, - "debug": False, } assert conf.keep_valid_options(section_config_only_valid) == EXPECTED_CONFIG["api"] @@ -360,7 +353,6 @@ def 
test_all_environment_variables_defined(self, monkeypatch): ("SF_API_HOSTNAME","hostname","SomeHost"), ("SF_API_USE_SSL","use_ssl","False"), ("SF_API_PORT","port", "56"), - ("SF_API_DEBUG","debug","True") ] @pytest.mark.parametrize("env_var, key, value", environment_variables_with_keys_and_values) From d21f3feda00dc7f898e8d8550e68941519add955 Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 21:53:36 -0500 Subject: [PATCH 188/335] Update strawberryfields/configuration.py Co-Authored-By: Josh Izaac --- strawberryfields/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 80ef6dbf4..6b0171095 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -19,7 +19,7 @@ ********************* .. note:: - The following configuration options are taken into consideration: + The following configuration options are available:``` * **authentication_token (str)** (*required*): the token used for user authentication * **hostname (str)** (*optional*): the name of the host to connect to From 7c73d5ca48d25633a909706be853c1fc764f456e Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 21:55:47 -0500 Subject: [PATCH 189/335] Update strawberryfields/configuration.py Co-Authored-By: Josh Izaac --- strawberryfields/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 6b0171095..75a99931e 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -72,7 +72,7 @@ def load_config(filename="config.toml", **kwargs): Kwargs: filename (str): the name of the configuration file to look for - Furthermore configuration options as detailed in + Configuration options are detailed in :mod:`strawberryfields.configuration` Returns: From 47eac17318644c29dcb5e4ec622efe3225ee4f78 Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 22:36:26 -0500 Subject: [PATCH 190/335] Moving configuration details to page, updating the configuration page with kwargs, adding port to file and env vars --- doc/introduction/configuration.rst | 30 ++++++++++++++++++++++-------- strawberryfields/configuration.py | 27 +++++---------------------- 2 files changed, 27 insertions(+), 30 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index e49c893b6..fc0ea5e5b 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -33,15 +33,29 @@ and has the following format: authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" hostname = "localhost" use_ssl = true + port = 443 Summary of options ------------------ -SF_API_USE_SSL: - Whether to use SSL or not when connecting to the API. True or False. -SF_API_HOSTNAME: - The hostname of the server to connect to. Defaults to localhost. Must be one of the allowed - hosts. -SF_API_AUTHENTICATION_TOKEN: - The authentication token to use when connecting to the API. Will be sent with every request in - the header. +Keyword arguments +***************** + + **authentication_token (str)** (*required*) + The authentication token to use when connecting to the API. Will be sent with every request in + the header. + **hostname (str)** (*optional*) + The hostname of the server to connect to. Defaults to localhost. Must be one of the allowed + hosts. 
+ **use_ssl (bool)** (*optional*) + Whether to use SSL or not when connecting to the API. True or False. + **port (int)** (*optional*) + The port to be used when connecting to the remote service. + +Environment variables +********************* + +* SF_API_AUTHENTICATION_TOKEN +* SF_API_HOSTNAME +* SF_API_USE_SSL +* SF_API_PORT \ No newline at end of file diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 3cce2329a..aa797e424 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -15,28 +15,11 @@ This module contains the :class:`Configuration` class, which is used to load, store, save, and modify configuration options for Strawberry Fields. -Configuration options -********************* +.. warning:: -.. note:: - The following configuration options are taken into consideration: + Details on the configuration options can be found at + :doc:`/introduction/configuration`. - * **authentication_token (str)** (*required*): the token used for user authentication - * **hostname (str)** (*optional*): the name of the host to connect to - * **use_ssl (bool)** (*optional*): specifies if requests should be sent using SSL - * **port (int)** (*optional*): the port to be used when connecting to the remote service - -Environment variables -********************* - -.. note:: - - When loading the configuration, the following environment variables are checked: - - * SF_API_AUTHENTICATION_TOKEN - * SF_API_HOSTNAME - * SF_API_USE_SSL - * SF_API_PORT """ import logging as log import os @@ -80,7 +63,7 @@ def load_config(filename="config.toml", **kwargs): filename (str): the name of the configuration file to look for Furthermore configuration options as detailed in - :mod:`strawberryfields.configuration` + :doc:`/introduction/configuration` Returns: dict[str, dict[str, Union[str, bool, int]]]: the configuration @@ -114,7 +97,7 @@ def create_config(authentication_token="", **kwargs): configuration object is created. 
Kwargs: - Configuration options as detailed in :mod:`strawberryfields.configuration` + Configuration options as detailed in :doc:`/introduction/configuration` Returns: dict[str, dict[str, Union[str, bool, int]]]: the configuration From e0938a9640b39e7dbce71d7b14d1faa9b4495cd7 Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 22:53:04 -0500 Subject: [PATCH 191/335] Update strawberryfields/configuration.py Co-Authored-By: Josh Izaac --- strawberryfields/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index c498312ad..29e075d59 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -62,7 +62,7 @@ def load_config(filename="config.toml", **kwargs): Kwargs: filename (str): the name of the configuration file to look for - Configuration options are detailed in + Additional configuration options are detailed in :doc:`/introduction/configuration` Returns: From 17be8bb60c66848e135338143a1abf54fbc6d772 Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 23:06:29 -0500 Subject: [PATCH 192/335] Applying comments: Configuration page modification, removing redundant module attributes, reword module docstring --- doc/introduction/configuration.rst | 38 +++++++++++++----------------- strawberryfields/configuration.py | 11 ++++----- 2 files changed, 20 insertions(+), 29 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index fc0ea5e5b..2f5d2771b 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -35,27 +35,21 @@ and has the following format: use_ssl = true port = 443 -Summary of options ------------------- - -Keyword arguments -***************** - - **authentication_token (str)** (*required*) - The authentication token to use when connecting to the API. Will be sent with every request in - the header. - **hostname (str)** (*optional*) - The hostname of the server to connect to. Defaults to localhost. Must be one of the allowed - hosts. - **use_ssl (bool)** (*optional*) - Whether to use SSL or not when connecting to the API. True or False. - **port (int)** (*optional*) - The port to be used when connecting to the remote service. - -Environment variables +Configuration options ********************* -* SF_API_AUTHENTICATION_TOKEN -* SF_API_HOSTNAME -* SF_API_USE_SSL -* SF_API_PORT \ No newline at end of file +**authentication_token (str)** (*required*) + The authentication token to use when connecting to the API. Will be sent with every request in + the header. Corresponding environment variable: ``SF_API_AUTHENTICATION_TOKEN`` + +**hostname (str)** (*optional*) + The hostname of the server to connect to. Defaults to ``localhost``. Must be one of the allowed + hosts. Corresponding environment variable: ``SF_API_HOSTNAME`` + +**use_ssl (bool)** (*optional*) + Whether to use SSL or not when connecting to the API. True or False. + Corresponding environment variable: ``SF_API_USE_SSL`` + +**port (int)** (*optional*) + The port to be used when connecting to the remote service. 
+ Corresponding environment variable: ``SF_API_PORT`` \ No newline at end of file diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index c498312ad..eb9f614ba 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. r""" -This module contains the :class:`Configuration` class, which is used to -load, store, save, and modify configuration options for Strawberry Fields. +This module contains functions used to load, store, save, and modify +configuration options for Strawberry Fields. .. warning:: - The following configuration options are available: - :doc:`/introduction/configuration`. + See more details regarding Strawberry Fields configuration and available + configuration options on the :doc:`/introduction/configuration` page. """ import logging as log @@ -38,9 +38,6 @@ } } -BOOLEAN_KEYS = {"use_ssl"} -INTEGER_KEYS = {"port"} - class ConfigurationError(Exception): """Exception used for configuration errors""" From 66358799a1f5ced8ef42633d7c996b41e445a271 Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 25 Feb 2020 23:19:32 -0500 Subject: [PATCH 193/335] CHANGELOG --- .github/CHANGELOG.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index 274000c31..b114ec51d 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -9,6 +9,11 @@ ### Improvements +* Replaced the `Configuration` class with the `load_config` and auxiliary + functions to load configuration from keyword arguments, environment variables + and configuration file. + [#298](https://github.com/XanaduAI/strawberryfields/pull/298) + ### Bug fixes * Symbolic Operation parameters are now compatible with TensorFlow 2.0 objects. @@ -22,7 +27,7 @@ This release contains contributions from (in alphabetical order): -Ville Bergholm, Jack Ceroni +Ville Bergholm, Jack Ceroni, Antal Száva --- From f2352886287e263b0a5aa4760fb97a032e1ecd25 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 10:23:18 -0500 Subject: [PATCH 194/335] First draft of store_account --- strawberryfields/configuration.py | 35 ++++++++++++++++++++++ tests/frontend/test_configuration.py | 44 ++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index ce8dd6bc5..340669933 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -218,6 +218,41 @@ def parse_environment_variable(key, value): return value +def store_account(authentication_token, filename="config.toml", create_locally=True, **kwargs): + """Stores an account in a configuration file. + + Args: + authentication_token (str): + + Kwargs: + create_locally (bool): determines if the configuration file should be + saved locally or globally (to the user configuration directory) + filename (str): the name of the configuration file to look for + + Configuration options are detailed in + :doc:`/introduction/configuration` + """ + if create_locally: + directory = os.getcwd() + else: + directory = user_config_dir("strawberryfields", "Xanadu") + + filepath = os.path.join(directory, filename) + + config = create_config_object(authentication_token=authentication_token, **kwargs) + save_config_to_file(config, filepath) + +def save_config_to_file(config, filepath): + """Saves a configuration to a TOML file. 
+ + Args: + config (dict[str, dict[str, Union[str, bool, int]]]): the + configuration to be saved + filepath (str): path to the configuration file + """ + with open(filepath, "w") as f: + toml.dump(config, f) + VALID_KEYS = set(create_config()["api"].keys()) DEFAULT_CONFIG = create_config() configuration = load_config() diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index c0eb45d12..8dea48a1d 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -396,3 +396,47 @@ def test_parse_environment_variable_integer(self, monkeypatch): monkeypatch.setattr(conf, "DEFAULT_CONFIG_SPEC", {"api": {"some_integer": (int, 123)}}) assert conf.parse_environment_variable("some_integer", "123") == 123 + +DEFAULT_KWARGS = { + "api": { + "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", + "hostname": "localhost", + "use_ssl": True, + "port": 443, + } + } + +class TestStoreAccount: + """Tests for the store_account function.""" + + def test_config_created_locally(self): + """Tests that a configuration file was created in the current + directory.""" + + test_filename = "test_config.toml" + + + + call_history = [] + m.setattr(os, "getcwd", lambda: tmpdir) + m.setattr(conf, "save_config_to_file", lambda a, b: call_history.append((a, b))) + conf.store_account(authentication_token, filename=test_filename, create_locally=True, **DEFAULT_KWARGS) + + assert call_history[0] == DEFAULT_CONFIG + assert call_history[0] == tmpdir.join(test_filename) + +class TestSaveConfigToFile: + """Tests for the store_account function.""" + + def test_save(self, tmpdir): + """Test saving a configuration file.""" + filename = str(tmpdir.join("test_config.toml")) + + config = EXPECTED_CONFIG + + # make a change + config["api"]["hostname"] = "https://6.4.2.4" + conf.save_config_to_file(filename) + + result = toml.load(filename) + assert config == result From 97b2a7fa324b69fb891cc1544c5120e1af271fec Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 11:07:21 -0500 Subject: [PATCH 195/335] Update strawberryfields/engine.py Co-Authored-By: Josh Izaac --- strawberryfields/engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 4fc224cb6..811943804 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -586,7 +586,7 @@ class Job: """Represents a remote job that can be queried for its status or result. This object should typically not be instantiated directly, but returned by an - `Engine` or `Connection` when a job is run. + ``Engine`` or ``Connection`` when a job is run. Args: id_ (str): the job ID From 0c0b109ef7ce3aee241a0554dfa6b677f613b7f5 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 11:10:18 -0500 Subject: [PATCH 196/335] Unit tests and docstrings --- strawberryfields/configuration.py | 6 +++-- tests/frontend/test_configuration.py | 35 +++++++++++++++++++++------- 2 files changed, 30 insertions(+), 11 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 340669933..2351ad728 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -222,7 +222,9 @@ def store_account(authentication_token, filename="config.toml", create_locally=T """Stores an account in a configuration file. 
Args: - authentication_token (str): + authentication_token (str): the authentication token to use when + connecting to the API, which will be sent with every request in + the header Kwargs: create_locally (bool): determines if the configuration file should be @@ -239,7 +241,7 @@ def store_account(authentication_token, filename="config.toml", create_locally=T filepath = os.path.join(directory, filename) - config = create_config_object(authentication_token=authentication_token, **kwargs) + config = create_config(authentication_token=authentication_token, **kwargs) save_config_to_file(config, filepath) def save_config_to_file(config, filepath): diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 8dea48a1d..717546626 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -409,34 +409,51 @@ def test_parse_environment_variable_integer(self, monkeypatch): class TestStoreAccount: """Tests for the store_account function.""" - def test_config_created_locally(self): + def test_config_created_locally(self, monkeypatch, tmpdir): """Tests that a configuration file was created in the current directory.""" test_filename = "test_config.toml" + call_history = [] + with monkeypatch.context() as m: + m.setattr(os, "getcwd", lambda: tmpdir) + m.setattr(conf, "user_config_dir", lambda *args: "NotTheCorrectDir") + m.setattr(conf, "save_config_to_file", lambda a, b: call_history.append((a, b))) + conf.store_account(authentication_token, filename=test_filename, create_locally=True, **DEFAULT_KWARGS) + + assert call_history[0][0] == EXPECTED_CONFIG + assert call_history[0][1] == tmpdir.join(test_filename) + def test_global_config_created(self, monkeypatch, tmpdir): + """Tests that a configuration file was created in the user + configuration directory for Strawberry Fields.""" + + test_filename = "test_config.toml" call_history = [] - m.setattr(os, "getcwd", lambda: tmpdir) - m.setattr(conf, "save_config_to_file", lambda a, b: call_history.append((a, b))) - conf.store_account(authentication_token, filename=test_filename, create_locally=True, **DEFAULT_KWARGS) + with monkeypatch.context() as m: + m.setattr(os, "getcwd", lambda: "NotTheCorrectDir") + m.setattr(conf, "user_config_dir", lambda *args: tmpdir) + m.setattr(conf, "save_config_to_file", lambda a, b: call_history.append((a, b))) + conf.store_account(authentication_token, filename=test_filename, create_locally=False, **DEFAULT_KWARGS) - assert call_history[0] == DEFAULT_CONFIG - assert call_history[0] == tmpdir.join(test_filename) + assert call_history[0][0] == EXPECTED_CONFIG + assert call_history[0][1] == tmpdir.join(test_filename) class TestSaveConfigToFile: """Tests for the store_account function.""" def test_save(self, tmpdir): """Test saving a configuration file.""" - filename = str(tmpdir.join("test_config.toml")) + test_filename = "test_config.toml" + filepath = str(tmpdir.join(test_filename)) config = EXPECTED_CONFIG # make a change config["api"]["hostname"] = "https://6.4.2.4" - conf.save_config_to_file(filename) + conf.save_config_to_file(config, filepath) - result = toml.load(filename) + result = toml.load(filepath) assert config == result From b0d12b871728f57be7d4f1474d96ee6637849357 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 11:11:50 -0500 Subject: [PATCH 197/335] Update strawberryfields/engine.py Co-Authored-By: Josh Izaac --- strawberryfields/engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/strawberryfields/engine.py b/strawberryfields/engine.py index 811943804..d1d0acb1c 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -1022,7 +1022,7 @@ def run(self, program: Program, shots: int = 1) -> Optional[Result]: def run_async(self, program: Program, shots: int = 1) -> Job: """Runs a remote job asynchronously. - In the asynchronous mode, a `Job` is returned immediately, and the user can + In the asynchronous mode, a ``Job`` is returned immediately, and the user can manually refresh the status and result of the job. Args: From 2566163d86e28ca322fb63ccba7979ffbaffee07 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 11:30:21 -0500 Subject: [PATCH 198/335] Added extra unit test, modified the configuration.rst --- doc/introduction/configuration.rst | 17 ++++++++++++++++- strawberryfields/configuration.py | 9 ++++++--- tests/frontend/test_configuration.py | 26 +++++++++++++++++++------- 3 files changed, 41 insertions(+), 11 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 2f5d2771b..84cc06ec2 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -52,4 +52,19 @@ Configuration options **port (int)** (*optional*) The port to be used when connecting to the remote service. - Corresponding environment variable: ``SF_API_PORT`` \ No newline at end of file + Corresponding environment variable: ``SF_API_PORT`` + +Store your account +------------------ + +Using the :func:`configuration.store_account` function, a configuration file can be created easily. It only requires specifying the authentication token. Apart from that, optional configuration options can be passed as keyword arguments. + +The following is an example for using `store_account` with defaults: + +.. code:: + + import strawberryfields as sf + my_token = "MyToken" + sf.store_account(my_token) + +It is advised to execute this code snippet **only once** per configuration, separately from any other Strawberry Fields scripts. Using the default options it will store the account in the current working directory by creating a `config.toml` file. \ No newline at end of file diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 2351ad728..589bb241c 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -221,14 +221,17 @@ def parse_environment_variable(key, value): def store_account(authentication_token, filename="config.toml", create_locally=True, **kwargs): """Stores an account in a configuration file. + The configuration file is created either in the current working direct + (locally) or in the user configuration directory (globally). 
+ Args: authentication_token (str): the authentication token to use when - connecting to the API, which will be sent with every request in - the header + connecting to the API, it will be sent with every request in the + header Kwargs: create_locally (bool): determines if the configuration file should be - saved locally or globally (to the user configuration directory) + saved locally or globally filename (str): the name of the configuration file to look for Configuration options are detailed in diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 717546626..2d7bce915 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -444,16 +444,28 @@ def test_global_config_created(self, monkeypatch, tmpdir): class TestSaveConfigToFile: """Tests for the store_account function.""" - def test_save(self, tmpdir): + def test_correct(self, tmpdir): """Test saving a configuration file.""" test_filename = "test_config.toml" filepath = str(tmpdir.join(test_filename)) - config = EXPECTED_CONFIG - - # make a change - config["api"]["hostname"] = "https://6.4.2.4" - conf.save_config_to_file(config, filepath) + conf.save_config_to_file(OTHER_EXPECTED_CONFIG, filepath) result = toml.load(filepath) - assert config == result + assert result == OTHER_EXPECTED_CONFIG + + def test_file_already_existed(self, tmpdir): + """Test saving a configuration file even if the file already existed.""" + test_filename = "test_config.toml" + filepath = str(tmpdir.join(test_filename)) + + with open(filepath, "w") as f: + f.write(TEST_FILE) + + result_for_existing_file = toml.load(filepath) + assert result_for_existing_file == EXPECTED_CONFIG + + conf.save_config_to_file(OTHER_EXPECTED_CONFIG, filepath) + + result_for_new_file = toml.load(filepath) + assert result_for_new_file == OTHER_EXPECTED_CONFIG From 684de214e60c787aade09ce7729e2d509efe459d Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 11:55:41 -0500 Subject: [PATCH 199/335] Modified configuration page; renamed configuration location parameter and recognized arguments --- doc/introduction/configuration.rst | 30 +++++++++++++++++++++------- strawberryfields/configuration.py | 15 +++++++------- tests/frontend/test_configuration.py | 4 ++-- 3 files changed, 33 insertions(+), 16 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 84cc06ec2..4509a6755 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -36,7 +36,7 @@ and has the following format: port = 443 Configuration options -********************* +--------------------- **authentication_token (str)** (*required*) The authentication token to use when connecting to the API. Will be sent with every request in @@ -57,14 +57,30 @@ Configuration options Store your account ------------------ -Using the :func:`configuration.store_account` function, a configuration file can be created easily. It only requires specifying the authentication token. Apart from that, optional configuration options can be passed as keyword arguments. +Using the :func:`~.store_account` function, a configuration file can be created easily. It only requires specifying the authentication token. Apart from that, optional configuration options can be passed as keyword arguments. -The following is an example for using `store_account` with defaults: +Configure for the current SF project +************************************ -.. 
code:: +The following is an example for using ``store_account`` with defaults: + +.. code-block:: python + + import strawberryfields as sf + sf.store_account("MyToken") + +where ``"MyToken"`` contains the user specific authentication token. + +It is advised to execute this code snippet **only once** per configuration in the same directory where the SF project can be found. It should also be separated from any other Strawberry Fields scripts. Using the default options it will store the account in the *current working directory* by creating a ``config.toml`` file. + +Configure for every SF project +****************************** + +The following code snippet can be used to create a configuration file for *every Strawberry Fields project*. + +.. code-block:: python import strawberryfields as sf - my_token = "MyToken" - sf.store_account(my_token) + sf.store_account("MyToken", location="user_config") -It is advised to execute this code snippet **only once** per configuration, separately from any other Strawberry Fields scripts. Using the default options it will store the account in the current working directory by creating a `config.toml` file. \ No newline at end of file +where ``"MyToken"`` is the user specific authentication token. \ No newline at end of file diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 589bb241c..04368b814 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -218,11 +218,12 @@ def parse_environment_variable(key, value): return value -def store_account(authentication_token, filename="config.toml", create_locally=True, **kwargs): +def store_account(authentication_token, filename="config.toml", location="local", **kwargs): """Stores an account in a configuration file. - The configuration file is created either in the current working direct - (locally) or in the user configuration directory (globally). 
+ The configuration file can be created in the following locations: + - current working direct (local) + - user configuration directory (user_config) Args: authentication_token (str): the authentication token to use when @@ -230,16 +231,16 @@ def store_account(authentication_token, filename="config.toml", create_locally=T header Kwargs: - create_locally (bool): determines if the configuration file should be - saved locally or globally + location (str): determines where the configuration file should be saved + filename (str): the name of the configuration file to look for Configuration options are detailed in :doc:`/introduction/configuration` """ - if create_locally: + if location == "local": directory = os.getcwd() - else: + elif location == "user_config": directory = user_config_dir("strawberryfields", "Xanadu") filepath = os.path.join(directory, filename) diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 2d7bce915..bc0638d6f 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -420,7 +420,7 @@ def test_config_created_locally(self, monkeypatch, tmpdir): m.setattr(os, "getcwd", lambda: tmpdir) m.setattr(conf, "user_config_dir", lambda *args: "NotTheCorrectDir") m.setattr(conf, "save_config_to_file", lambda a, b: call_history.append((a, b))) - conf.store_account(authentication_token, filename=test_filename, create_locally=True, **DEFAULT_KWARGS) + conf.store_account(authentication_token, filename=test_filename, location="local", **DEFAULT_KWARGS) assert call_history[0][0] == EXPECTED_CONFIG assert call_history[0][1] == tmpdir.join(test_filename) @@ -436,7 +436,7 @@ def test_global_config_created(self, monkeypatch, tmpdir): m.setattr(os, "getcwd", lambda: "NotTheCorrectDir") m.setattr(conf, "user_config_dir", lambda *args: tmpdir) m.setattr(conf, "save_config_to_file", lambda a, b: call_history.append((a, b))) - conf.store_account(authentication_token, filename=test_filename, create_locally=False, **DEFAULT_KWARGS) + conf.store_account(authentication_token, filename=test_filename, location="user_config", **DEFAULT_KWARGS) assert call_history[0][0] == EXPECTED_CONFIG assert call_history[0][1] == tmpdir.join(test_filename) From ad2b3535ca90ab95b254f90dbc95cd15a5f7ecd0 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 11:59:54 -0500 Subject: [PATCH 200/335] Refactor Connection, Job, Result to modules under api namespace --- strawberryfields/api/__init__.py | 20 ++ strawberryfields/api/connection.py | 276 +++++++++++++++++ strawberryfields/api/job.py | 138 +++++++++ strawberryfields/api/result.py | 115 +++++++ strawberryfields/engine.py | 465 +---------------------------- tests/frontend/test_engine.py | 9 +- 6 files changed, 555 insertions(+), 468 deletions(-) create mode 100644 strawberryfields/api/__init__.py create mode 100644 strawberryfields/api/connection.py create mode 100644 strawberryfields/api/job.py create mode 100644 strawberryfields/api/result.py diff --git a/strawberryfields/api/__init__.py b/strawberryfields/api/__init__.py new file mode 100644 index 000000000..c332a1a66 --- /dev/null +++ b/strawberryfields/api/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2020 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +TODO +""" + +from .connection import Connection, RequestFailedError +from .job import Job, JobStatus, InvalidJobOperationError +from .result import Result diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py new file mode 100644 index 000000000..eec2ef733 --- /dev/null +++ b/strawberryfields/api/connection.py @@ -0,0 +1,276 @@ +# Copyright 2020 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +TODO +""" +from datetime import datetime +import io +import json +from typing import List + +import numpy as np +import requests + +from strawberryfields.io import to_blackbird +from strawberryfields.program import Program +from .job import Job, JobStatus +from .result import Result + + +class RequestFailedError(Exception): + """Raised when a request to the remote platform returns an error response.""" + + +class Connection: + """Manages remote connections to the remote job execution platform and exposes + various job operations. + + For basic usage, it is not necessary to manually instantiate this object; the user + is encouraged to use the higher-level interface provided by :class:`~StarshipEngine`. + + **Example:** + + The following example instantiates a :class:`~Connection` for a given API + authentication token, tests the connection, submits a new job, and makes requests + for a single or multiple existing jobs. + + .. code-block:: python + + connection = Connection(token="abc") + + # Ping the remote server + success = connection.ping() + # True if successful, or False if cannot connect or not authenticated + + # Submit a new job + job = connection.create_job("chip2", program, shots=123) + job # + + # Get all jobs submitted for this token + jobs = connection.get_all_jobs() + jobs # [, ...] 
+ + # Get a specific job by ID + job = connection.get_job("59a1c0b1-c6a7-4f9b-ae37-0ac5eec9c413") + job # + + Args: + token (str): the API authentication token + host (str): the hostname of the remote platform + port (int): the port to connect to on the remote host + use_ssl (bool): whether to use SSL for the connection + """ + + MAX_JOBS_REQUESTED = 100 + JOB_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" + + # pylint: disable=bad-continuation + # See: https://github.com/PyCQA/pylint/issues/289 + def __init__( + self, + token: str, + host: str = "platform.strawberryfields.ai", + port: int = 443, + use_ssl: bool = True, + ): + self._token = token + self._host = host + self._port = port + self._use_ssl = use_ssl + + self._base_url = "http{}://{}:{}".format( + "s" if self.use_ssl else "", self.host, self.port + ) + self._headers = {"Authorization": self.token} + + @property + def token(self) -> str: + """The API authentication token. + + Returns: + str: the authentication token + """ + return self._token + + @property + def host(self) -> str: + """The host for the remote platform. + + Returns: + str: the hostname + """ + return self._host + + @property + def port(self) -> int: + """The port to connect to on the remote host. + + Returns: + int: the port number + """ + return self._port + + @property + def use_ssl(self) -> bool: + """Whether to use SSL for the connection. + + Returns: + bool: ``True`` if SSL should be used, and ``False`` otherwise + """ + return self._use_ssl + + def create_job(self, target: str, program: Program, shots: int) -> Job: + """Creates a job with the given circuit. + + Args: + target (str): the target device + program (strawberryfields.Program): the quantum circuit + shots (int): the number of shots + + Returns: + strawberryfields.engine.Job: the created job + """ + # Serialize a blackbird circuit for network transmission + bb = to_blackbird(program) + # pylint: disable=protected-access + bb._target["name"] = target + # pylint: disable=protected-access + bb._target["options"] = {"shots": shots} + circuit = bb.serialize() + + path = "/jobs" + response = requests.post( + self._url(path), + headers=self._headers, + data=json.dumps({"circuit": circuit}), + ) + if response.status_code == 201: + return Job( + id_=response.json()["id"], + status=JobStatus(response.json()["status"]), + connection=self, + ) + raise RequestFailedError( + "Job creation failed: {}".format(self._format_error_message(response)) + ) + + def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: + """Gets a list of jobs created by the user, optionally filtered by datetime. + + A maximum of the 100 most recent jobs are returned. + + Args: + after (datetime.datetime): if provided, only jobs more recently created + then ``after`` are returned + + Returns: + List[strawberryfields.engine.Job]: the jobs + """ + raise NotImplementedError("This feature is not yet implemented") + + def get_job(self, job_id: str) -> Job: + """Gets a job. + + Args: + job_id (str): the job ID + + Returns: + strawberryfields.engine.Job: the job + """ + path = "/jobs/{}".format(job_id) + response = requests.get(self._url(path), headers=self._headers) + if response.status_code == 200: + return Job( + id_=response.json()["id"], + status=JobStatus(response.json()["status"]), + connection=self, + ) + raise RequestFailedError(self._format_error_message(response)) + + def get_job_status(self, job_id: str) -> JobStatus: + """Returns the status of a job. 
+ + Args: + job_id (str): the job ID + + Returns: + strawberryfields.engine.JobStatus: the job status + """ + return JobStatus(self.get_job(job_id).status) + + def get_job_result(self, job_id: str) -> Result: + """Returns the result of a job. + + Args: + job_id (str): the job ID + + Returns: + strawberryfields.engine.Result: the job result + """ + path = "/jobs/{}/result".format(job_id) + response = requests.get( + self._url(path), headers={"Accept": "application/x-numpy", **self._headers}, + ) + if response.status_code == 200: + # Read the numpy binary data in the payload into memory + with io.BytesIO() as buf: + buf.write(response.content) + buf.seek(0) + samples = np.load(buf) + return Result(samples, is_stateful=False) + raise RequestFailedError(self._format_error_message(response)) + + def cancel_job(self, job_id: str): + """Cancels a job. + + Args: + job_id (str): the job ID + """ + path = "/jobs/{}".format(job_id) + response = requests.patch( + self._url(path), + headers=self._headers, + data={"status", JobStatus.CANCELLED.value}, + ) + if response.status_code == 204: + return + raise RequestFailedError(self._format_error_message(response)) + + def ping(self) -> bool: + """Tests the connection to the remote backend. + + Returns: + bool: ``True`` if the connection is successful, and ``False`` otherwise + """ + path = "/healthz" + response = requests.get(self._url(path), headers=self._headers) + return response.status_code == 200 + + def _url(self, path: str) -> str: + return self._base_url + path + + @staticmethod + def _format_error_message(response: requests.Response) -> str: + body = response.json() + return "{} ({}): {}".format( + body.get("status_code", ""), body.get("code", ""), body.get("detail", "") + ) + + def __repr__(self): + return "<{}: token={}, host={}>".format( + self.__class__.__name__, self.token, self.host + ) + + def __str__(self): + return self.__repr__() diff --git a/strawberryfields/api/job.py b/strawberryfields/api/job.py new file mode 100644 index 000000000..adfe7a94b --- /dev/null +++ b/strawberryfields/api/job.py @@ -0,0 +1,138 @@ +# Copyright 2020 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +TODO +""" + +from strawberryfields.program import Program +from .result import Result +import enum + + +class InvalidJobOperationError(Exception): + """Raised when an invalid operation is performed on a job.""" + + +class JobStatus(enum.Enum): + """Represents the status of a remote job. + + This class maps a set of job statuses to the string representations returned by the + remote platform. + """ + + OPEN = "open" + QUEUED = "queued" + CANCELLED = "cancelled" + COMPLETED = "complete" + FAILED = "failed" + + @property + def is_final(self) -> bool: + """Checks if this status represents a final and immutable state. + + This method is primarily used to determine if an operation is valid for a given + status. 
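+
+        For example, a cancelled, completed, or failed job can no longer be refreshed
+        or cancelled.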
+ + Returns: + bool: ``True`` if the job status is final, and ``False`` otherwise + """ + return self in (JobStatus.CANCELLED, JobStatus.COMPLETED, JobStatus.FAILED) + + +class Job: + """Represents a remote job that can be queried for its status or result. + + This object should typically not be instantiated directly, but returned by an + ``Engine`` or ``Connection`` when a job is run. + + Args: + id_ (str): the job ID + status (strawberryfields.engine.JobStatus): the job status + connection (strawberryfields.engine.Connection): the connection over which the + job is managed + """ + + def __init__(self, id_: str, status: JobStatus, connection: "Connection"): + self._id = id_ + self._status = status + self._connection = connection + self._result = None + + @property + def id(self) -> str: + """The job ID. + + Returns: + str: the job ID + """ + return self._id + + @property + def status(self) -> JobStatus: + """The job status. + + Returns: + strawberryfields.engine.JobStatus: the job status + """ + return self._status + + @property + def result(self) -> Result: + """The job result. + + This is only defined for completed jobs, and raises an exception for any other + status. + + Returns: + strawberryfields.engine.Result: the job result + """ + if self.status != JobStatus.COMPLETED: + raise AttributeError( + "The result is undefined for jobs that are not completed " + "(current status: {})".format(self.status.value) + ) + return self._result + + def refresh(self): + """Refreshes the status of the job, along with the job result if the job is + newly completed. + + Only an open or queued job can be refreshed; an exception is raised otherwise. + """ + if self.status.is_final: + raise InvalidJobOperationError( + "A {} job cannot be refreshed".format(self.status.value) + ) + self._status = self._connection.get_job_status(self.id) + if self._status == JobStatus.COMPLETED: + self._result = self._connection.get_job_result(self.id) + + def cancel(self): + """Cancels the job. + + Only an open or queued job can be cancelled; an exception is raised otherwise. + """ + if self.status.is_final: + raise InvalidJobOperationError( + "A {} job cannot be cancelled".format(self.status.value) + ) + self._connection.cancel_job(self.id) + + def __repr__(self): + return "<{}: id={}, status={}>".format( + self.__class__.__name__, self.id, self.status.value + ) + + def __str__(self): + return self.__repr__() diff --git a/strawberryfields/api/result.py b/strawberryfields/api/result.py new file mode 100644 index 000000000..2f8b7d791 --- /dev/null +++ b/strawberryfields/api/result.py @@ -0,0 +1,115 @@ +# Copyright 2019-2020 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +TODO +""" +import numpy as np + + +class Result: + """Result of a quantum computation. + + Represents the results of the execution of a quantum program on a local or + remote backend. 
+ + The returned :class:`~Result` object provides several useful properties + for accessing the results of your program execution: + + * ``results.state``: The quantum state object contains details and methods + for manipulation of the final circuit state. Not available for remote + backends. See :doc:`/introduction/states` for more information regarding available + state methods. + + * ``results.samples``: Measurement samples from any measurements performed. + + **Example:** + + The following examples run an existing Strawberry Fields + quantum :class:`~.Program` on the Gaussian engine to get + a results object. + + Using this results object, the measurement samples + can be returned, as well as quantum state information. + + >>> eng = sf.Engine("gaussian") + >>> results = eng.run(prog) + >>> print(results) + Result: 3 subsystems + state: + samples: [0, 0, 0] + >>> results.samples + [0, 0, 0] + >>> results.state.is_pure() + True + + .. note:: + + Only local simulators will return a state object. Remote + simulators and hardware backends will return + measurement samples (:attr:`Result.samples`), + but the return value of ``Result.state`` will be ``None``. + """ + + def __init__(self, samples, is_stateful=True): + self._state = None + self._is_stateful = is_stateful + + # ``samples`` arrives as a list of arrays, need to convert here to a multidimensional array + if len(np.shape(samples)) > 1: + samples = np.stack(samples, 1) + self._samples = samples + + @property + def samples(self): + """Measurement samples. + + Returned measurement samples will have shape ``(modes,)``. If multiple + shots are requested during execution, the returned measurement samples + will instead have shape ``(shots, modes)``. + + Returns: + array[array[float, int]]: measurement samples returned from + program execution + """ + return self._samples + + @property + def state(self): + """The quantum state object. + + The quantum state object contains details and methods + for manipulation of the final circuit state. + + See :doc:`/introduction/states` for more information regarding available + state methods. + + .. note:: + + Only local simulators will return a state object. Remote + simulators and hardware backends will return + measurement samples (:attr:`Result.samples`), + but the return value of ``Result.state`` will be ``None``. + + Returns: + BaseState: quantum state returned from program execution + """ + if not self._is_stateful: + raise AttributeError("The state is undefined for a stateless computation.") + return self._state + + def __str__(self): + """String representation.""" + return "Result: {} subsystems, state: {}\n samples: {}".format( + len(self.samples), self.state, self.samples + ) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index d1d0acb1c..5b4fd82bb 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -33,6 +33,7 @@ from strawberryfields.configuration import DEFAULT_CONFIG from strawberryfields.io import to_blackbird from strawberryfields.program import Program +from strawberryfields.api import Connection, Job, JobStatus, Result from .backends import load_backend from .backends.base import BaseBackend, NotApplicableError @@ -41,104 +42,6 @@ __all__ = ["Result", "BaseEngine", "LocalEngine", "Connection"] -class Result: - """Result of a quantum computation. - - Represents the results of the execution of a quantum program on a local or - remote backend. 
- - The returned :class:`~Result` object provides several useful properties - for accessing the results of your program execution: - - * ``results.state``: The quantum state object contains details and methods - for manipulation of the final circuit state. Not available for remote - backends. See :doc:`/introduction/states` for more information regarding available - state methods. - - * ``results.samples``: Measurement samples from any measurements performed. - - **Example:** - - The following examples run an existing Strawberry Fields - quantum :class:`~.Program` on the Gaussian engine to get - a results object. - - Using this results object, the measurement samples - can be returned, as well as quantum state information. - - >>> eng = sf.Engine("gaussian") - >>> results = eng.run(prog) - >>> print(results) - Result: 3 subsystems - state: - samples: [0, 0, 0] - >>> results.samples - [0, 0, 0] - >>> results.state.is_pure() - True - - .. note:: - - Only local simulators will return a state object. Remote - simulators and hardware backends will return - measurement samples (:attr:`Result.samples`), - but the return value of ``Result.state`` will be ``None``. - """ - - def __init__(self, samples, is_stateful=True): - self._state = None - self._is_stateful = is_stateful - - # ``samples`` arrives as a list of arrays, need to convert here to a multidimensional array - if len(np.shape(samples)) > 1: - samples = np.stack(samples, 1) - self._samples = samples - - @property - def samples(self): - """Measurement samples. - - Returned measurement samples will have shape ``(modes,)``. If multiple - shots are requested during execution, the returned measurement samples - will instead have shape ``(shots, modes)``. - - Returns: - array[array[float, int]]: measurement samples returned from - program execution - """ - return self._samples - - @property - def state(self): - """The quantum state object. - - The quantum state object contains details and methods - for manipulation of the final circuit state. - - See :doc:`/introduction/states` for more information regarding available - state methods. - - .. note:: - - Only local simulators will return a state object. Remote - simulators and hardware backends will return - measurement samples (:attr:`Result.samples`), - but the return value of ``Result.state`` will be ``None``. - - Returns: - BaseState: quantum state returned from program execution - """ - if not self._is_stateful: - raise AttributeError("The state is undefined for a stateless computation.") - return self._state - - def __str__(self): - """String representation.""" - return "Result: {} subsystems, state: {}\n samples: {}".format( - len(self.samples), self.state, self.samples - ) - - class BaseEngine(abc.ABC): r"""Abstract base class for quantum program executor engines. @@ -544,376 +447,10 @@ def run(self, program, *, args=None, compile_options=None, run_options=None): return result -class RequestFailedError(Exception): - """Raised when a request to the remote platform returns an error response.""" - - -class InvalidJobOperationError(Exception): - """Raised when an invalid operation is performed on a job.""" - - class JobFailedError(Exception): """Raised when a remote job enters a 'failed' status.""" -class JobStatus(enum.Enum): - """Represents the status of a remote job. - - This class maps a set of job statuses to the string representations returned by the - remote platform. 
- """ - - OPEN = "open" - QUEUED = "queued" - CANCELLED = "cancelled" - COMPLETED = "complete" - FAILED = "failed" - - @property - def is_final(self) -> bool: - """Checks if this status represents a final and immutable state. - - This method is primarily used to determine if an operation is valid for a given - status. - - Returns: - bool: ``True`` if the job status is final, and ``False`` otherwise - """ - return self in (JobStatus.CANCELLED, JobStatus.COMPLETED, JobStatus.FAILED) - - -class Job: - """Represents a remote job that can be queried for its status or result. - - This object should typically not be instantiated directly, but returned by an - ``Engine`` or ``Connection`` when a job is run. - - Args: - id_ (str): the job ID - status (strawberryfields.engine.JobStatus): the job status - connection (strawberryfields.engine.Connection): the connection over which the - job is managed - """ - - def __init__(self, id_: str, status: JobStatus, connection: "Connection"): - self._id = id_ - self._status = status - self._connection = connection - self._result = None - - @property - def id(self) -> str: - """The job ID. - - Returns: - str: the job ID - """ - return self._id - - @property - def status(self) -> JobStatus: - """The job status. - - Returns: - strawberryfields.engine.JobStatus: the job status - """ - return self._status - - @property - def result(self) -> Result: - """The job result. - - This is only defined for completed jobs, and raises an exception for any other - status. - - Returns: - strawberryfields.engine.Result: the job result - """ - if self.status != JobStatus.COMPLETED: - raise AttributeError( - "The result is undefined for jobs that are not completed " - "(current status: {})".format(self.status.value) - ) - return self._result - - def refresh(self): - """Refreshes the status of the job, along with the job result if the job is - newly completed. - - Only an open or queued job can be refreshed; an exception is raised otherwise. - """ - if self.status.is_final: - raise InvalidJobOperationError( - "A {} job cannot be refreshed".format(self.status.value) - ) - self._status = self._connection.get_job_status(self.id) - if self._status == JobStatus.COMPLETED: - self._result = self._connection.get_job_result(self.id) - - def cancel(self): - """Cancels the job. - - Only an open or queued job can be cancelled; an exception is raised otherwise. - """ - if self.status.is_final: - raise InvalidJobOperationError( - "A {} job cannot be cancelled".format(self.status.value) - ) - self._connection.cancel_job(self.id) - - def __repr__(self): - return "<{}: id={}, status={}>".format( - self.__class__.__name__, self.id, self.status.value - ) - - def __str__(self): - return self.__repr__() - - -class Connection: - """Manages remote connections to the remote job execution platform and exposes - various job operations. - - For basic usage, it is not necessary to manually instantiate this object; the user - is encouraged to use the higher-level interface provided by :class:`~StarshipEngine`. - - **Example:** - - The following example instantiates a :class:`~Connection` for a given API - authentication token, tests the connection, submits a new job, and makes requests - for a single or multiple existing jobs. - - .. 
code-block:: python - - connection = Connection(token="abc") - - # Ping the remote server - success = connection.ping() - # True if successful, or False if cannot connect or not authenticated - - # Submit a new job - job = connection.create_job("chip2", program, shots=123) - job # - - # Get all jobs submitted for this token - jobs = connection.get_all_jobs() - jobs # [, ...] - - # Get a specific job by ID - job = connection.get_job("59a1c0b1-c6a7-4f9b-ae37-0ac5eec9c413") - job # - - Args: - token (str): the API authentication token - host (str): the hostname of the remote platform - port (int): the port to connect to on the remote host - use_ssl (bool): whether to use SSL for the connection - """ - - MAX_JOBS_REQUESTED = 100 - JOB_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" - - # pylint: disable=bad-continuation - # See: https://github.com/PyCQA/pylint/issues/289 - def __init__( - self, - token: str, - host: str = "platform.strawberryfields.ai", - port: int = 443, - use_ssl: bool = True, - ): - self._token = token - self._host = host - self._port = port - self._use_ssl = use_ssl - - self._base_url = "http{}://{}:{}".format( - "s" if self.use_ssl else "", self.host, self.port - ) - self._headers = {"Authorization": self.token} - - @property - def token(self) -> str: - """The API authentication token. - - Returns: - str: the authentication token - """ - return self._token - - @property - def host(self) -> str: - """The host for the remote platform. - - Returns: - str: the hostname - """ - return self._host - - @property - def port(self) -> int: - """The port to connect to on the remote host. - - Returns: - int: the port number - """ - return self._port - - @property - def use_ssl(self) -> bool: - """Whether to use SSL for the connection. - - Returns: - bool: ``True`` if SSL should be used, and ``False`` otherwise - """ - return self._use_ssl - - def create_job(self, target: str, program: Program, shots: int) -> Job: - """Creates a job with the given circuit. - - Args: - target (str): the target device - program (strawberryfields.Program): the quantum circuit - shots (int): the number of shots - - Returns: - strawberryfields.engine.Job: the created job - """ - # Serialize a blackbird circuit for network transmission - bb = to_blackbird(program) - # pylint: disable=protected-access - bb._target["name"] = target - # pylint: disable=protected-access - bb._target["options"] = {"shots": shots} - circuit = bb.serialize() - - path = "/jobs" - response = requests.post( - self._url(path), - headers=self._headers, - data=json.dumps({"circuit": circuit}), - ) - if response.status_code == 201: - return Job( - id_=response.json()["id"], - status=JobStatus(response.json()["status"]), - connection=self, - ) - raise RequestFailedError( - "Job creation failed: {}".format(self._format_error_message(response)) - ) - - def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: - """Gets a list of jobs created by the user, optionally filtered by datetime. - - A maximum of the 100 most recent jobs are returned. - - Args: - after (datetime.datetime): if provided, only jobs more recently created - then ``after`` are returned - - Returns: - List[strawberryfields.engine.Job]: the jobs - """ - raise NotImplementedError("This feature is not yet implemented") - - def get_job(self, job_id: str) -> Job: - """Gets a job. 
- - Args: - job_id (str): the job ID - - Returns: - strawberryfields.engine.Job: the job - """ - path = "/jobs/{}".format(job_id) - response = requests.get(self._url(path), headers=self._headers) - if response.status_code == 200: - return Job( - id_=response.json()["id"], - status=JobStatus(response.json()["status"]), - connection=self, - ) - raise RequestFailedError(self._format_error_message(response)) - - def get_job_status(self, job_id: str) -> JobStatus: - """Returns the status of a job. - - Args: - job_id (str): the job ID - - Returns: - strawberryfields.engine.JobStatus: the job status - """ - return JobStatus(self.get_job(job_id).status) - - def get_job_result(self, job_id: str) -> Result: - """Returns the result of a job. - - Args: - job_id (str): the job ID - - Returns: - strawberryfields.engine.Result: the job result - """ - path = "/jobs/{}/result".format(job_id) - response = requests.get( - self._url(path), headers={"Accept": "application/x-numpy", **self._headers}, - ) - if response.status_code == 200: - # Read the numpy binary data in the payload into memory - with io.BytesIO() as buf: - buf.write(response.content) - buf.seek(0) - samples = np.load(buf) - return Result(samples, is_stateful=False) - raise RequestFailedError(self._format_error_message(response)) - - def cancel_job(self, job_id: str): - """Cancels a job. - - Args: - job_id (str): the job ID - """ - path = "/jobs/{}".format(job_id) - response = requests.patch( - self._url(path), - headers=self._headers, - data={"status", JobStatus.CANCELLED.value}, - ) - if response.status_code == 204: - return - raise RequestFailedError(self._format_error_message(response)) - - def ping(self) -> bool: - """Tests the connection to the remote backend. - - Returns: - bool: ``True`` if the connection is successful, and ``False`` otherwise - """ - path = "/healthz" - response = requests.get(self._url(path), headers=self._headers) - return response.status_code == 200 - - def _url(self, path: str) -> str: - return self._base_url + path - - @staticmethod - def _format_error_message(response: requests.Response) -> str: - body = response.json() - return "{} ({}): {}".format( - body.get("status_code", ""), body.get("code", ""), body.get("detail", "") - ) - - def __repr__(self): - return "<{}: token={}, host={}>".format( - self.__class__.__name__, self.token, self.host - ) - - def __str__(self): - return self.__repr__() - - class StarshipEngine: """A quantum program executor engine that that provides a simple interface for running remote jobs in a synchronous or asynchronous manner. 
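A minimal usage sketch of the remote engine referred to above (the device name, token, and program here are illustrative placeholders):

    connection = Connection(token="<authentication token>")
    eng = StarshipEngine("chip2", connection=connection)
    result = eng.run(prog)      # synchronous: blocks until the remote job completes
    job = eng.run_async(prog)   # asynchronous: poll with job.refresh(), then read job.result
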
diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index f8d9a44db..572a8a144 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -21,16 +21,17 @@ import strawberryfields as sf from strawberryfields import ops -from strawberryfields.backends.base import BaseBackend -from strawberryfields.engine import ( +from strawberryfields.api import ( Connection, - InvalidJobOperationError, Job, JobStatus, RequestFailedError, Result, - StarshipEngine, + InvalidJobOperationError, + RequestFailedError, ) +from strawberryfields.backends.base import BaseBackend +from strawberryfields.engine import StarshipEngine pytestmark = pytest.mark.frontend From 795b3d1e6c7b9b960da6a7faa82aa4959a004931 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 12:01:32 -0500 Subject: [PATCH 201/335] Add non-recognized option and test --- strawberryfields/configuration.py | 2 ++ tests/frontend/test_configuration.py | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 04368b814..212b3b5c8 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -242,6 +242,8 @@ def store_account(authentication_token, filename="config.toml", location="local" directory = os.getcwd() elif location == "user_config": directory = user_config_dir("strawberryfields", "Xanadu") + else: + raise ConfigurationError("This location is not recognized.") filepath = os.path.join(directory, filename) diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index bc0638d6f..c56f4a10b 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -441,6 +441,18 @@ def test_global_config_created(self, monkeypatch, tmpdir): assert call_history[0][0] == EXPECTED_CONFIG assert call_history[0][1] == tmpdir.join(test_filename) + def test_location_not_recognized_error(self, monkeypatch, tmpdir): + """Tests that a configuration file was created in the user + configuration directory for Strawberry Fields.""" + + test_filename = "test_config.toml" + + with pytest.raises( + conf.ConfigurationError, + match="This location is not recognized.", + ): + conf.store_account(authentication_token, filename=test_filename, location="UNRECOGNIZED_LOCATION", **DEFAULT_KWARGS) + class TestSaveConfigToFile: """Tests for the store_account function.""" From 594bc73839701c26ba7da0f5eb18fe57892b392a Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 12:07:37 -0500 Subject: [PATCH 202/335] Linting --- strawberryfields/configuration.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 212b3b5c8..848bc0199 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -30,12 +30,12 @@ log.getLogger() DEFAULT_CONFIG_SPEC = { - "api": { + "api": { "authentication_token": (str, ""), "hostname": (str, "localhost"), "use_ssl": (bool, True), "port": (int, 443), - } + } } class ConfigurationError(Exception): @@ -67,10 +67,10 @@ def load_config(filename="config.toml", **kwargs): """ config = create_config() - config_filepath = get_config_filepath(filename=filename) + filepath = get_config_filepath(filename=filename) - if config_filepath is not None: - loaded_config = load_config_file(config_filepath) + if filepath is not None: + loaded_config = load_config_file(filepath) valid_api_options = 
keep_valid_options(loaded_config["api"]) config["api"].update(valid_api_options) else: @@ -143,6 +143,8 @@ def get_config_filepath(filename="config.toml"): if os.path.exists(filepath): return filepath + return None + def load_config_file(filepath): """Load a configuration object from a TOML formatted file. From 8d5f5fa10096dd8d833190394d9e76a5c817cc27 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 12:16:18 -0500 Subject: [PATCH 203/335] Docstrings, adjusting webpage description --- doc/introduction/configuration.rst | 4 ++-- strawberryfields/configuration.py | 2 +- tests/frontend/test_configuration.py | 7 ++++--- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 4509a6755..28faddc60 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -57,7 +57,7 @@ Configuration options Store your account ------------------ -Using the :func:`~.store_account` function, a configuration file can be created easily. It only requires specifying the authentication token. Apart from that, optional configuration options can be passed as keyword arguments. +Using the :func:`~.store_account` function, a configuration file can be created easily. It only requires specifying the authentication token. Apart from that, further configuration options can be passed as keyword arguments. Configure for the current SF project ************************************ @@ -71,7 +71,7 @@ The following is an example for using ``store_account`` with defaults: where ``"MyToken"`` contains the user specific authentication token. -It is advised to execute this code snippet **only once** per configuration in the same directory where the SF project can be found. It should also be separated from any other Strawberry Fields scripts. Using the default options it will store the account in the *current working directory* by creating a ``config.toml`` file. +It is advised to execute this code snippet **only once** per configuration. This should be done in the same directory where the SF project can be found, separately from executing any other Strawberry Fields scripts. Using the default options it will store the account in the *current working directory* by creating a ``config.toml`` file. Configure for every SF project ****************************** diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 848bc0199..5a6f40076 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -224,7 +224,7 @@ def store_account(authentication_token, filename="config.toml", location="local" """Stores an account in a configuration file. 
The configuration file can be created in the following locations: - - current working direct (local) + - current working directory (local) - user configuration directory (user_config) Args: diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index c56f4a10b..d24c66195 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -442,8 +442,8 @@ def test_global_config_created(self, monkeypatch, tmpdir): assert call_history[0][1] == tmpdir.join(test_filename) def test_location_not_recognized_error(self, monkeypatch, tmpdir): - """Tests that a configuration file was created in the user - configuration directory for Strawberry Fields.""" + """Tests that an error is raised if the configuration file is supposed + to be created in an unrecognized directory.""" test_filename = "test_config.toml" @@ -467,7 +467,8 @@ def test_correct(self, tmpdir): assert result == OTHER_EXPECTED_CONFIG def test_file_already_existed(self, tmpdir): - """Test saving a configuration file even if the file already existed.""" + """Test saving a configuration file even if the file already + existed.""" test_filename = "test_config.toml" filepath = str(tmpdir.join(test_filename)) From a033bee06bf298f4d53484f0778b47b426705c06 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 12:26:53 -0500 Subject: [PATCH 204/335] Refactor tests into coherent modules --- starship | 2 +- strawberryfields/api/job.py | 3 +- strawberryfields/engine.py | 13 +- tests/api/conftest.py | 33 ++++ tests/api/test_connection.py | 200 +++++++++++++++++++ tests/api/test_job.py | 44 +++++ tests/api/test_result.py | 29 +++ tests/api/test_starship_engine.py | 105 ++++++++++ tests/frontend/test_engine.py | 318 ------------------------------ 9 files changed, 415 insertions(+), 332 deletions(-) create mode 100644 tests/api/conftest.py create mode 100644 tests/api/test_connection.py create mode 100644 tests/api/test_job.py create mode 100644 tests/api/test_result.py create mode 100644 tests/api/test_starship_engine.py diff --git a/starship b/starship index d0f04f641..bf86246c0 100755 --- a/starship +++ b/starship @@ -54,7 +54,7 @@ if __name__ == "__main__": eng = StarshipEngine("chip2", connection) sys.stdout.write("Computing...\n") - result = eng.run(program, shots=1) + result = eng.run(program) if result and result.samples is not None: if args.output: diff --git a/strawberryfields/api/job.py b/strawberryfields/api/job.py index adfe7a94b..482ac21d3 100644 --- a/strawberryfields/api/job.py +++ b/strawberryfields/api/job.py @@ -14,10 +14,9 @@ """ TODO """ +import enum -from strawberryfields.program import Program from .result import Result -import enum class InvalidJobOperationError(Exception): diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 5b4fd82bb..9ca00e904 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -19,21 +19,12 @@ """ import abc import collections.abc -import enum -import io -import json import time -from datetime import datetime -from typing import Dict, List, Optional -from urllib.parse import urljoin - -import numpy as np -import requests +from typing import Optional +from strawberryfields.api import Connection, Job, JobStatus, Result from strawberryfields.configuration import DEFAULT_CONFIG -from strawberryfields.io import to_blackbird from strawberryfields.program import Program -from strawberryfields.api import Connection, Job, JobStatus, Result from .backends import load_backend from .backends.base import 
BaseBackend, NotApplicableError diff --git a/tests/api/conftest.py b/tests/api/conftest.py new file mode 100644 index 000000000..2429fe062 --- /dev/null +++ b/tests/api/conftest.py @@ -0,0 +1,33 @@ +# Copyright 2020 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""TODO""" +import pytest + +from strawberryfields import Program, ops +from strawberryfields.api import Connection + + +@pytest.fixture +def prog(): + """Program fixture.""" + prog = Program(2) + with prog.context as q: + ops.Dgate(0.5) | q[0] + return prog + + +@pytest.fixture +def connection(): + """A mock connection object.""" + return Connection(token="token", host="host", port=123, use_ssl=True) diff --git a/tests/api/test_connection.py b/tests/api/test_connection.py new file mode 100644 index 000000000..e43f6743a --- /dev/null +++ b/tests/api/test_connection.py @@ -0,0 +1,200 @@ +# Copyright 2020 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""TODO""" +import io + +import numpy as np +import pytest +import requests + +from strawberryfields.api import Connection, JobStatus, RequestFailedError + + +def mock_return(return_value): + """A helper function for defining a mock function that returns the given value for + any arguments. 
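+
+    For example, ``mock_return(42)("any", key="value")`` returns ``42``.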
+ """ + return lambda *args, **kwargs: return_value + + +class MockResponse: + """A mock response with a JSON or binary body.""" + + def __init__(self, status_code, json_body=None, binary_body=None): + self.status_code = status_code + self.json_body = json_body + self.binary_body = binary_body + + def json(self): + return self.json_body + + @property + def content(self): + return self.binary_body + + +class TestConnection: + """Tests for the ``Connection`` class.""" + + def test_init(self): + """Tests that a ``Connection`` is initialized correctly.""" + token, host, port, use_ssl = "token", "host", 123, True + connection = Connection(token, host, port, use_ssl) + + assert connection.token == token + assert connection.host == host + assert connection.port == port + assert connection.use_ssl == use_ssl + + assert connection._url("/abc") == "https://host:123/abc" + + def test_create_job(self, prog, connection, monkeypatch): + """Tests a successful job creation flow.""" + id_, status = "123", JobStatus.QUEUED + + monkeypatch.setattr( + requests, + "post", + mock_return(MockResponse(201, {"id": id_, "status": status})), + ) + + job = connection.create_job("chip2", prog, 1) + + assert job.id == id_ + assert job.status == status + + def test_create_job_error(self, prog, connection, monkeypatch): + """Tests a failed job creation flow.""" + monkeypatch.setattr(requests, "post", mock_return(MockResponse(400, {}))) + + with pytest.raises(RequestFailedError): + connection.create_job("chip2", prog, 1) + + @pytest.mark.skip(reason="method not yet implemented") + def test_get_all_jobs(self, connection, monkeypatch): + """Tests a successful job list request.""" + jobs = [ + { + "id": str(i), + "status": JobStatus.COMPLETED, + "created_at": "2020-01-{:02d}T12:34:56.123456Z".format(i), + } + for i in range(1, 10) + ] + monkeypatch.setattr( + requests, "get", mock_return(MockResponse(200, {"data": jobs})), + ) + + jobs = connection.get_all_jobs(after=datetime(2020, 1, 5)) + + assert [job.id for job in jobs] == [str(i) for i in range(5, 10)] + + @pytest.mark.skip(reason="method not yet implemented") + def test_get_all_jobs_error(self, connection, monkeypatch): + """Tests a failed job list request.""" + monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) + + with pytest.raises(RequestFailedError): + connection.get_all_jobs() + + def test_get_job(self, connection, monkeypatch): + """Tests a successful job request.""" + id_, status = "123", JobStatus.COMPLETED + + monkeypatch.setattr( + requests, + "get", + mock_return(MockResponse(200, {"id": id_, "status": status.value})), + ) + + job = connection.get_job(id_) + + assert job.id == id_ + assert job.status == status + + def test_get_job_error(self, connection, monkeypatch): + """Tests a failed job request.""" + monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) + + with pytest.raises(RequestFailedError): + connection.get_job("123") + + def test_get_job_status(self, connection, monkeypatch): + """Tests a successful job status request.""" + id_, status = "123", JobStatus.COMPLETED + + monkeypatch.setattr( + requests, + "get", + mock_return(MockResponse(200, {"id": id_, "status": status.value})), + ) + + assert connection.get_job_status(id_) == status + + def test_get_job_status_error(self, connection, monkeypatch): + """Tests a failed job status request.""" + monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) + + with pytest.raises(RequestFailedError): + connection.get_job_status("123") + + def 
test_get_job_result(self, connection, monkeypatch): + """Tests a successful job result request.""" + result_samples = np.array([[1, 2], [3, 4]], dtype=np.int8) + + with io.BytesIO() as buf: + np.save(buf, result_samples) + buf.seek(0) + monkeypatch.setattr( + requests, + "get", + mock_return(MockResponse(200, binary_body=buf.getvalue())), + ) + + result = connection.get_job_result("123") + + assert np.array_equal(result.samples.T, result_samples) + + def test_get_job_result_error(self, connection, monkeypatch): + """Tests a failed job result request.""" + monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) + + with pytest.raises(RequestFailedError): + connection.get_job_result("123") + + def test_cancel_job(self, connection, monkeypatch): + """Tests a successful job cancellation request.""" + monkeypatch.setattr(requests, "patch", mock_return(MockResponse(204, {}))) + + # A successful cancellation does not raise an exception + connection.cancel_job("123") + + def test_cancel_job_error(self, connection, monkeypatch): + """Tests a failed job cancellation request.""" + monkeypatch.setattr(requests, "patch", mock_return(MockResponse(404, {}))) + + with pytest.raises(RequestFailedError): + connection.cancel_job("123") + + def test_ping_success(self, connection, monkeypatch): + """Tests a successful ping to the remote host.""" + monkeypatch.setattr(requests, "get", mock_return(MockResponse(200, {}))) + + assert connection.ping() + + def test_ping_failure(self, connection, monkeypatch): + """Tests a failed ping to the remote host.""" + monkeypatch.setattr(requests, "get", mock_return(MockResponse(500, {}))) + + assert not connection.ping() diff --git a/tests/api/test_job.py b/tests/api/test_job.py new file mode 100644 index 000000000..f205a3fed --- /dev/null +++ b/tests/api/test_job.py @@ -0,0 +1,44 @@ +# Copyright 2020 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""TODO""" +import pytest + +from strawberryfields.api import InvalidJobOperationError, Job, JobStatus + + +class TestJob: + """Tests for the ``Job`` class.""" + + def test_incomplete_job_raises_on_result_access(self, connection): + """Tests that `job.result` raises an error for an incomplete job.""" + job = Job("abc", status=JobStatus.QUEUED, connection=connection) + + with pytest.raises(AttributeError): + _ = job.result + + def test_final_job_raises_on_refresh(self, connection): + """Tests that `job.refresh()` raises an error for a complete, failed, or + cancelled job.""" + job = Job("abc", status=JobStatus.COMPLETED, connection=connection) + + with pytest.raises(InvalidJobOperationError): + job.refresh() + + def test_final_job_raises_on_cancel(self, connection): + """Tests that `job.cancel()` raises an error for a complete, failed, or + aleady cancelled job.""" + job = Job("abc", status=JobStatus.COMPLETED, connection=connection) + + with pytest.raises(InvalidJobOperationError): + job.cancel() diff --git a/tests/api/test_result.py b/tests/api/test_result.py new file mode 100644 index 000000000..099a3c295 --- /dev/null +++ b/tests/api/test_result.py @@ -0,0 +1,29 @@ +# Copyright 2020 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""TODO""" +import pytest + +from strawberryfields.api import Result + + +class TestResult: + """Tests for the ``Result`` class.""" + + def test_stateless_result_raises_on_state_access(self): + """Tests that `result.state` raises an error for a stateless result. + """ + result = Result([[1, 2], [3, 4]], is_stateful=False) + + with pytest.raises(AttributeError): + _ = result.state diff --git a/tests/api/test_starship_engine.py b/tests/api/test_starship_engine.py new file mode 100644 index 000000000..3d5e158ba --- /dev/null +++ b/tests/api/test_starship_engine.py @@ -0,0 +1,105 @@ +# Copyright 2020 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""TODO""" +import numpy as np +import pytest + +from strawberryfields.api import Connection, Job, JobStatus, Result +from strawberryfields.engine import StarshipEngine + + +def mock_return(return_value): + """A helper function for defining a mock function that returns the given value for + any arguments. 
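+
+    In these tests it is typically used to stub out ``Connection`` methods, for
+    example ``monkeypatch.setattr(Connection, "get_job_result", mock_return(result))``.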
+ """ + return lambda *args, **kwargs: return_value + + +class MockServer: + """A mock platform server that fakes a processing delay by counting requests.""" + + REQUESTS_BEFORE_COMPLETED = 3 + + def __init__(self): + self.request_count = 0 + + def get_job_status(self, _id): + """Returns a 'queued' job status until the number of requests exceeds a defined + threshold, beyond which a 'complete' job status is returned. + """ + self.request_count += 1 + return ( + JobStatus.COMPLETED + if self.request_count >= self.REQUESTS_BEFORE_COMPLETED + else JobStatus.QUEUED + ) + + +class TestStarshipEngine: + """Tests for the ``StarshipEngine`` class.""" + + def test_run_complete(self, connection, prog, monkeypatch): + """Tests a successful synchronous job execution.""" + id_, result_expected = "123", np.array([[1, 2], [3, 4]], dtype=np.int8) + + server = MockServer() + monkeypatch.setattr( + Connection, + "create_job", + mock_return(Job(id_=id_, status=JobStatus.OPEN, connection=connection)), + ) + monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) + monkeypatch.setattr( + Connection, + "get_job_result", + mock_return(Result(result_expected, is_stateful=False)), + ) + + engine = StarshipEngine("chip2", connection=connection) + result = engine.run(prog) + + assert np.array_equal(result.samples.T, result_expected) + + with pytest.raises(AttributeError): + _ = result.state + + def test_run_async(self, connection, prog, monkeypatch): + """Tests a successful asynchronous job execution.""" + id_, result_expected = "123", np.array([[1, 2], [3, 4]], dtype=np.int8) + + server = MockServer() + monkeypatch.setattr( + Connection, + "create_job", + mock_return(Job(id_=id_, status=JobStatus.OPEN, connection=connection)), + ) + monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) + monkeypatch.setattr( + Connection, + "get_job_result", + mock_return(Result(result_expected, is_stateful=False)), + ) + + engine = StarshipEngine("chip2", connection=connection) + job = engine.run_async(prog) + assert job.status == JobStatus.OPEN + + for _ in range(server.REQUESTS_BEFORE_COMPLETED): + job.refresh() + + assert job.status == JobStatus.COMPLETED + assert np.array_equal(job.result.samples.T, result_expected) + + with pytest.raises(AttributeError): + _ = job.result.state diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index 572a8a144..10a25be41 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -12,26 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. r"""Unit tests for engine.py""" -from datetime import datetime -import io - -import numpy as np import pytest -import requests import strawberryfields as sf from strawberryfields import ops -from strawberryfields.api import ( - Connection, - Job, - JobStatus, - RequestFailedError, - Result, - InvalidJobOperationError, - RequestFailedError, -) from strawberryfields.backends.base import BaseBackend -from strawberryfields.engine import StarshipEngine pytestmark = pytest.mark.frontend @@ -173,306 +158,3 @@ def inspect(): eng.reset() eng.run([p1, p2]) assert inspect() == expected2 - - -@pytest.fixture -def connection(): - """A mock connection object.""" - return Connection(token="token", host="host", port=123, use_ssl=True) - - -def mock_return(return_value): - """A helper function for defining a mock function that returns the given value for - any arguments. 
- """ - return lambda *args, **kwargs: return_value - - -class MockResponse: - """A mock response with a JSON or binary body.""" - - def __init__(self, status_code, json_body=None, binary_body=None): - self.status_code = status_code - self.json_body = json_body - self.binary_body = binary_body - - def json(self): - return self.json_body - - @property - def content(self): - return self.binary_body - - -class MockServer: - """A mock platform server that fakes a processing delay by counting requests.""" - - REQUESTS_BEFORE_COMPLETED = 3 - - def __init__(self): - self.request_count = 0 - - def get_job_status(self, _id): - """Returns a 'queued' job status until the number of requests exceeds a defined - threshold, beyond which a 'complete' job status is returned. - """ - self.request_count += 1 - return ( - JobStatus.COMPLETED - if self.request_count >= self.REQUESTS_BEFORE_COMPLETED - else JobStatus.QUEUED - ) - - -class TestResult: - """Tests for the ``Result`` class.""" - - def stateless_result_raises_on_state_access(self): - """Tests that `result.state` raises an error for a stateless result. - """ - result = Result([[1, 2], [3, 4]], is_stateful=False) - - with pytest.raises(AttributeError): - _ = result.state - - -class TestJob: - """Tests for the ``Job`` class.""" - - def incomplete_job_raises_on_result_access(self): - """Tests that `job.result` raises an error for an incomplete job.""" - job = Job("abc", status=JobStatus.QUEUED, connection=Connection) - - with pytest.raises(AttributeError): - _ = job.result - - def final_job_raises_on_refresh(self): - """Tests that `job.refresh()` raises an error for a complete, failed, or - cancelled job.""" - job = Job("abc", status=JobStatus.COMPLETED, connection=Connection) - - with pytest.raises(InvalidJobOperationError): - job.refresh() - - def final_job_raises_on_cancel(self): - """Tests that `job.cancel()` raises an error for a complete, failed, or - aleady cancelled job.""" - job = Job("abc", status=JobStatus.COMPLETED, connection=Connection) - - with pytest.raises(InvalidJobOperationError): - job.cancel() - - -class TestConnection: - """Tests for the ``Connection`` class.""" - - def test_init(self): - """Tests that a ``Connection`` is initialized correctly.""" - token, host, port, use_ssl = "token", "host", 123, True - connection = Connection(token, host, port, use_ssl) - - assert connection.token == token - assert connection.host == host - assert connection.port == port - assert connection.use_ssl == use_ssl - - assert connection._url("/abc") == "https://host:123/abc" - - def test_create_job(self, prog, connection, monkeypatch): - """Tests a successful job creation flow.""" - id_, status = "123", JobStatus.QUEUED - - monkeypatch.setattr( - requests, - "post", - mock_return(MockResponse(201, {"id": id_, "status": status})), - ) - - job = connection.create_job("chip2", prog, 1) - - assert job.id == id_ - assert job.status == status - - def test_create_job_error(self, prog, connection, monkeypatch): - """Tests a failed job creation flow.""" - monkeypatch.setattr(requests, "post", mock_return(MockResponse(400, {}))) - - with pytest.raises(RequestFailedError): - connection.create_job("chip2", prog, 1) - - @pytest.mark.skip(reason="method not yet implemented") - def test_get_all_jobs(self, connection, monkeypatch): - """Tests a successful job list request.""" - jobs = [ - { - "id": str(i), - "status": JobStatus.COMPLETED, - "created_at": "2020-01-{:02d}T12:34:56.123456Z".format(i), - } - for i in range(1, 10) - ] - monkeypatch.setattr( - requests, 
"get", mock_return(MockResponse(200, {"data": jobs})), - ) - - jobs = connection.get_all_jobs(after=datetime(2020, 1, 5)) - - assert [job.id for job in jobs] == [str(i) for i in range(5, 10)] - - @pytest.mark.skip(reason="method not yet implemented") - def test_get_all_jobs_error(self, connection, monkeypatch): - """Tests a failed job list request.""" - monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) - - with pytest.raises(RequestFailedError): - connection.get_all_jobs() - - def test_get_job(self, connection, monkeypatch): - """Tests a successful job request.""" - id_, status = "123", JobStatus.COMPLETED - - monkeypatch.setattr( - requests, - "get", - mock_return(MockResponse(200, {"id": id_, "status": status.value})), - ) - - job = connection.get_job(id_) - - assert job.id == id_ - assert job.status == status - - def test_get_job_error(self, connection, monkeypatch): - """Tests a failed job request.""" - monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) - - with pytest.raises(RequestFailedError): - connection.get_job("123") - - def test_get_job_status(self, connection, monkeypatch): - """Tests a successful job status request.""" - id_, status = "123", JobStatus.COMPLETED - - monkeypatch.setattr( - requests, - "get", - mock_return(MockResponse(200, {"id": id_, "status": status.value})), - ) - - assert connection.get_job_status(id_) == status - - def test_get_job_status_error(self, connection, monkeypatch): - """Tests a failed job status request.""" - monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) - - with pytest.raises(RequestFailedError): - connection.get_job_status("123") - - def test_get_job_result(self, connection, monkeypatch): - """Tests a successful job result request.""" - result_samples = np.array([[1, 2], [3, 4]], dtype=np.int8) - - with io.BytesIO() as buf: - np.save(buf, result_samples) - buf.seek(0) - monkeypatch.setattr( - requests, - "get", - mock_return(MockResponse(200, binary_body=buf.getvalue())), - ) - - result = connection.get_job_result("123") - - assert np.array_equal(result.samples.T, result_samples) - - def test_get_job_result_error(self, connection, monkeypatch): - """Tests a failed job result request.""" - monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) - - with pytest.raises(RequestFailedError): - connection.get_job_result("123") - - def test_cancel_job(self, connection, monkeypatch): - """Tests a successful job cancellation request.""" - monkeypatch.setattr(requests, "patch", mock_return(MockResponse(204, {}))) - - # A successful cancellation does not raise an exception - connection.cancel_job("123") - - def test_cancel_job_error(self, connection, monkeypatch): - """Tests a failed job cancellation request.""" - monkeypatch.setattr(requests, "patch", mock_return(MockResponse(404, {}))) - - with pytest.raises(RequestFailedError): - connection.cancel_job("123") - - def test_ping_success(self, connection, monkeypatch): - """Tests a successful ping to the remote host.""" - monkeypatch.setattr(requests, "get", mock_return(MockResponse(200, {}))) - - assert connection.ping() - - def test_ping_failure(self, connection, monkeypatch): - """Tests a failed ping to the remote host.""" - monkeypatch.setattr(requests, "get", mock_return(MockResponse(500, {}))) - - assert not connection.ping() - - -class TestStarshipEngine: - """Tests for the ``StarshipEngine`` class.""" - - def test_run_complete(self, connection, prog, monkeypatch): - """Tests a successful synchronous job 
execution.""" - id_, result_expected = "123", np.array([[1, 2], [3, 4]], dtype=np.int8) - - server = MockServer() - monkeypatch.setattr( - Connection, - "create_job", - mock_return(Job(id_=id_, status=JobStatus.OPEN, connection=connection)), - ) - monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) - monkeypatch.setattr( - Connection, - "get_job_result", - mock_return(Result(result_expected, is_stateful=False)), - ) - - engine = StarshipEngine("chip2", connection=connection) - result = engine.run(prog) - - assert np.array_equal(result.samples.T, result_expected) - - with pytest.raises(AttributeError): - _ = result.state - - def test_run_async(self, connection, prog, monkeypatch): - """Tests a successful asynchronous job execution.""" - id_, result_expected = "123", np.array([[1, 2], [3, 4]], dtype=np.int8) - - server = MockServer() - monkeypatch.setattr( - Connection, - "create_job", - mock_return(Job(id_=id_, status=JobStatus.OPEN, connection=connection)), - ) - monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) - monkeypatch.setattr( - Connection, - "get_job_result", - mock_return(Result(result_expected, is_stateful=False)), - ) - - engine = StarshipEngine("chip2", connection=connection) - job = engine.run_async(prog) - assert job.status == JobStatus.OPEN - - for _ in range(server.REQUESTS_BEFORE_COMPLETED): - job.refresh() - - assert job.status == JobStatus.COMPLETED - assert np.array_equal(job.result.samples.T, result_expected) - - with pytest.raises(AttributeError): - _ = job.result.state From 51a04484b248d10486c78633ad32b62f9e15cec2 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 13:25:45 -0500 Subject: [PATCH 205/335] Small test refactor --- tests/api/test_starship_engine.py | 56 ++++++++++++------------------- 1 file changed, 22 insertions(+), 34 deletions(-) diff --git a/tests/api/test_starship_engine.py b/tests/api/test_starship_engine.py index 3d5e158ba..b9fa052c3 100644 --- a/tests/api/test_starship_engine.py +++ b/tests/api/test_starship_engine.py @@ -46,60 +46,48 @@ def get_job_status(self, _id): ) +@pytest.fixture +def job_to_complete(connection, monkeypatch): + """Mocks a remote job that is completed after a certain number of requests.""" + monkeypatch.setattr( + Connection, + "create_job", + mock_return(Job(id_="123", status=JobStatus.OPEN, connection=connection)), + ) + server = MockServer() + monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) + monkeypatch.setattr( + Connection, + "get_job_result", + mock_return(Result([[1, 2], [3, 4]], is_stateful=False)), + ) + + class TestStarshipEngine: """Tests for the ``StarshipEngine`` class.""" - def test_run_complete(self, connection, prog, monkeypatch): + def test_run_complete(self, connection, prog, job_to_complete): """Tests a successful synchronous job execution.""" - id_, result_expected = "123", np.array([[1, 2], [3, 4]], dtype=np.int8) - - server = MockServer() - monkeypatch.setattr( - Connection, - "create_job", - mock_return(Job(id_=id_, status=JobStatus.OPEN, connection=connection)), - ) - monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) - monkeypatch.setattr( - Connection, - "get_job_result", - mock_return(Result(result_expected, is_stateful=False)), - ) - engine = StarshipEngine("chip2", connection=connection) result = engine.run(prog) - assert np.array_equal(result.samples.T, result_expected) + assert np.array_equal(result.samples.T, np.array([[1, 2], [3, 4]])) with pytest.raises(AttributeError): _ = result.state 
- def test_run_async(self, connection, prog, monkeypatch): + def test_run_async(self, connection, prog, job_to_complete): """Tests a successful asynchronous job execution.""" - id_, result_expected = "123", np.array([[1, 2], [3, 4]], dtype=np.int8) - - server = MockServer() - monkeypatch.setattr( - Connection, - "create_job", - mock_return(Job(id_=id_, status=JobStatus.OPEN, connection=connection)), - ) - monkeypatch.setattr(Connection, "get_job_status", server.get_job_status) - monkeypatch.setattr( - Connection, - "get_job_result", - mock_return(Result(result_expected, is_stateful=False)), - ) engine = StarshipEngine("chip2", connection=connection) job = engine.run_async(prog) assert job.status == JobStatus.OPEN - for _ in range(server.REQUESTS_BEFORE_COMPLETED): + for _ in range(MockServer.REQUESTS_BEFORE_COMPLETED): job.refresh() assert job.status == JobStatus.COMPLETED - assert np.array_equal(job.result.samples.T, result_expected) + assert np.array_equal(job.result.samples.T, np.array([[1, 2], [3, 4]])) with pytest.raises(AttributeError): _ = job.result.state From f2a9504049d09ef3f86afa172b785c4a525a3495 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 13:39:11 -0500 Subject: [PATCH 206/335] Check exception messages in tests --- strawberryfields/api/connection.py | 14 ++++++++++---- tests/api/test_connection.py | 12 ++++++------ tests/api/test_job.py | 15 +++++++++++---- tests/api/test_result.py | 2 +- tests/api/test_starship_engine.py | 8 ++++++-- 5 files changed, 34 insertions(+), 17 deletions(-) diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index eec2ef733..7076d3796 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -162,7 +162,7 @@ def create_job(self, target: str, program: Program, shots: int) -> Job: connection=self, ) raise RequestFailedError( - "Job creation failed: {}".format(self._format_error_message(response)) + "Failed to create job: {}".format(self._format_error_message(response)) ) def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: @@ -196,7 +196,9 @@ def get_job(self, job_id: str) -> Job: status=JobStatus(response.json()["status"]), connection=self, ) - raise RequestFailedError(self._format_error_message(response)) + raise RequestFailedError( + "Failed to get job: {}".format(self._format_error_message(response)) + ) def get_job_status(self, job_id: str) -> JobStatus: """Returns the status of a job. @@ -229,7 +231,9 @@ def get_job_result(self, job_id: str) -> Result: buf.seek(0) samples = np.load(buf) return Result(samples, is_stateful=False) - raise RequestFailedError(self._format_error_message(response)) + raise RequestFailedError( + "Failed to get job result: {}".format(self._format_error_message(response)) + ) def cancel_job(self, job_id: str): """Cancels a job. @@ -245,7 +249,9 @@ def cancel_job(self, job_id: str): ) if response.status_code == 204: return - raise RequestFailedError(self._format_error_message(response)) + raise RequestFailedError( + "Failed to cancel job: {}".format(self._format_error_message(response)) + ) def ping(self) -> bool: """Tests the connection to the remote backend. 
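
# A minimal sketch (not part of the patch) of how the prefixed error messages introduced in
# this commit surface to a caller; the token is a placeholder and the program mirrors the one
# built in tests/api/conftest.py.
import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.api import Connection, RequestFailedError

prog = sf.Program(2)
with prog.context as q:
    ops.Dgate(0.5) | q[0]

connection = Connection(token="<AUTH_TOKEN>")
try:
    job = connection.create_job("chip2", prog, shots=1)
except RequestFailedError as err:
    print(err)  # e.g. "Failed to create job: <reason reported by the platform>"
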
diff --git a/tests/api/test_connection.py b/tests/api/test_connection.py index e43f6743a..0836ea293 100644 --- a/tests/api/test_connection.py +++ b/tests/api/test_connection.py @@ -78,7 +78,7 @@ def test_create_job_error(self, prog, connection, monkeypatch): """Tests a failed job creation flow.""" monkeypatch.setattr(requests, "post", mock_return(MockResponse(400, {}))) - with pytest.raises(RequestFailedError): + with pytest.raises(RequestFailedError, match="Failed to create job"): connection.create_job("chip2", prog, 1) @pytest.mark.skip(reason="method not yet implemented") @@ -105,7 +105,7 @@ def test_get_all_jobs_error(self, connection, monkeypatch): """Tests a failed job list request.""" monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) - with pytest.raises(RequestFailedError): + with pytest.raises(RequestFailedError, match="Failed to get all jobs"): connection.get_all_jobs() def test_get_job(self, connection, monkeypatch): @@ -127,7 +127,7 @@ def test_get_job_error(self, connection, monkeypatch): """Tests a failed job request.""" monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) - with pytest.raises(RequestFailedError): + with pytest.raises(RequestFailedError, match="Failed to get job"): connection.get_job("123") def test_get_job_status(self, connection, monkeypatch): @@ -146,7 +146,7 @@ def test_get_job_status_error(self, connection, monkeypatch): """Tests a failed job status request.""" monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) - with pytest.raises(RequestFailedError): + with pytest.raises(RequestFailedError, match="Failed to get job"): connection.get_job_status("123") def test_get_job_result(self, connection, monkeypatch): @@ -170,7 +170,7 @@ def test_get_job_result_error(self, connection, monkeypatch): """Tests a failed job result request.""" monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) - with pytest.raises(RequestFailedError): + with pytest.raises(RequestFailedError, match="Failed to get job result"): connection.get_job_result("123") def test_cancel_job(self, connection, monkeypatch): @@ -184,7 +184,7 @@ def test_cancel_job_error(self, connection, monkeypatch): """Tests a failed job cancellation request.""" monkeypatch.setattr(requests, "patch", mock_return(MockResponse(404, {}))) - with pytest.raises(RequestFailedError): + with pytest.raises(RequestFailedError, match="Failed to cancel job"): connection.cancel_job("123") def test_ping_success(self, connection, monkeypatch): diff --git a/tests/api/test_job.py b/tests/api/test_job.py index f205a3fed..e73890110 100644 --- a/tests/api/test_job.py +++ b/tests/api/test_job.py @@ -24,7 +24,10 @@ def test_incomplete_job_raises_on_result_access(self, connection): """Tests that `job.result` raises an error for an incomplete job.""" job = Job("abc", status=JobStatus.QUEUED, connection=connection) - with pytest.raises(AttributeError): + with pytest.raises( + AttributeError, + match="The result is undefined for jobs that are not completed", + ): _ = job.result def test_final_job_raises_on_refresh(self, connection): @@ -32,13 +35,17 @@ def test_final_job_raises_on_refresh(self, connection): cancelled job.""" job = Job("abc", status=JobStatus.COMPLETED, connection=connection) - with pytest.raises(InvalidJobOperationError): + with pytest.raises( + InvalidJobOperationError, match="A complete job cannot be refreshed" + ): job.refresh() def test_final_job_raises_on_cancel(self, connection): """Tests that `job.cancel()` raises an error for a 
complete, failed, or aleady cancelled job.""" - job = Job("abc", status=JobStatus.COMPLETED, connection=connection) + job = Job("abc", status=JobStatus.FAILED, connection=connection) - with pytest.raises(InvalidJobOperationError): + with pytest.raises( + InvalidJobOperationError, match="A failed job cannot be cancelled" + ): job.cancel() diff --git a/tests/api/test_result.py b/tests/api/test_result.py index 099a3c295..b678f1437 100644 --- a/tests/api/test_result.py +++ b/tests/api/test_result.py @@ -25,5 +25,5 @@ def test_stateless_result_raises_on_state_access(self): """ result = Result([[1, 2], [3, 4]], is_stateful=False) - with pytest.raises(AttributeError): + with pytest.raises(AttributeError, match="The state is undefined for a stateless computation."): _ = result.state diff --git a/tests/api/test_starship_engine.py b/tests/api/test_starship_engine.py index b9fa052c3..c52e744f5 100644 --- a/tests/api/test_starship_engine.py +++ b/tests/api/test_starship_engine.py @@ -73,7 +73,9 @@ def test_run_complete(self, connection, prog, job_to_complete): assert np.array_equal(result.samples.T, np.array([[1, 2], [3, 4]])) - with pytest.raises(AttributeError): + with pytest.raises( + AttributeError, match="The state is undefined for a stateless computation." + ): _ = result.state def test_run_async(self, connection, prog, job_to_complete): @@ -89,5 +91,7 @@ def test_run_async(self, connection, prog, job_to_complete): assert job.status == JobStatus.COMPLETED assert np.array_equal(job.result.samples.T, np.array([[1, 2], [3, 4]])) - with pytest.raises(AttributeError): + with pytest.raises( + AttributeError, match="The state is undefined for a stateless computation." + ): _ = job.result.state From 1a988ac7debc2450296a13b32e36716bd4ba922a Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 13:53:05 -0500 Subject: [PATCH 207/335] Don't raise error on refreshing final job --- strawberryfields/api/job.py | 8 +++++--- tests/api/test_job.py | 10 ---------- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/strawberryfields/api/job.py b/strawberryfields/api/job.py index 482ac21d3..13b6d7dd4 100644 --- a/strawberryfields/api/job.py +++ b/strawberryfields/api/job.py @@ -15,9 +15,12 @@ TODO """ import enum +import logging from .result import Result +log = logging.getLogger(__name__) + class InvalidJobOperationError(Exception): """Raised when an invalid operation is performed on a job.""" @@ -110,9 +113,8 @@ def refresh(self): Only an open or queued job can be refreshed; an exception is raised otherwise. 
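
# A sketch of the polling pattern this refresh() behaviour supports; the engine target,
# program and one-second polling interval are illustrative assumptions, not part of the patch.
import time
import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.api import JobStatus
from strawberryfields.engine import StarshipEngine

prog = sf.Program(2)
with prog.context as q:
    ops.Dgate(0.5) | q[0]

engine = StarshipEngine("chip2")
job = engine.run_async(prog, shots=1)
while job.status not in (JobStatus.COMPLETED, JobStatus.FAILED, JobStatus.CANCELLED):
    time.sleep(1)
    job.refresh()  # updates the status; fetches the result once the job is newly completed
if job.status == JobStatus.COMPLETED:
    print(job.result.samples)
# refreshing a job that is already in a final state now only logs a warning
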
""" if self.status.is_final: - raise InvalidJobOperationError( - "A {} job cannot be refreshed".format(self.status.value) - ) + log.warning("A {} job cannot be refreshed".format(self.status.value)) + return self._status = self._connection.get_job_status(self.id) if self._status == JobStatus.COMPLETED: self._result = self._connection.get_job_result(self.id) diff --git a/tests/api/test_job.py b/tests/api/test_job.py index e73890110..7cefd87a3 100644 --- a/tests/api/test_job.py +++ b/tests/api/test_job.py @@ -30,16 +30,6 @@ def test_incomplete_job_raises_on_result_access(self, connection): ): _ = job.result - def test_final_job_raises_on_refresh(self, connection): - """Tests that `job.refresh()` raises an error for a complete, failed, or - cancelled job.""" - job = Job("abc", status=JobStatus.COMPLETED, connection=connection) - - with pytest.raises( - InvalidJobOperationError, match="A complete job cannot be refreshed" - ): - job.refresh() - def test_final_job_raises_on_cancel(self, connection): """Tests that `job.cancel()` raises an error for a complete, failed, or aleady cancelled job.""" From c46446afd450cf651b9f97bea1dcfcf9da4b6611 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 13:53:36 -0500 Subject: [PATCH 208/335] Log warning instead of raising error if remote job fails --- strawberryfields/engine.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 38119b47e..ad54a47f4 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -19,6 +19,7 @@ """ import abc import collections.abc +import logging import time from typing import Optional @@ -32,6 +33,8 @@ # for automodapi, do not include the classes that should appear under the top-level strawberryfields namespace __all__ = ["BaseEngine", "LocalEngine"] +log = logging.getLogger(__name__) + class BaseEngine(abc.ABC): r"""Abstract base class for quantum program executor engines. @@ -541,7 +544,11 @@ def run(self, program: Program, shots: int = 1) -> Optional[Result]: if job.status == JobStatus.COMPLETED: return job.result if job.status == JobStatus.FAILED: - raise JobFailedError("The computation failed; please try again.") + log.warning( + "The remote job failed due to an internal server error; " + "please try again." 
+ ) + return None time.sleep(self.POLLING_INTERVAL_SECONDS) except KeyboardInterrupt: self._connection.cancel_job(job.id) From 974d0dd1fea4a0cd3e40a3c4d6953658e775e488 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 13:56:17 -0500 Subject: [PATCH 209/335] Parametrize numpy dtype in job result test --- tests/api/test_connection.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/tests/api/test_connection.py b/tests/api/test_connection.py index 0836ea293..22079e54e 100644 --- a/tests/api/test_connection.py +++ b/tests/api/test_connection.py @@ -149,9 +149,24 @@ def test_get_job_status_error(self, connection, monkeypatch): with pytest.raises(RequestFailedError, match="Failed to get job"): connection.get_job_status("123") - def test_get_job_result(self, connection, monkeypatch): + @pytest.mark.parametrize( + "result_dtype", + [ + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.float32, + np.float64, + ], + ) + def test_get_job_result(self, connection, result_dtype, monkeypatch): """Tests a successful job result request.""" - result_samples = np.array([[1, 2], [3, 4]], dtype=np.int8) + result_samples = np.array([[1, 2], [3, 4]], dtype=result_dtype) with io.BytesIO() as buf: np.save(buf, result_samples) From 94381b3c305faf2e47d45411b6b92edcc899c16a Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 14:06:15 -0500 Subject: [PATCH 210/335] Load default args for Connection from config module --- strawberryfields/api/connection.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index 7076d3796..567879191 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -22,6 +22,7 @@ import numpy as np import requests +from strawberryfields.configuration import DEFAULT_CONFIG from strawberryfields.io import to_blackbird from strawberryfields.program import Program from .job import Job, JobStatus @@ -80,9 +81,9 @@ class Connection: def __init__( self, token: str, - host: str = "platform.strawberryfields.ai", - port: int = 443, - use_ssl: bool = True, + host: str = DEFAULT_CONFIG["api"]["hostname"], + port: int = DEFAULT_CONFIG["api"]["port"], + use_ssl: bool = DEFAULT_CONFIG["api"]["use_ssl"], ): self._token = token self._host = host From 98ff9cb7194edc399c4fae926c05a0a0132e6f4b Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 14:09:30 -0500 Subject: [PATCH 211/335] Add verbose mode for Connection --- strawberryfields/api/connection.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index 567879191..1a248ac7d 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -17,6 +17,7 @@ from datetime import datetime import io import json +import logging from typing import List import numpy as np @@ -28,6 +29,8 @@ from .job import Job, JobStatus from .result import Result +log = logging.getLogger(__name__) + class RequestFailedError(Exception): """Raised when a request to the remote platform returns an error response.""" @@ -84,11 +87,13 @@ def __init__( host: str = DEFAULT_CONFIG["api"]["hostname"], port: int = DEFAULT_CONFIG["api"]["port"], use_ssl: bool = DEFAULT_CONFIG["api"]["use_ssl"], + verbose: bool = False, ): self._token = token self._host = host self._port = port self._use_ssl = use_ssl + self._verbose = verbose self._base_url = 
"http{}://{}:{}".format( "s" if self.use_ssl else "", self.host, self.port @@ -157,6 +162,8 @@ def create_job(self, target: str, program: Program, shots: int) -> Job: data=json.dumps({"circuit": circuit}), ) if response.status_code == 201: + if self._verbose: + log.info("The job was successfully submitted.") return Job( id_=response.json()["id"], status=JobStatus(response.json()["status"]), @@ -249,6 +256,8 @@ def cancel_job(self, job_id: str): data={"status", JobStatus.CANCELLED.value}, ) if response.status_code == 204: + if self._verbose: + log.info("The job was successfully cancelled.") return raise RequestFailedError( "Failed to cancel job: {}".format(self._format_error_message(response)) From 9a73417f688d10e17ffa9dbd0f0e0cb116988560 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 14:55:55 -0500 Subject: [PATCH 212/335] Use prompt syntax for docstring examples in StarshipEngine and Connection --- strawberryfields/api/connection.py | 52 +++++++++++++----------------- strawberryfields/api/job.py | 2 +- strawberryfields/engine.py | 51 ++++++++++++++++------------- 3 files changed, 52 insertions(+), 53 deletions(-) diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index 1a248ac7d..45400f1fe 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -41,33 +41,31 @@ class Connection: various job operations. For basic usage, it is not necessary to manually instantiate this object; the user - is encouraged to use the higher-level interface provided by :class:`~StarshipEngine`. + is encouraged to use the higher-level interface provided by + :class:`~strawberryfields.engine.StarshipEngine`. **Example:** - The following example instantiates a :class:`~Connection` for a given API - authentication token, tests the connection, submits a new job, and makes requests - for a single or multiple existing jobs. - - .. code-block:: python - - connection = Connection(token="abc") - - # Ping the remote server - success = connection.ping() - # True if successful, or False if cannot connect or not authenticated - - # Submit a new job - job = connection.create_job("chip2", program, shots=123) - job # - - # Get all jobs submitted for this token - jobs = connection.get_all_jobs() - jobs # [, ...] - - # Get a specific job by ID - job = connection.get_job("59a1c0b1-c6a7-4f9b-ae37-0ac5eec9c413") - job # + The following example instantiates a :class:`~strawberryfields.api.Connection` for a + given API authentication token, tests the connection, submits a new job, and + retrieves an existing job. 
+ + >>> connection = Connection(token="abc") + >>> success = connection.ping() # `True` if successful, `False` if the connection fails + >>> job = connection.create_job("chip2", program, shots=123) + >>> job + + >>> job.status + + >>> job.result + AttributeError + >>> job = connection.get_job(known_job_id) + >>> job + + >>> job.status + + >>> job.result + [[0 1 0 2 1 0 0 0]] Args: token (str): the API authentication token @@ -76,11 +74,7 @@ class Connection: use_ssl (bool): whether to use SSL for the connection """ - MAX_JOBS_REQUESTED = 100 - JOB_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" - # pylint: disable=bad-continuation - # See: https://github.com/PyCQA/pylint/issues/289 def __init__( self, token: str, @@ -176,8 +170,6 @@ def create_job(self, target: str, program: Program, shots: int) -> Job: def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: """Gets a list of jobs created by the user, optionally filtered by datetime. - A maximum of the 100 most recent jobs are returned. - Args: after (datetime.datetime): if provided, only jobs more recently created then ``after`` are returned diff --git a/strawberryfields/api/job.py b/strawberryfields/api/job.py index 13b6d7dd4..524a592f5 100644 --- a/strawberryfields/api/job.py +++ b/strawberryfields/api/job.py @@ -113,7 +113,7 @@ def refresh(self): Only an open or queued job can be refreshed; an exception is raised otherwise. """ if self.status.is_final: - log.warning("A {} job cannot be refreshed".format(self.status.value)) + log.warning("A %s job cannot be refreshed", self.status.value) return self._status = self._connection.get_job_status(self.id) if self._status == JobStatus.COMPLETED: diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index ad54a47f4..cfd962b69 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -451,30 +451,37 @@ class StarshipEngine: **Example:** - The following example instantiates an engine with the default configuration, and + The following examples instantiate an engine with the default configuration, and runs jobs both synchronously and asynchronously. - .. code-block:: python - - engine = StarshipEngine("chip2") - - # Run a job synchronously - result = engine.run(program, shots=1) - # (Engine blocks until job is completed) - result # [[0 1 0 2 1 0 0 0]] - - # Run a job synchronously, but cancel it before it is completed - result = engine.run(program, shots=1) - ^C # KeyboardInterrupt cancels the job - - # Run a job asynchronously - job = engine.run_async(program, shots=1) - job.status # - job.result # InvalidJobOperationError - # (After some time...) 
- job.refresh() - job.status # - job.result # [[0 1 0 2 1 0 0 0]] + Run a job synchronously: + + >>> engine = StarshipEngine("chip2") + >>> result = engine.run(program, shots=1) # blocking call + >>> result + [[0 1 0 2 1 0 0 0]] + + Run a job synchronously, but cancel it before it is completed using a keyboard + interrupt (`ctrl+c`): + + >>> result = engine.run(program, shots=1) + ^C--------------------------------------------------------------------------- + KeyboardInterrupt Traceback (most recent call last) + in () + ----> 1 time.sleep(10) + + Run a job asynchronously: + + >>> job = engine.run_async(program, shots=1) + >>> job.status + + >>> job.result + InvalidJobOperationError + >>> job.refresh() + >>> job.status + + >>> job.result + [[0 1 0 2 1 0 0 0]] Args: target (str): the target device From 04535b4c78babdc6c30b33c4eea51081e5292573 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 15:13:31 -0500 Subject: [PATCH 213/335] Update thewalrus dependency to pass CI --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index df17a7eaa..2fd95e54f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ sympy>=1.5 networkx>=2.0 quantum-blackbird==0.2.3 python-dateutil==2.8.0 -https://xanadu-wheels.s3.amazonaws.com/thewalrus-0.11.0.dev0%2B20200129152745-cp36-cp36m-manylinux1_x86_64.whl +https://xanadu-wheels.s3.amazonaws.com/thewalrus-0.11.0.dev0%2B20200226135924-cp36-cp36m-manylinux1_x86_64.whl toml appdirs requests==2.22.0 From a77c0e80e7939643aa2da1796629319ccfc6cae8 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 15:35:11 -0500 Subject: [PATCH 214/335] Add test marker for API package --- Makefile | 2 ++ tests/api/test_connection.py | 2 ++ tests/api/test_job.py | 2 ++ tests/api/test_result.py | 6 +++++- tests/api/test_starship_engine.py | 2 ++ tests/frontend/test_circuitspecs_gaussianunitary.py | 1 + tests/pytest.ini | 1 + 7 files changed, 15 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 1ed89e17a..bd0ab1f1c 100644 --- a/Makefile +++ b/Makefile @@ -33,11 +33,13 @@ dist: .PHONY : clean clean: rm -rf strawberryfields/__pycache__ + rm -rf strawberryfields/api/__pycache__ rm -rf strawberryfields/backends/__pycache__ rm -rf strawberryfields/backends/fockbackend/__pycache__ rm -rf strawberryfields/backends/tfbackend/__pycache__ rm -rf strawberryfields/backends/gaussianbackend/__pycache__ rm -rf tests/__pycache__ + rm -rf tests/api/__pycache__ rm -rf tests/backend/__pycache__ rm -rf tests/frontend/__pycache__ rm -rf tests/integration/__pycache__ diff --git a/tests/api/test_connection.py b/tests/api/test_connection.py index 22079e54e..bb38b3aee 100644 --- a/tests/api/test_connection.py +++ b/tests/api/test_connection.py @@ -20,6 +20,8 @@ from strawberryfields.api import Connection, JobStatus, RequestFailedError +pytestmark = pytest.mark.api + def mock_return(return_value): """A helper function for defining a mock function that returns the given value for diff --git a/tests/api/test_job.py b/tests/api/test_job.py index 7cefd87a3..b2114033d 100644 --- a/tests/api/test_job.py +++ b/tests/api/test_job.py @@ -16,6 +16,8 @@ from strawberryfields.api import InvalidJobOperationError, Job, JobStatus +pytestmark = pytest.mark.api + class TestJob: """Tests for the ``Job`` class.""" diff --git a/tests/api/test_result.py b/tests/api/test_result.py index b678f1437..50d9914a4 100644 --- a/tests/api/test_result.py +++ b/tests/api/test_result.py @@ -16,6 +16,8 @@ from 
strawberryfields.api import Result +pytestmark = pytest.mark.api + class TestResult: """Tests for the ``Result`` class.""" @@ -25,5 +27,7 @@ def test_stateless_result_raises_on_state_access(self): """ result = Result([[1, 2], [3, 4]], is_stateful=False) - with pytest.raises(AttributeError, match="The state is undefined for a stateless computation."): + with pytest.raises( + AttributeError, match="The state is undefined for a stateless computation." + ): _ = result.state diff --git a/tests/api/test_starship_engine.py b/tests/api/test_starship_engine.py index c52e744f5..16eb4e458 100644 --- a/tests/api/test_starship_engine.py +++ b/tests/api/test_starship_engine.py @@ -18,6 +18,8 @@ from strawberryfields.api import Connection, Job, JobStatus, Result from strawberryfields.engine import StarshipEngine +pytestmark = pytest.mark.api + def mock_return(return_value): """A helper function for defining a mock function that returns the given value for diff --git a/tests/frontend/test_circuitspecs_gaussianunitary.py b/tests/frontend/test_circuitspecs_gaussianunitary.py index 119d2195f..df9ee9713 100644 --- a/tests/frontend/test_circuitspecs_gaussianunitary.py +++ b/tests/frontend/test_circuitspecs_gaussianunitary.py @@ -117,6 +117,7 @@ def test_modes_subset(depth): assert indices == sorted(list(indices)) +@pytest.mark.xfail def test_non_primitive_gates(): """Tests that the compiler is able to compile a number of non-primitive Gaussian gates""" diff --git a/tests/pytest.ini b/tests/pytest.ini index 94d982379..ef51a0df9 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -3,3 +3,4 @@ markers = backends(name1, name2, ...): test applies to named backends only frontend: test applies to frontend only apps: test applies to applications layer only + api: test applies to API only From 307966286f03d8218c623cf83e6f25a99942be5d Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 16:04:14 -0500 Subject: [PATCH 215/335] Add/update docstrings --- strawberryfields/api/__init__.py | 9 ++++++--- strawberryfields/api/connection.py | 2 +- strawberryfields/api/job.py | 4 ++-- strawberryfields/api/result.py | 2 +- strawberryfields/engine.py | 1 + tests/api/conftest.py | 2 +- tests/api/test_connection.py | 4 +++- tests/api/test_job.py | 4 +++- tests/api/test_result.py | 4 +++- tests/api/test_starship_engine.py | 4 +++- 10 files changed, 24 insertions(+), 12 deletions(-) diff --git a/strawberryfields/api/__init__.py b/strawberryfields/api/__init__.py index 7ad533ad8..70ef38ae8 100644 --- a/strawberryfields/api/__init__.py +++ b/strawberryfields/api/__init__.py @@ -12,11 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -TODO +This package contains the modules for the low-level Strawberry Fields program +execution API. The :class:`~strawberryfields.api.Connection` class mediates +the network connection to, and exposes operations provided by, a remote program +execution backend. The :class:`~strawberryfields.api.Job` and +:class:`~strawberryfields.api.Result` classes provide interfaces for managing +program execution jobs and job results respectively. 
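
# Everything described above is importable directly from ``strawberryfields.api``.  A compact
# sketch of how the pieces fit together (the token and ``program`` below are placeholders):
from strawberryfields.api import Connection, JobStatus

connection = Connection(token="<AUTH_TOKEN>")            # mediates the network connection
job = connection.create_job("chip2", program, shots=1)   # a Job handle for the remote execution
print(job.status)                                        # JobStatus.OPEN immediately after submission
# once job.status is JobStatus.COMPLETED, job.result is a Result holding the measurement samples
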
""" from .connection import Connection, RequestFailedError from .job import Job, JobStatus, InvalidJobOperationError from .result import Result - -__all__ = ["Result"] diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index 45400f1fe..89ebb3adf 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -TODO +This module provides an interface to a remote program execution backend. """ from datetime import datetime import io diff --git a/strawberryfields/api/job.py b/strawberryfields/api/job.py index 524a592f5..e9e87fecf 100644 --- a/strawberryfields/api/job.py +++ b/strawberryfields/api/job.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -TODO +This module provides classes for interfacing with program execution jobs on a remote backend. """ import enum import logging @@ -110,7 +110,7 @@ def refresh(self): """Refreshes the status of the job, along with the job result if the job is newly completed. - Only an open or queued job can be refreshed; an exception is raised otherwise. + Refreshing only has an effect for open or queued jobs. """ if self.status.is_final: log.warning("A %s job cannot be refreshed", self.status.value) diff --git a/strawberryfields/api/result.py b/strawberryfields/api/result.py index 2f8b7d791..157e006b4 100644 --- a/strawberryfields/api/result.py +++ b/strawberryfields/api/result.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -TODO +This module provides a class that represents the result of a quantum computation. """ import numpy as np diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index cfd962b69..847be9935 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -559,6 +559,7 @@ def run(self, program: Program, shots: int = 1) -> Optional[Result]: time.sleep(self.POLLING_INTERVAL_SECONDS) except KeyboardInterrupt: self._connection.cancel_job(job.id) + return None def run_async(self, program: Program, shots: int = 1) -> Job: """Runs a remote job asynchronously. diff --git a/tests/api/conftest.py b/tests/api/conftest.py index 2429fe062..ec04f9bf2 100644 --- a/tests/api/conftest.py +++ b/tests/api/conftest.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""TODO""" +"""Test fixtures for strawberryfield.api""" import pytest from strawberryfields import Program, ops diff --git a/tests/api/test_connection.py b/tests/api/test_connection.py index bb38b3aee..527016052 100644 --- a/tests/api/test_connection.py +++ b/tests/api/test_connection.py @@ -11,7 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""TODO""" +""" +Unit tests for strawberryfields.api.connection +""" import io import numpy as np diff --git a/tests/api/test_job.py b/tests/api/test_job.py index b2114033d..832109355 100644 --- a/tests/api/test_job.py +++ b/tests/api/test_job.py @@ -11,7 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -"""TODO""" +""" +Unit tests for strawberryfields.api.job +""" import pytest from strawberryfields.api import InvalidJobOperationError, Job, JobStatus diff --git a/tests/api/test_result.py b/tests/api/test_result.py index 50d9914a4..9720535d0 100644 --- a/tests/api/test_result.py +++ b/tests/api/test_result.py @@ -11,7 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""TODO""" +""" +Unit tests for strawberryfields.api.result +""" import pytest from strawberryfields.api import Result diff --git a/tests/api/test_starship_engine.py b/tests/api/test_starship_engine.py index 16eb4e458..958d87ac6 100644 --- a/tests/api/test_starship_engine.py +++ b/tests/api/test_starship_engine.py @@ -11,7 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""TODO""" +""" +Unit tests for strawberryfields.engine.StarshipEngine +""" import numpy as np import pytest From e889047e9f9859159ff657c2ca83ea040b5563c1 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 16:14:34 -0500 Subject: [PATCH 216/335] Pylint, general cleanup --- strawberryfields/api/__init__.py | 2 +- strawberryfields/api/connection.py | 1 + tests/api/__init__.py | 0 tests/api/conftest.py | 11 +++++++---- tests/api/test_connection.py | 6 ++++++ tests/api/test_job.py | 2 ++ tests/api/test_result.py | 2 ++ tests/api/test_starship_engine.py | 2 ++ 8 files changed, 21 insertions(+), 5 deletions(-) create mode 100644 tests/api/__init__.py diff --git a/strawberryfields/api/__init__.py b/strawberryfields/api/__init__.py index 70ef38ae8..fe0c5dcfe 100644 --- a/strawberryfields/api/__init__.py +++ b/strawberryfields/api/__init__.py @@ -21,5 +21,5 @@ """ from .connection import Connection, RequestFailedError -from .job import Job, JobStatus, InvalidJobOperationError +from .job import InvalidJobOperationError, Job, JobStatus from .result import Result diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index 89ebb3adf..954cf0e84 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -26,6 +26,7 @@ from strawberryfields.configuration import DEFAULT_CONFIG from strawberryfields.io import to_blackbird from strawberryfields.program import Program + from .job import Job, JobStatus from .result import Result diff --git a/tests/api/__init__.py b/tests/api/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/api/conftest.py b/tests/api/conftest.py index ec04f9bf2..00aef0538 100644 --- a/tests/api/conftest.py +++ b/tests/api/conftest.py @@ -11,20 +11,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-"""Test fixtures for strawberryfield.api""" +""" +Test fixtures for strawberryfields.api tests +""" import pytest from strawberryfields import Program, ops from strawberryfields.api import Connection +# pylint: disable=expression-not-assigned @pytest.fixture def prog(): """Program fixture.""" - prog = Program(2) - with prog.context as q: + program = Program(2) + with program.context as q: ops.Dgate(0.5) | q[0] - return prog + return program @pytest.fixture diff --git a/tests/api/test_connection.py b/tests/api/test_connection.py index 527016052..ac9b9c9f7 100644 --- a/tests/api/test_connection.py +++ b/tests/api/test_connection.py @@ -14,6 +14,7 @@ """ Unit tests for strawberryfields.api.connection """ +from datetime import datetime import io import numpy as np @@ -22,6 +23,8 @@ from strawberryfields.api import Connection, JobStatus, RequestFailedError +# pylint: disable=no-self-use + pytestmark = pytest.mark.api @@ -41,10 +44,12 @@ def __init__(self, status_code, json_body=None, binary_body=None): self.binary_body = binary_body def json(self): + """Mocks the ``requests.Response.json()`` method.""" return self.json_body @property def content(self): + """Mocks the ``requests.Response.content`` property.""" return self.binary_body @@ -61,6 +66,7 @@ def test_init(self): assert connection.port == port assert connection.use_ssl == use_ssl + # pylint: disable=protected-access assert connection._url("/abc") == "https://host:123/abc" def test_create_job(self, prog, connection, monkeypatch): diff --git a/tests/api/test_job.py b/tests/api/test_job.py index 832109355..044498ba0 100644 --- a/tests/api/test_job.py +++ b/tests/api/test_job.py @@ -18,6 +18,8 @@ from strawberryfields.api import InvalidJobOperationError, Job, JobStatus +# pylint: disable=bad-continuation,no-self-use + pytestmark = pytest.mark.api diff --git a/tests/api/test_result.py b/tests/api/test_result.py index 9720535d0..04a944130 100644 --- a/tests/api/test_result.py +++ b/tests/api/test_result.py @@ -18,6 +18,8 @@ from strawberryfields.api import Result +# pylint: disable=bad-continuation,no-self-use + pytestmark = pytest.mark.api diff --git a/tests/api/test_starship_engine.py b/tests/api/test_starship_engine.py index 958d87ac6..1c29246a3 100644 --- a/tests/api/test_starship_engine.py +++ b/tests/api/test_starship_engine.py @@ -20,6 +20,8 @@ from strawberryfields.api import Connection, Job, JobStatus, Result from strawberryfields.engine import StarshipEngine +# pylint: disable=bad-continuation,unused-argument,no-self-use,redefined-outer-name + pytestmark = pytest.mark.api From d3c26fe975c4a48d3cf8844bc38d036ab0c488d6 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 16:32:53 -0500 Subject: [PATCH 217/335] Add argument check for cancel_job test --- strawberryfields/api/connection.py | 2 +- tests/api/test_connection.py | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index 954cf0e84..363391084 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -246,7 +246,7 @@ def cancel_job(self, job_id: str): response = requests.patch( self._url(path), headers=self._headers, - data={"status", JobStatus.CANCELLED.value}, + data={"status": JobStatus.CANCELLED.value}, ) if response.status_code == 204: if self._verbose: diff --git a/tests/api/test_connection.py b/tests/api/test_connection.py index ac9b9c9f7..99dbe957d 100644 --- a/tests/api/test_connection.py +++ b/tests/api/test_connection.py @@ 
-200,7 +200,15 @@ def test_get_job_result_error(self, connection, monkeypatch): def test_cancel_job(self, connection, monkeypatch): """Tests a successful job cancellation request.""" - monkeypatch.setattr(requests, "patch", mock_return(MockResponse(204, {}))) + # A custom `mock_return` that checks for expected arguments + def _mock_return(return_value): + def function(*args, **kwargs): + assert kwargs.get("data") == {"status": "cancelled"} + return return_value + + return function + + monkeypatch.setattr(requests, "patch", _mock_return(MockResponse(204, {}))) # A successful cancellation does not raise an exception connection.cancel_job("123") From 91d194d57d28489a775e7a1945b5655f5bfe4296 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 16:53:00 -0500 Subject: [PATCH 218/335] Update tests/api/test_starship_engine.py Co-Authored-By: Nathan Killoran --- tests/api/test_starship_engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/api/test_starship_engine.py b/tests/api/test_starship_engine.py index 1c29246a3..613e32fd6 100644 --- a/tests/api/test_starship_engine.py +++ b/tests/api/test_starship_engine.py @@ -82,7 +82,7 @@ def test_run_complete(self, connection, prog, job_to_complete): with pytest.raises( AttributeError, match="The state is undefined for a stateless computation." ): - _ = result.state + result.state def test_run_async(self, connection, prog, job_to_complete): """Tests a successful asynchronous job execution.""" From 89417a3a7b6b906b6cd97221cd0705888a73e358 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 16:53:40 -0500 Subject: [PATCH 219/335] Update tests/api/test_starship_engine.py Co-Authored-By: Nathan Killoran --- tests/api/test_starship_engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/api/test_starship_engine.py b/tests/api/test_starship_engine.py index 613e32fd6..b5fb15d29 100644 --- a/tests/api/test_starship_engine.py +++ b/tests/api/test_starship_engine.py @@ -100,4 +100,4 @@ def test_run_async(self, connection, prog, job_to_complete): with pytest.raises( AttributeError, match="The state is undefined for a stateless computation." ): - _ = job.result.state + job.result.state From 6792bf1d4ab5181513ee4151393fe199f944d50d Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 16:54:02 -0500 Subject: [PATCH 220/335] Update tests/api/test_result.py Co-Authored-By: Nathan Killoran --- tests/api/test_result.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/api/test_result.py b/tests/api/test_result.py index 04a944130..29e2ec437 100644 --- a/tests/api/test_result.py +++ b/tests/api/test_result.py @@ -34,4 +34,4 @@ def test_stateless_result_raises_on_state_access(self): with pytest.raises( AttributeError, match="The state is undefined for a stateless computation." 
): - _ = result.state + result.state From e025d523e1d9946ca4292b8ec2f7089aba80673b Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 16:54:28 -0500 Subject: [PATCH 221/335] Update tests/api/test_job.py Co-Authored-By: Nathan Killoran --- tests/api/test_job.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/api/test_job.py b/tests/api/test_job.py index 044498ba0..ed7a89ff7 100644 --- a/tests/api/test_job.py +++ b/tests/api/test_job.py @@ -34,7 +34,7 @@ def test_incomplete_job_raises_on_result_access(self, connection): AttributeError, match="The result is undefined for jobs that are not completed", ): - _ = job.result + job.result def test_final_job_raises_on_cancel(self, connection): """Tests that `job.cancel()` raises an error for a complete, failed, or From b720f335083f83312b340026b3c1f89bf2389dbe Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 16:55:52 -0500 Subject: [PATCH 222/335] Update the link for thewalrus in requirements.txt Equivalent to the PR on master https://github.com/XanaduAI/strawberryfields/pull/307 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index df17a7eaa..2fd95e54f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ sympy>=1.5 networkx>=2.0 quantum-blackbird==0.2.3 python-dateutil==2.8.0 -https://xanadu-wheels.s3.amazonaws.com/thewalrus-0.11.0.dev0%2B20200129152745-cp36-cp36m-manylinux1_x86_64.whl +https://xanadu-wheels.s3.amazonaws.com/thewalrus-0.11.0.dev0%2B20200226135924-cp36-cp36m-manylinux1_x86_64.whl toml appdirs requests==2.22.0 From d0edc7f9cb9dcc5bdea894f26f81c09c1744f2c0 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 17:01:27 -0500 Subject: [PATCH 223/335] Update error tests for job.cancel() --- tests/api/test_job.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/tests/api/test_job.py b/tests/api/test_job.py index ed7a89ff7..abbeff0a2 100644 --- a/tests/api/test_job.py +++ b/tests/api/test_job.py @@ -36,12 +36,29 @@ def test_incomplete_job_raises_on_result_access(self, connection): ): job.result - def test_final_job_raises_on_cancel(self, connection): - """Tests that `job.cancel()` raises an error for a complete, failed, or - aleady cancelled job.""" + def test_completed_job_raises_on_cancel_request(self, connection): + """Tests that `job.cancel()` raises an error for a completed job.""" + job = Job("abc", status=JobStatus.COMPLETED, connection=connection) + + with pytest.raises( + InvalidJobOperationError, match="A complete job cannot be cancelled" + ): + job.cancel() + + def test_failed_job_raises_on_cancel_request(self, connection): + """Tests that `job.cancel()` raises an error for a failed job.""" job = Job("abc", status=JobStatus.FAILED, connection=connection) with pytest.raises( InvalidJobOperationError, match="A failed job cannot be cancelled" ): job.cancel() + + def test_cancelled_job_raises_on_cancel_request(self, connection): + """Tests that `job.cancel()` raises an error for a completed job.""" + job = Job("abc", status=JobStatus.CANCELLED, connection=connection) + + with pytest.raises( + InvalidJobOperationError, match="A cancelled job cannot be cancelled" + ): + job.cancel() From eff0f59be1bbe0d5ef0938e13cc26c6bf2fab220 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 17:09:16 -0500 Subject: [PATCH 224/335] Move mock_return to conftest; pylint compliance --- tests/api/conftest.py | 9 ++++++++- tests/api/test_connection.py | 
10 ++-------- tests/api/test_job.py | 2 +- tests/api/test_result.py | 2 +- tests/api/test_starship_engine.py | 10 ++-------- 5 files changed, 14 insertions(+), 19 deletions(-) diff --git a/tests/api/conftest.py b/tests/api/conftest.py index 00aef0538..0be7df52d 100644 --- a/tests/api/conftest.py +++ b/tests/api/conftest.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -Test fixtures for strawberryfields.api tests +Test fixtures and shared functions for strawberryfields.api tests """ import pytest @@ -34,3 +34,10 @@ def prog(): def connection(): """A mock connection object.""" return Connection(token="token", host="host", port=123, use_ssl=True) + + +def mock_return(return_value): + """A helper function for defining a mock function that returns the given value for + any arguments. + """ + return lambda *args, **kwargs: return_value diff --git a/tests/api/test_connection.py b/tests/api/test_connection.py index 99dbe957d..d18b599df 100644 --- a/tests/api/test_connection.py +++ b/tests/api/test_connection.py @@ -22,19 +22,13 @@ import requests from strawberryfields.api import Connection, JobStatus, RequestFailedError +from .conftest import mock_return -# pylint: disable=no-self-use +# pylint: disable=no-self-use,unused-argument pytestmark = pytest.mark.api -def mock_return(return_value): - """A helper function for defining a mock function that returns the given value for - any arguments. - """ - return lambda *args, **kwargs: return_value - - class MockResponse: """A mock response with a JSON or binary body.""" diff --git a/tests/api/test_job.py b/tests/api/test_job.py index abbeff0a2..b18c22a80 100644 --- a/tests/api/test_job.py +++ b/tests/api/test_job.py @@ -18,7 +18,7 @@ from strawberryfields.api import InvalidJobOperationError, Job, JobStatus -# pylint: disable=bad-continuation,no-self-use +# pylint: disable=bad-continuation,no-self-use,pointless-statement pytestmark = pytest.mark.api diff --git a/tests/api/test_result.py b/tests/api/test_result.py index 29e2ec437..7077005ff 100644 --- a/tests/api/test_result.py +++ b/tests/api/test_result.py @@ -18,7 +18,7 @@ from strawberryfields.api import Result -# pylint: disable=bad-continuation,no-self-use +# pylint: disable=bad-continuation,no-self-use,pointless-statement pytestmark = pytest.mark.api diff --git a/tests/api/test_starship_engine.py b/tests/api/test_starship_engine.py index b5fb15d29..c96e34804 100644 --- a/tests/api/test_starship_engine.py +++ b/tests/api/test_starship_engine.py @@ -19,19 +19,13 @@ from strawberryfields.api import Connection, Job, JobStatus, Result from strawberryfields.engine import StarshipEngine +from .conftest import mock_return -# pylint: disable=bad-continuation,unused-argument,no-self-use,redefined-outer-name +# pylint: disable=bad-continuation,unused-argument,no-self-use,redefined-outer-name,pointless-statement pytestmark = pytest.mark.api -def mock_return(return_value): - """A helper function for defining a mock function that returns the given value for - any arguments. 
- """ - return lambda *args, **kwargs: return_value - - class MockServer: """A mock platform server that fakes a processing delay by counting requests.""" From aa1ab17c0322d1595ddd40dee416b00a256d9b93 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 17:13:21 -0500 Subject: [PATCH 225/335] Remove redundant return descriptions from property docstrings --- strawberryfields/api/connection.py | 24 ++++-------------------- strawberryfields/api/job.py | 18 ++---------------- strawberryfields/engine.py | 12 ++---------- tests/api/conftest.py | 1 + tests/api/test_connection.py | 4 ++-- 5 files changed, 11 insertions(+), 48 deletions(-) diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index 363391084..168f8171e 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -97,38 +97,22 @@ def __init__( @property def token(self) -> str: - """The API authentication token. - - Returns: - str: the authentication token - """ + """The API authentication token.""" return self._token @property def host(self) -> str: - """The host for the remote platform. - - Returns: - str: the hostname - """ + """The host for the remote platform.""" return self._host @property def port(self) -> int: - """The port to connect to on the remote host. - - Returns: - int: the port number - """ + """The port to connect to on the remote host.""" return self._port @property def use_ssl(self) -> bool: - """Whether to use SSL for the connection. - - Returns: - bool: ``True`` if SSL should be used, and ``False`` otherwise - """ + """Whether to use SSL for the connection.""" return self._use_ssl def create_job(self, target: str, program: Program, shots: int) -> Job: diff --git a/strawberryfields/api/job.py b/strawberryfields/api/job.py index e9e87fecf..a849174c6 100644 --- a/strawberryfields/api/job.py +++ b/strawberryfields/api/job.py @@ -45,9 +45,6 @@ def is_final(self) -> bool: This method is primarily used to determine if an operation is valid for a given status. - - Returns: - bool: ``True`` if the job status is final, and ``False`` otherwise """ return self in (JobStatus.CANCELLED, JobStatus.COMPLETED, JobStatus.FAILED) @@ -73,20 +70,12 @@ def __init__(self, id_: str, status: JobStatus, connection: "Connection"): @property def id(self) -> str: - """The job ID. - - Returns: - str: the job ID - """ + """The job ID.""" return self._id @property def status(self) -> JobStatus: - """The job status. - - Returns: - strawberryfields.engine.JobStatus: the job status - """ + """The job status.""" return self._status @property @@ -95,9 +84,6 @@ def result(self) -> Result: This is only defined for completed jobs, and raises an exception for any other status. - - Returns: - strawberryfields.engine.Result: the job result """ if self.status != JobStatus.COMPLETED: raise AttributeError( diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 847be9935..5c06a2afa 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -513,20 +513,12 @@ def __init__(self, target: str, connection: Connection = None): @property def target(self) -> str: - """The target device used by the engine. - - Returns: - str: the target device used by the engine - """ + """The target device used by the engine.""" return self._target @property def connection(self) -> Connection: - """The connection object used by the engine. 
- - Returns: - strawberryfields.engine.Connection: the connection object used by the engine - """ + """The connection object used by the engine.""" return self._connection def run(self, program: Program, shots: int = 1) -> Optional[Result]: diff --git a/tests/api/conftest.py b/tests/api/conftest.py index 0be7df52d..8274cc6b3 100644 --- a/tests/api/conftest.py +++ b/tests/api/conftest.py @@ -21,6 +21,7 @@ # pylint: disable=expression-not-assigned + @pytest.fixture def prog(): """Program fixture.""" diff --git a/tests/api/test_connection.py b/tests/api/test_connection.py index d18b599df..8260d8bd6 100644 --- a/tests/api/test_connection.py +++ b/tests/api/test_connection.py @@ -85,7 +85,7 @@ def test_create_job_error(self, prog, connection, monkeypatch): with pytest.raises(RequestFailedError, match="Failed to create job"): connection.create_job("chip2", prog, 1) - @pytest.mark.skip(reason="method not yet implemented") + @pytest.mark.xfail(reason="method not yet implemented") def test_get_all_jobs(self, connection, monkeypatch): """Tests a successful job list request.""" jobs = [ @@ -104,7 +104,7 @@ def test_get_all_jobs(self, connection, monkeypatch): assert [job.id for job in jobs] == [str(i) for i in range(5, 10)] - @pytest.mark.skip(reason="method not yet implemented") + @pytest.mark.xfail(reason="method not yet implemented") def test_get_all_jobs_error(self, connection, monkeypatch): """Tests a failed job list request.""" monkeypatch.setattr(requests, "get", mock_return(MockResponse(404, {}))) From f8468c83c806cedd0bef74b04594c901c54697e8 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 17:21:09 -0500 Subject: [PATCH 226/335] Update strawberryfields/engine.py Co-Authored-By: Nathan Killoran --- strawberryfields/engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 847be9935..c8c42c89d 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -555,7 +555,7 @@ def run(self, program: Program, shots: int = 1) -> Optional[Result]: "The remote job failed due to an internal server error; " "please try again." ) - return None + return time.sleep(self.POLLING_INTERVAL_SECONDS) except KeyboardInterrupt: self._connection.cancel_job(job.id) From 86de974c421afbf5d5085dfbd5ab2a234ab310c1 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 17:21:45 -0500 Subject: [PATCH 227/335] Small changes to docstrings based on review --- strawberryfields/engine.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 5c06a2afa..6ce2afb76 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -461,15 +461,6 @@ class StarshipEngine: >>> result [[0 1 0 2 1 0 0 0]] - Run a job synchronously, but cancel it before it is completed using a keyboard - interrupt (`ctrl+c`): - - >>> result = engine.run(program, shots=1) - ^C--------------------------------------------------------------------------- - KeyboardInterrupt Traceback (most recent call last) - in () - ----> 1 time.sleep(10) - Run a job asynchronously: >>> job = engine.run_async(program, shots=1) @@ -525,7 +516,9 @@ def run(self, program: Program, shots: int = 1) -> Optional[Result]: """Runs a remote job synchronously. In the synchronous mode, the engine blocks until the job is completed, failed, or - cancelled. If the job completes successfully, the result is returned; if the job + cancelled. 
A job in progress can be cancelled with a keyboard interrupt (`ctrl+c`). + + If the job completes successfully, the result is returned; if the job fails or is cancelled, ``None`` is returned. Args: From 2aa76afef7d046bef4ad8c268fbf167561673400 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 17:27:13 -0500 Subject: [PATCH 228/335] Update strawberryfields/engine.py Co-Authored-By: Nathan Killoran --- strawberryfields/engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index c8c42c89d..1641ed7b8 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -564,7 +564,7 @@ def run(self, program: Program, shots: int = 1) -> Optional[Result]: def run_async(self, program: Program, shots: int = 1) -> Job: """Runs a remote job asynchronously. - In the asynchronous mode, a ``Job`` is returned immediately, and the user can + In the asynchronous mode, a ``Job`` object is returned immediately, and the user can manually refresh the status and result of the job. Args: From 3068c4cfcaf57d795c8927892ab43d7ca325f8c8 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 17:34:14 -0500 Subject: [PATCH 229/335] Clean up docstrings --- strawberryfields/api/connection.py | 24 ++++++++++++++++++++---- strawberryfields/api/job.py | 18 ++++++++++++++++-- strawberryfields/engine.py | 12 ++++++++++-- 3 files changed, 46 insertions(+), 8 deletions(-) diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index 168f8171e..6cc447892 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -97,22 +97,38 @@ def __init__( @property def token(self) -> str: - """The API authentication token.""" + """The API authentication token. + + Returns: + str + """ return self._token @property def host(self) -> str: - """The host for the remote platform.""" + """The host for the remote platform. + + Returns: + str + """ return self._host @property def port(self) -> int: - """The port to connect to on the remote host.""" + """The port to connect to on the remote host. + + Returns: + int + """ return self._port @property def use_ssl(self) -> bool: - """Whether to use SSL for the connection.""" + """Whether to use SSL for the connection. + + Returns: + bool + """ return self._use_ssl def create_job(self, target: str, program: Program, shots: int) -> Job: diff --git a/strawberryfields/api/job.py b/strawberryfields/api/job.py index a849174c6..cb76ce1c2 100644 --- a/strawberryfields/api/job.py +++ b/strawberryfields/api/job.py @@ -45,6 +45,9 @@ def is_final(self) -> bool: This method is primarily used to determine if an operation is valid for a given status. + + Returns: + bool """ return self in (JobStatus.CANCELLED, JobStatus.COMPLETED, JobStatus.FAILED) @@ -70,12 +73,20 @@ def __init__(self, id_: str, status: JobStatus, connection: "Connection"): @property def id(self) -> str: - """The job ID.""" + """The job ID. + + Returns: + str + """ return self._id @property def status(self) -> JobStatus: - """The job status.""" + """The job status. + + Returns: + strawberryfields.api.JobStatus + """ return self._status @property @@ -84,6 +95,9 @@ def result(self) -> Result: This is only defined for completed jobs, and raises an exception for any other status. 
+ + Returns: + strawberryfields.api.Result """ if self.status != JobStatus.COMPLETED: raise AttributeError( diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index cd435a45d..e20a3ed42 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -504,12 +504,20 @@ def __init__(self, target: str, connection: Connection = None): @property def target(self) -> str: - """The target device used by the engine.""" + """The target device used by the engine. + + Returns: + str: the name of the target + """ return self._target @property def connection(self) -> Connection: - """The connection object used by the engine.""" + """The connection object used by the engine. + + Returns: + strawberryfields.api.Connection + """ return self._connection def run(self, program: Program, shots: int = 1) -> Optional[Result]: From 009a6a33af18c14d13ec9d4b70e19fadbcc14734 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 17:35:24 -0500 Subject: [PATCH 230/335] Update strawberryfields/engine.py Co-Authored-By: Nathan Killoran --- strawberryfields/engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 1641ed7b8..4c2c943c9 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -565,7 +565,7 @@ def run_async(self, program: Program, shots: int = 1) -> Job: """Runs a remote job asynchronously. In the asynchronous mode, a ``Job`` object is returned immediately, and the user can - manually refresh the status and result of the job. + manually refresh the status and check for updated results of the job. Args: program (strawberryfields.Program): the quantum circuit From 5f96fb53ffca1a78846185b9b8d9b6d2f9d4a5c2 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 17:35:52 -0500 Subject: [PATCH 231/335] Update starship Co-Authored-By: Nathan Killoran --- starship | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/starship b/starship index bf86246c0..240b1584b 100755 --- a/starship +++ b/starship @@ -53,7 +53,7 @@ if __name__ == "__main__": program = load(args.input) eng = StarshipEngine("chip2", connection) - sys.stdout.write("Computing...\n") + sys.stdout.write("Executing program on remote hardware...\n") result = eng.run(program) if result and result.samples is not None: From 714b99f85f4be92de046a94cef1b644ebb962295 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 17:36:49 -0500 Subject: [PATCH 232/335] Update strawberryfields/api/result.py Co-Authored-By: Nathan Killoran --- strawberryfields/api/result.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/api/result.py b/strawberryfields/api/result.py index 157e006b4..a7cf606a4 100644 --- a/strawberryfields/api/result.py +++ b/strawberryfields/api/result.py @@ -35,7 +35,7 @@ class Result: **Example:** - The following examples run an existing Strawberry Fields + The following example runs an existing Strawberry Fields quantum :class:`~.Program` on the Gaussian engine to get a results object. 
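
A minimal sketch of the remote-execution flow that the docstring updates above describe
(StarshipEngine.run / run_async, Job, Result). It assumes stored account credentials, the
example hardware target "chip2" used elsewhere in this series, and a placeholder program
whose gates are omitted; it is an illustration of the documented API as of these patches,
not part of the patch series itself:

    import strawberryfields as sf
    from strawberryfields.api import JobStatus
    from strawberryfields.engine import StarshipEngine

    prog = sf.Program(8)
    # ... the circuit would be appended to `prog` here ...

    eng = StarshipEngine("chip2")

    # Blocking call: waits until the job is completed, failed, or cancelled,
    # and returns a Result on success (None otherwise).
    result = eng.run(prog, shots=1)

    # Non-blocking call: returns a Job immediately; its status and result
    # are refreshed manually by the user.
    job = eng.run_async(prog, shots=1)
    job.refresh()
    if job.status == JobStatus.COMPLETED:
        print(job.result.samples)
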
From ee097a00536a29c702659dc35f79b718da59ff99 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 17:37:21 -0500 Subject: [PATCH 233/335] Update strawberryfields/api/result.py Co-Authored-By: Nathan Killoran --- strawberryfields/api/result.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/api/result.py b/strawberryfields/api/result.py index a7cf606a4..1c10da4d6 100644 --- a/strawberryfields/api/result.py +++ b/strawberryfields/api/result.py @@ -36,7 +36,7 @@ class Result: **Example:** The following example runs an existing Strawberry Fields - quantum :class:`~.Program` on the Gaussian engine to get + quantum :class:`~.Program` on the Gaussian backend to get a results object. Using this results object, the measurement samples From de45482012fe5aedc59564a3bcc8674399a89eb2 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 17:38:04 -0500 Subject: [PATCH 234/335] Update strawberryfields/api/result.py Co-Authored-By: Nathan Killoran --- strawberryfields/api/result.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/api/result.py b/strawberryfields/api/result.py index 1c10da4d6..8e428352b 100644 --- a/strawberryfields/api/result.py +++ b/strawberryfields/api/result.py @@ -37,7 +37,7 @@ class Result: The following example runs an existing Strawberry Fields quantum :class:`~.Program` on the Gaussian backend to get - a results object. + a ``Result`` object. Using this results object, the measurement samples can be returned, as well as quantum state information. From 5c47a7c2b09250253e0642be3f540e3ac69502f9 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Wed, 26 Feb 2020 17:38:34 -0500 Subject: [PATCH 235/335] Update strawberryfields/api/result.py Co-Authored-By: Nathan Killoran --- strawberryfields/api/result.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/api/result.py b/strawberryfields/api/result.py index 8e428352b..745d743dc 100644 --- a/strawberryfields/api/result.py +++ b/strawberryfields/api/result.py @@ -39,7 +39,7 @@ class Result: quantum :class:`~.Program` on the Gaussian backend to get a ``Result`` object. - Using this results object, the measurement samples + Using this ``Result`` object, the measurement samples can be returned, as well as quantum state information. 
>>> eng = sf.Engine("gaussian") From b878bb027dca73a9c1e7660b1e38cfdb82cfb201 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 18:01:33 -0500 Subject: [PATCH 236/335] Adding the testfilename fixture --- tests/conftest.py | 6 +++++- tests/frontend/test_configuration.py | 18 +++++------------- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 1c84659de..8fc75f8da 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -47,6 +47,11 @@ pytest.param("gaussian", marks=pytest.mark.gaussian), ] +@pytest.fixture(scope="session") +def test_filename(): + """Using a filename for the tests.""" + return "test_config.toml" + if tf_available and tf.__version__[:3] == "1.3": from strawberryfields.backends.tfbackend import TFBackend @@ -209,7 +214,6 @@ def _setup_eng(num_subsystems, **kwargs): return _setup_eng - def pytest_runtest_setup(item): """Automatically skip tests if they are marked for only certain backends""" if tf_available: diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index d24c66195..299e9d8a3 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -409,12 +409,10 @@ def test_parse_environment_variable_integer(self, monkeypatch): class TestStoreAccount: """Tests for the store_account function.""" - def test_config_created_locally(self, monkeypatch, tmpdir): + def test_config_created_locally(self, monkeypatch, test_filename, tmpdir): """Tests that a configuration file was created in the current directory.""" - test_filename = "test_config.toml" - call_history = [] with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: tmpdir) @@ -425,12 +423,10 @@ def test_config_created_locally(self, monkeypatch, tmpdir): assert call_history[0][0] == EXPECTED_CONFIG assert call_history[0][1] == tmpdir.join(test_filename) - def test_global_config_created(self, monkeypatch, tmpdir): + def test_global_config_created(self, monkeypatch, test_filename, tmpdir): """Tests that a configuration file was created in the user configuration directory for Strawberry Fields.""" - test_filename = "test_config.toml" - call_history = [] with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: "NotTheCorrectDir") @@ -441,12 +437,10 @@ def test_global_config_created(self, monkeypatch, tmpdir): assert call_history[0][0] == EXPECTED_CONFIG assert call_history[0][1] == tmpdir.join(test_filename) - def test_location_not_recognized_error(self, monkeypatch, tmpdir): + def test_location_not_recognized_error(self, monkeypatch, test_filename, tmpdir): """Tests that an error is raised if the configuration file is supposed to be created in an unrecognized directory.""" - test_filename = "test_config.toml" - with pytest.raises( conf.ConfigurationError, match="This location is not recognized.", @@ -456,9 +450,8 @@ def test_location_not_recognized_error(self, monkeypatch, tmpdir): class TestSaveConfigToFile: """Tests for the store_account function.""" - def test_correct(self, tmpdir): + def test_correct(self, test_filename, tmpdir): """Test saving a configuration file.""" - test_filename = "test_config.toml" filepath = str(tmpdir.join(test_filename)) conf.save_config_to_file(OTHER_EXPECTED_CONFIG, filepath) @@ -466,10 +459,9 @@ def test_correct(self, tmpdir): result = toml.load(filepath) assert result == OTHER_EXPECTED_CONFIG - def test_file_already_existed(self, tmpdir): + def test_file_already_existed(self, test_filename, tmpdir): """Test saving a configuration file 
even if the file already existed.""" - test_filename = "test_config.toml" filepath = str(tmpdir.join(test_filename)) with open(filepath, "w") as f: From 35f76638af4cfb02a45e8ac1e9fce50a92586745 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 18:04:59 -0500 Subject: [PATCH 237/335] Update doc/introduction/configuration.rst Co-Authored-By: Nathan Killoran --- doc/introduction/configuration.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 28faddc60..488a2a5c4 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -69,7 +69,7 @@ The following is an example for using ``store_account`` with defaults: import strawberryfields as sf sf.store_account("MyToken") -where ``"MyToken"`` contains the user specific authentication token. +where ``"MyToken"`` contains the user-specific authentication token. It is advised to execute this code snippet **only once** per configuration. This should be done in the same directory where the SF project can be found, separately from executing any other Strawberry Fields scripts. Using the default options it will store the account in the *current working directory* by creating a ``config.toml`` file. @@ -83,4 +83,4 @@ The following code snippet can be used to create a configuration file for *every import strawberryfields as sf sf.store_account("MyToken", location="user_config") -where ``"MyToken"`` is the user specific authentication token. \ No newline at end of file +where ``"MyToken"`` is the user specific authentication token. From fdfc8db02c35e057292ab8d56e3f6fd3ec21afd6 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 18:05:55 -0500 Subject: [PATCH 238/335] Title adjustment --- doc/introduction/configuration.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 28faddc60..a3a13967a 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -59,8 +59,8 @@ Store your account Using the :func:`~.store_account` function, a configuration file can be created easily. It only requires specifying the authentication token. Apart from that, further configuration options can be passed as keyword arguments. -Configure for the current SF project -************************************ +Default configuration +********************* The following is an example for using ``store_account`` with defaults: @@ -73,8 +73,8 @@ where ``"MyToken"`` contains the user specific authentication token. It is advised to execute this code snippet **only once** per configuration. This should be done in the same directory where the SF project can be found, separately from executing any other Strawberry Fields scripts. Using the default options it will store the account in the *current working directory* by creating a ``config.toml`` file. -Configure for every SF project -****************************** +Configure for every project +*************************** The following code snippet can be used to create a configuration file for *every Strawberry Fields project*. 
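
A minimal sketch of the store_account workflow that the configuration documentation above
describes. "MyToken" is a placeholder for a real authentication token, and the save
locations mirror the documentation as it stands at this point in the series:

    import strawberryfields as sf

    # Default at this point in the series: creates a config.toml in the
    # current working directory, configuring only the project located there.
    sf.store_account("MyToken")

    # Store the account in the user configuration directory instead, so that
    # every Strawberry Fields project picks it up.
    sf.store_account("MyToken", location="user_config")

    # Further connection options (hostname, port, use_ssl) can be passed as
    # keyword arguments.
    sf.store_account("MyToken", location="user_config",
                     hostname="localhost", port=443, use_ssl=True)
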
From 245b3632d66cbd82c6edf2ad97874aaf94122e30 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 18:29:43 -0500 Subject: [PATCH 239/335] Update strawberryfields/configuration.py Co-Authored-By: Nathan Killoran --- strawberryfields/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 5a6f40076..05fc8e081 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -224,7 +224,7 @@ def store_account(authentication_token, filename="config.toml", location="local" """Stores an account in a configuration file. The configuration file can be created in the following locations: - - current working directory (local) + - current working directory ("local") - user configuration directory (user_config) Args: From d0eda0cae1ea5f7cd2fc3eda3709c39bad21371e Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 18:29:53 -0500 Subject: [PATCH 240/335] Update strawberryfields/configuration.py Co-Authored-By: Nathan Killoran --- strawberryfields/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 05fc8e081..9374aca12 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -225,7 +225,7 @@ def store_account(authentication_token, filename="config.toml", location="local" The configuration file can be created in the following locations: - current working directory ("local") - - user configuration directory (user_config) + - user configuration directory ("user_config") Args: authentication_token (str): the authentication token to use when From 8a22aa0f1a9b1aea06be29287d9a484ec82002b1 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 18:30:03 -0500 Subject: [PATCH 241/335] Update strawberryfields/configuration.py Co-Authored-By: Nathan Killoran --- strawberryfields/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 9374aca12..ce7d8466e 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -229,7 +229,7 @@ def store_account(authentication_token, filename="config.toml", location="local" Args: authentication_token (str): the authentication token to use when - connecting to the API, it will be sent with every request in the + connecting to the API; it will be sent with every request in the header Kwargs: From cbb31b476bfad7548fc1a0cd6de8883c6ed6bb72 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 18:32:56 -0500 Subject: [PATCH 242/335] Applying suggested changes --- doc/introduction/configuration.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 1de5eda09..3f1f126a2 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -71,16 +71,16 @@ The following is an example for using ``store_account`` with defaults: where ``"MyToken"`` contains the user-specific authentication token. -It is advised to execute this code snippet **only once** per configuration. This should be done in the same directory where the SF project can be found, separately from executing any other Strawberry Fields scripts. Using the default options it will store the account in the *current working directory* by creating a ``config.toml`` file. 
+It is advised to execute this code snippet **only once**, separately from executing any other Strawberry Fields scripts. Using the default options it will store the account in the *current working directory* by creating a ``config.toml`` file. -Configure for every project +Configuration for a project *************************** -The following code snippet can be used to create a configuration file for *every Strawberry Fields project*. +The following code snippet can be run in the *working directory* of a Strawberry Fields project to create a configuration file for *only* for this project. .. code-block:: python import strawberryfields as sf - sf.store_account("MyToken", location="user_config") + sf.store_account("MyToken", location="local") where ``"MyToken"`` is the user specific authentication token. From 853004fe0b04bd9bec7cee7fa95fb8d7e2270660 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 18:42:15 -0500 Subject: [PATCH 243/335] Reword configuration description --- doc/introduction/configuration.rst | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 3f1f126a2..3378008fb 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -71,12 +71,16 @@ The following is an example for using ``store_account`` with defaults: where ``"MyToken"`` contains the user-specific authentication token. -It is advised to execute this code snippet **only once**, separately from executing any other Strawberry Fields scripts. Using the default options it will store the account in the *current working directory* by creating a ``config.toml`` file. +It is advised to execute this code snippet **only once**, separately any other Python scripts. + +.. note:: + + Using the default options will store the account in the *Strawberry Fields configuration directory* by creating a ``config.toml`` file. Configuration for a project *************************** -The following code snippet can be run in the *working directory* of a Strawberry Fields project to create a configuration file for *only* for this project. +The following code snippet can be run in the *same directory* of a Python script or Jupyter Notebook that uses Strawberry Fields to create a configuration file *only for this project*: .. code-block:: python From 3ab60d0ce47422c1802e37cf743b1a25b8d97e6d Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 18:43:50 -0500 Subject: [PATCH 244/335] Swap the default for location to user_config --- strawberryfields/configuration.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index ce7d8466e..5e1a65d31 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -220,12 +220,12 @@ def parse_environment_variable(key, value): return value -def store_account(authentication_token, filename="config.toml", location="local", **kwargs): +def store_account(authentication_token, filename="config.toml", location="user_config", **kwargs): """Stores an account in a configuration file. 
The configuration file can be created in the following locations: - - current working directory ("local") - user configuration directory ("user_config") + - current working directory ("local") Args: authentication_token (str): the authentication token to use when @@ -240,10 +240,10 @@ def store_account(authentication_token, filename="config.toml", location="local" Configuration options are detailed in :doc:`/introduction/configuration` """ - if location == "local": - directory = os.getcwd() - elif location == "user_config": + if location == "user_config": directory = user_config_dir("strawberryfields", "Xanadu") + elif location == "local": + directory = os.getcwd() else: raise ConfigurationError("This location is not recognized.") From 00f7e367b9841f832b76e34eb0d0d5c85207b2a1 Mon Sep 17 00:00:00 2001 From: antalszava Date: Wed, 26 Feb 2020 21:52:18 -0500 Subject: [PATCH 245/335] Update doc/introduction/configuration.rst Co-Authored-By: Josh Izaac --- doc/introduction/configuration.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 3378008fb..47d172dda 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -57,7 +57,9 @@ Configuration options Store your account ------------------ -Using the :func:`~.store_account` function, a configuration file can be created easily. It only requires specifying the authentication token. Apart from that, further configuration options can be passed as keyword arguments. +Using the :func:`~.store_account` function, a configuration file containing your Xanadu Cloud credentials +can be created easily. By default, this configuration file is saved *globally*, and will be used every time +a remote job is submitted. Default configuration ********************* From 3e45484abaa92df261e68d0159ccc1498d0703e8 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 09:42:07 -0500 Subject: [PATCH 246/335] Use clearer 'blocking'/'non-blocking' terminology where relevant --- strawberryfields/engine.py | 16 ++++++++-------- tests/api/test_starship_engine.py | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 63cd805e0..a8504e88f 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -447,21 +447,21 @@ class JobFailedError(Exception): class StarshipEngine: """A quantum program executor engine that that provides a simple interface for - running remote jobs in a synchronous or asynchronous manner. + running remote jobs in a blocking or non-blocking manner. **Example:** The following examples instantiate an engine with the default configuration, and - runs jobs both synchronously and asynchronously. + runs both blocking and non-blocking jobs. - Run a job synchronously: + Run a blocking job: >>> engine = StarshipEngine("chip2") >>> result = engine.run(program, shots=1) # blocking call >>> result [[0 1 0 2 1 0 0 0]] - Run a job asynchronously: + Run a non-blocking job: >>> job = engine.run_async(program, shots=1) >>> job.status @@ -521,9 +521,9 @@ def connection(self) -> Connection: return self._connection def run(self, program: Program, shots: int = 1) -> Optional[Result]: - """Runs a remote job synchronously. + """Runs a blocking job. - In the synchronous mode, the engine blocks until the job is completed, failed, or + In the blocking mode, the engine blocks until the job is completed, failed, or cancelled. 
A job in progress can be cancelled with a keyboard interrupt (`ctrl+c`). If the job completes successfully, the result is returned; if the job @@ -555,9 +555,9 @@ def run(self, program: Program, shots: int = 1) -> Optional[Result]: return None def run_async(self, program: Program, shots: int = 1) -> Job: - """Runs a remote job asynchronously. + """Runs a non-blocking remote job. - In the asynchronous mode, a ``Job`` object is returned immediately, and the user can + In the non-blocking mode, a ``Job`` object is returned immediately, and the user can manually refresh the status and check for updated results of the job. Args: diff --git a/tests/api/test_starship_engine.py b/tests/api/test_starship_engine.py index c96e34804..d57e67b56 100644 --- a/tests/api/test_starship_engine.py +++ b/tests/api/test_starship_engine.py @@ -67,7 +67,7 @@ class TestStarshipEngine: """Tests for the ``StarshipEngine`` class.""" def test_run_complete(self, connection, prog, job_to_complete): - """Tests a successful synchronous job execution.""" + """Tests a successful blocking job execution.""" engine = StarshipEngine("chip2", connection=connection) result = engine.run(prog) @@ -79,7 +79,7 @@ def test_run_complete(self, connection, prog, job_to_complete): result.state def test_run_async(self, connection, prog, job_to_complete): - """Tests a successful asynchronous job execution.""" + """Tests a successful non-blocking job execution.""" engine = StarshipEngine("chip2", connection=connection) job = engine.run_async(prog) From 6329e0cc5707e240db9be8a60b736415ad00caf0 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 09:51:44 -0500 Subject: [PATCH 247/335] Remove obsolete exception --- strawberryfields/engine.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 886bcf368..04b41f597 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -433,21 +433,29 @@ def run(self, program, *, args=None, compile_options=None, run_options=None): ) # check that batching is not used together with shots > 1 if self.backend_options.get("batch_size", 0) and eng_run_options["shots"] > 1: - raise NotImplementedError("Batching cannot be used together with multiple shots.") + raise NotImplementedError( + "Batching cannot be used together with multiple shots." + ) # check that post-selection and feed-forwarding is not used together with shots > 1 for p in program_lst: for c in p.circuit: try: if c.op.select and eng_run_options["shots"] > 1: - raise NotImplementedError("Post-selection cannot be used together with multiple shots.") + raise NotImplementedError( + "Post-selection cannot be used together with multiple shots." + ) except AttributeError: pass if c.op.measurement_deps and eng_run_options["shots"] > 1: - raise NotImplementedError("Feed-forwarding of measurements cannot be used together with multiple shots.") + raise NotImplementedError( + "Feed-forwarding of measurements cannot be used together with multiple shots." 
+ ) - result = super()._run(program, args=args, compile_options=compile_options, **eng_run_options) + result = super()._run( + program, args=args, compile_options=compile_options, **eng_run_options + ) modes = temp_run_options["modes"] @@ -460,10 +468,6 @@ def run(self, program, *, args=None, compile_options=None, run_options=None): return result -class JobFailedError(Exception): - """Raised when a remote job enters a 'failed' status.""" - - class StarshipEngine: """A quantum program executor engine that that provides a simple interface for running remote jobs in a blocking or non-blocking manner. From 27ba93f1a121e8a02fcf32f2a9fd9054df1b2f62 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 10:26:56 -0500 Subject: [PATCH 248/335] Fix Result.samples dimensions to (shots, modes) --- strawberryfields/api/result.py | 6 ++---- strawberryfields/engine.py | 7 +++---- tests/api/test_connection.py | 2 +- tests/api/test_starship_engine.py | 6 +++--- 4 files changed, 9 insertions(+), 12 deletions(-) diff --git a/strawberryfields/api/result.py b/strawberryfields/api/result.py index 8f192bde9..5b732fca2 100644 --- a/strawberryfields/api/result.py +++ b/strawberryfields/api/result.py @@ -65,12 +65,10 @@ def __init__(self, samples, is_stateful=True): self._state = None self._is_stateful = is_stateful - # samples arrives as either a list of arrays (for shots > 1) or a list (for shots = 1) - # need to be converted to a multidimensional array with shape (shots, modes) + # samples arrives as either a 2-D array (for shots > 1) or a 1-D array (for shots = 1) + # the latter needs to be converted to a 2-D array with shape (shots, modes) if np.ndim(samples) == 1: samples = np.array([samples]) - else: - samples = np.stack(samples, 1) self._samples = samples @property diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 04b41f597..ae15b7fb8 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -23,6 +23,8 @@ import time from typing import Optional +import numpy as np + from strawberryfields.api import Connection, Job, JobStatus, Result from strawberryfields.configuration import load_config from strawberryfields.program import Program @@ -297,7 +299,7 @@ def _broadcast_nones(val, dim): prev = p if self.samples is not None: - return Result(self.samples.copy()) + return Result(np.array(self.samples).T) class LocalEngine(BaseEngine): @@ -428,9 +430,6 @@ def run(self, program, *, args=None, compile_options=None, run_options=None): key: temp_run_options[key] for key in temp_run_options.keys() & eng_run_keys } - result = super()._run( - program, args=args, compile_options=compile_options, **eng_run_options - ) # check that batching is not used together with shots > 1 if self.backend_options.get("batch_size", 0) and eng_run_options["shots"] > 1: raise NotImplementedError( diff --git a/tests/api/test_connection.py b/tests/api/test_connection.py index 8260d8bd6..ee8c38318 100644 --- a/tests/api/test_connection.py +++ b/tests/api/test_connection.py @@ -183,7 +183,7 @@ def test_get_job_result(self, connection, result_dtype, monkeypatch): result = connection.get_job_result("123") - assert np.array_equal(result.samples.T, result_samples) + assert np.array_equal(result.samples, result_samples) def test_get_job_result_error(self, connection, monkeypatch): """Tests a failed job result request.""" diff --git a/tests/api/test_starship_engine.py b/tests/api/test_starship_engine.py index d57e67b56..8be5357e9 100644 --- a/tests/api/test_starship_engine.py +++ 
b/tests/api/test_starship_engine.py @@ -59,7 +59,7 @@ def job_to_complete(connection, monkeypatch): monkeypatch.setattr( Connection, "get_job_result", - mock_return(Result([[1, 2], [3, 4]], is_stateful=False)), + mock_return(Result(np.array([[1, 2], [3, 4]]), is_stateful=False)), ) @@ -71,7 +71,7 @@ def test_run_complete(self, connection, prog, job_to_complete): engine = StarshipEngine("chip2", connection=connection) result = engine.run(prog) - assert np.array_equal(result.samples.T, np.array([[1, 2], [3, 4]])) + assert np.array_equal(result.samples, np.array([[1, 2], [3, 4]])) with pytest.raises( AttributeError, match="The state is undefined for a stateless computation." @@ -89,7 +89,7 @@ def test_run_async(self, connection, prog, job_to_complete): job.refresh() assert job.status == JobStatus.COMPLETED - assert np.array_equal(job.result.samples.T, np.array([[1, 2], [3, 4]])) + assert np.array_equal(job.result.samples, np.array([[1, 2], [3, 4]])) with pytest.raises( AttributeError, match="The state is undefined for a stateless computation." From f23dcfee7b4dfaa92ffceefd37993d12b03c20cd Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 10:39:23 -0500 Subject: [PATCH 249/335] Read target from program --- starship | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/starship b/starship index 240b1584b..bcf44c32e 100755 --- a/starship +++ b/starship @@ -52,7 +52,7 @@ if __name__ == "__main__": program = load(args.input) - eng = StarshipEngine("chip2", connection) + eng = StarshipEngine(program.target, connection) sys.stdout.write("Executing program on remote hardware...\n") result = eng.run(program) From c8f1cded3303cd2cc31f8737240a78bcf4a81718 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 11:30:30 -0500 Subject: [PATCH 250/335] Update imports --- starship | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/starship b/starship index bcf44c32e..6dbb358f7 100755 --- a/starship +++ b/starship @@ -21,7 +21,8 @@ backend. 
import sys import argparse -from strawberryfields.engine import StarshipEngine, Connection, JobFailedError +from strawberryfields.api import Connection +from strawberryfields.engine import StarshipEngine from strawberryfields.io import load From 0a4ab59545db7dd9a7ac9b08e0d93326ac248a76 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 11:45:09 -0500 Subject: [PATCH 251/335] Remove samples transpose in starship CLI --- starship | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/starship b/starship index 6dbb358f7..6ba3403b5 100755 --- a/starship +++ b/starship @@ -60,6 +60,6 @@ if __name__ == "__main__": if result and result.samples is not None: if args.output: with open(args.output, "w") as file: - file.write(str(result.samples.T)) + file.write(str(result.samples)) else: - sys.stdout.write(str(result.samples.T)) + sys.stdout.write(str(result.samples)) From e5807c39186e685695d3ee81c5762a380728ebbc Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 11:50:13 -0500 Subject: [PATCH 252/335] Clean up config loading logic --- strawberryfields/api/connection.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index 6cc447892..c7983e4e9 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -23,7 +23,7 @@ import numpy as np import requests -from strawberryfields.configuration import DEFAULT_CONFIG +from strawberryfields.configuration import configuration from strawberryfields.io import to_blackbird from strawberryfields.program import Program @@ -78,10 +78,10 @@ class Connection: # pylint: disable=bad-continuation def __init__( self, - token: str, - host: str = DEFAULT_CONFIG["api"]["hostname"], - port: int = DEFAULT_CONFIG["api"]["port"], - use_ssl: bool = DEFAULT_CONFIG["api"]["use_ssl"], + token: str = configuration["api"]["authentication_token"], + host: str = configuration["api"]["hostname"], + port: int = configuration["api"]["port"], + use_ssl: bool = configuration["api"]["use_ssl"], verbose: bool = False, ): self._token = token From b24474931ecc8ba5dca1ed896558eb27d5297868 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 11:56:19 -0500 Subject: [PATCH 253/335] Simplify JobStatus string repr --- strawberryfields/api/connection.py | 4 ++-- strawberryfields/api/job.py | 6 ++++++ strawberryfields/engine.py | 4 ++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index c7983e4e9..93131bdc2 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -57,14 +57,14 @@ class Connection: >>> job >>> job.status - + >>> job.result AttributeError >>> job = connection.get_job(known_job_id) >>> job >>> job.status - + >>> job.result [[0 1 0 2 1 0 0 0]] diff --git a/strawberryfields/api/job.py b/strawberryfields/api/job.py index cb76ce1c2..f88699e61 100644 --- a/strawberryfields/api/job.py +++ b/strawberryfields/api/job.py @@ -51,6 +51,12 @@ def is_final(self) -> bool: """ return self in (JobStatus.CANCELLED, JobStatus.COMPLETED, JobStatus.FAILED) + def __repr__(self) -> str: + return "".format(self.value) + + def __str__(self) -> str: + return self.__repr__() + class Job: """Represents a remote job that can be queried for its status or result. 
diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index ae15b7fb8..8ab151b98 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -487,12 +487,12 @@ class StarshipEngine: >>> job = engine.run_async(program, shots=1) >>> job.status - + >>> job.result InvalidJobOperationError >>> job.refresh() >>> job.status - + >>> job.result [[0 1 0 2 1 0 0 0]] From 1732433b2475dab2ba0b7867467ad375dead330e Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 12:02:13 -0500 Subject: [PATCH 254/335] Moving tmpdir.join calls to conftest -> changing to a function scoped fixture type (as required by tmpdir); adding integration tests to store_account; refactoring configuration test file to use test_filename and test_filepath fixtures; creating mock version of create_confing and a mock class for store_account tests --- doc/introduction/configuration.rst | 2 +- tests/conftest.py | 12 ++- tests/frontend/test_configuration.py | 149 +++++++++++++++++---------- 3 files changed, 103 insertions(+), 60 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 3378008fb..12be55f69 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -71,7 +71,7 @@ The following is an example for using ``store_account`` with defaults: where ``"MyToken"`` contains the user-specific authentication token. -It is advised to execute this code snippet **only once**, separately any other Python scripts. +It is advised to execute this code snippet **only once**, separately from any other Python scripts. .. note:: diff --git a/tests/conftest.py b/tests/conftest.py index 8fc75f8da..4802ab0f6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -47,11 +47,17 @@ pytest.param("gaussian", marks=pytest.mark.gaussian), ] -@pytest.fixture(scope="session") +TEST_FILENAME = "test_config.toml" + +@pytest.fixture(scope="function") def test_filename(): - """Using a filename for the tests.""" - return "test_config.toml" + """Using a test filename for the tests.""" + return TEST_FILENAME +@pytest.fixture(scope="function") +def test_filepath(tmpdir): + """Using a test filepath for the tests.""" + return tmpdir.join(TEST_FILENAME) if tf_available and tf.__version__[:3] == "1.3": from strawberryfields.backends.tfbackend import TFBackend diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 299e9d8a3..d30cac379 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -75,14 +75,11 @@ def test_not_found_warning(self, caplog): conf.load_config(filename='NotAFileName') assert "No Strawberry Fields configuration file found." 
in caplog.text - def test_keywords_take_precedence_over_everything(self, monkeypatch, tmpdir): + def test_keywords_take_precedence_over_everything(self, monkeypatch, tmpdir, test_filepath): """Test that the keyword arguments passed to load_config take precedence over data in environment variables or data in a configuration file.""" - - filename = tmpdir.join("config.toml") - - with open(filename, "w") as f: + with open(test_filepath, "w") as f: f.write(TEST_FILE) with monkeypatch.context() as m: @@ -101,13 +98,10 @@ def test_keywords_take_precedence_over_everything(self, monkeypatch, tmpdir): assert configuration == OTHER_EXPECTED_CONFIG - def test_environment_variables_take_precedence_over_conf_file(self, monkeypatch, tmpdir): + def test_environment_variables_take_precedence_over_conf_file(self, monkeypatch, tmpdir, test_filepath): """Test that the data in environment variables take precedence over data in a configuration file.""" - - filename = tmpdir.join("config.toml") - - with open(filename, "w") as f: + with open(test_filepath, "w") as f: f.write(TEST_FILE) with monkeypatch.context() as m: @@ -123,11 +117,11 @@ def test_environment_variables_take_precedence_over_conf_file(self, monkeypatch, assert configuration == OTHER_EXPECTED_CONFIG - def test_conf_file_loads_well(self, monkeypatch, tmpdir): + def test_conf_file_loads_well(self, monkeypatch, tmpdir, test_filepath): """Test that the load_config function loads a configuration from a TOML file correctly.""" - filename = tmpdir.join("config.toml") + filename = test_filepath with open(filename, "w") as f: f.write(TEST_FILE) @@ -181,18 +175,13 @@ def test_current_directory(self, tmpdir, monkeypatch): assert config_filepath == tmpdir.join(filename) - def test_env_variable(self, tmpdir, monkeypatch): + def test_env_variable(self, monkeypatch, tmpdir, test_filename, test_filepath): """Test that the correct configuration file is found using the correct environment variable (SF_CONF). This is a test case for when there is no configuration file in the current directory.""" - - filename = "config.toml" - - path_to_write_file = tmpdir.join(filename) - - with open(path_to_write_file, "w") as f: + with open(test_filepath, "w") as f: f.write(TEST_FILE) def raise_wrapper(ex): @@ -203,11 +192,11 @@ def raise_wrapper(ex): m.setenv("SF_CONF", tmpdir) m.setattr(conf, "user_config_dir", lambda *args: "NotTheFileName") - config_filepath = conf.get_config_filepath(filename=filename) + config_filepath = conf.get_config_filepath(filename=test_filename) - assert config_filepath == tmpdir.join("config.toml") + assert config_filepath == test_filepath - def test_user_config_dir(self, tmpdir, monkeypatch): + def test_user_config_dir(self, monkeypatch, tmpdir, test_filename, test_filepath): """Test that the correct configuration file is found using the correct argument to the user_config_dir function. 
@@ -215,11 +204,7 @@ def test_user_config_dir(self, tmpdir, monkeypatch): -in the current directory or -in the directory contained in the corresponding environment variable.""" - filename = "config.toml" - - path_to_write_file = tmpdir.join(filename) - - with open(path_to_write_file, "w") as f: + with open(test_filepath, "w") as f: f.write(TEST_FILE) def raise_wrapper(ex): @@ -230,11 +215,11 @@ def raise_wrapper(ex): m.setenv("SF_CONF", "NoConfigFileHere") m.setattr(conf, "user_config_dir", lambda x, *args: tmpdir if x=="strawberryfields" else "NoConfigFileHere") - config_filepath = conf.get_config_filepath(filename=filename) + config_filepath = conf.get_config_filepath(filename=test_filename) - assert config_filepath == tmpdir.join("config.toml") + assert config_filepath == test_filepath - def test_no_config_file_found_returns_none(self, tmpdir, monkeypatch): + def test_no_config_file_found_returns_none(self, monkeypatch, tmpdir, test_filename): """Test that the get_config_filepath returns None if the configuration file is nowhere to be found. @@ -243,8 +228,6 @@ def test_no_config_file_found_returns_none(self, tmpdir, monkeypatch): -in the directory contained in the corresponding environment variable -in the user_config_dir directory of Strawberry Fields.""" - filename = "config.toml" - def raise_wrapper(ex): raise ex @@ -253,16 +236,16 @@ def raise_wrapper(ex): m.setenv("SF_CONF", "NoConfigFileHere") m.setattr(conf, "user_config_dir", lambda *args: "NoConfigFileHere") - config_filepath = conf.get_config_filepath(filename=filename) + config_filepath = conf.get_config_filepath(filename=test_filename) assert config_filepath is None class TestLoadConfigFile: """Tests the load_config_file function.""" - def test_load_config_file(self, tmpdir, monkeypatch): + def test_load_config_file(self, monkeypatch, tmpdir, test_filename, test_filepath): """Tests that configuration is loaded correctly from a TOML file.""" - filename = tmpdir.join("config.toml") + filename = test_filepath with open(filename, "w") as f: f.write(TEST_FILE) @@ -271,10 +254,10 @@ def test_load_config_file(self, tmpdir, monkeypatch): assert loaded_config == EXPECTED_CONFIG - def test_loading_absolute_path(self, tmpdir, monkeypatch): + def test_loading_absolute_path(self, monkeypatch, tmpdir, test_filename, test_filepath): """Test that the default configuration file can be loaded via an absolute path.""" - filename = os.path.abspath(tmpdir.join("config.toml")) + filename = os.path.abspath(test_filepath) with open(filename, "w") as f: @@ -398,46 +381,69 @@ def test_parse_environment_variable_integer(self, monkeypatch): assert conf.parse_environment_variable("some_integer", "123") == 123 DEFAULT_KWARGS = { - "api": { - "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", - "hostname": "localhost", - "use_ssl": True, - "port": 443, + "hostname": "localhost", + "use_ssl": True, + "port": 443, } - } + +class MockSaveConfigToFile: + """A mock class used to contain the state left by the save_config_to_file + function.""" + def __init__(self): + self.config = None + self.path = None + + def update(self, config, path): + """Updates the instance attributes.""" + self.config = config + self.path = path + +def mock_create_config(authentication_token="", **kwargs): + """A mock version of the create_config function adjusted to the + store_account function. 
+ """ + return {"api": {'authentication_token': authentication_token, **kwargs}} class TestStoreAccount: """Tests for the store_account function.""" - def test_config_created_locally(self, monkeypatch, test_filename, tmpdir): + def test_config_created_locally(self, monkeypatch, tmpdir, test_filename): """Tests that a configuration file was created in the current directory.""" + mock_save_config_file = MockSaveConfigToFile() + + assert mock_save_config_file.config is None + assert mock_save_config_file.path is None - call_history = [] with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: tmpdir) m.setattr(conf, "user_config_dir", lambda *args: "NotTheCorrectDir") - m.setattr(conf, "save_config_to_file", lambda a, b: call_history.append((a, b))) + m.setattr(conf, "create_config", mock_create_config) + m.setattr(conf, "save_config_to_file", lambda a, b: mock_save_config_file.update(a, b)) conf.store_account(authentication_token, filename=test_filename, location="local", **DEFAULT_KWARGS) - assert call_history[0][0] == EXPECTED_CONFIG - assert call_history[0][1] == tmpdir.join(test_filename) + assert mock_save_config_file.config == EXPECTED_CONFIG + assert mock_save_config_file.path == tmpdir.join(test_filename) - def test_global_config_created(self, monkeypatch, test_filename, tmpdir): + def test_global_config_created(self, monkeypatch, tmpdir, test_filename): """Tests that a configuration file was created in the user configuration directory for Strawberry Fields.""" + mock_save_config_file = MockSaveConfigToFile() + + assert mock_save_config_file.config is None + assert mock_save_config_file.path is None - call_history = [] with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: "NotTheCorrectDir") m.setattr(conf, "user_config_dir", lambda *args: tmpdir) - m.setattr(conf, "save_config_to_file", lambda a, b: call_history.append((a, b))) + m.setattr(conf, "create_config", mock_create_config) + m.setattr(conf, "save_config_to_file", lambda a, b: mock_save_config_file.update(a, b)) conf.store_account(authentication_token, filename=test_filename, location="user_config", **DEFAULT_KWARGS) - assert call_history[0][0] == EXPECTED_CONFIG - assert call_history[0][1] == tmpdir.join(test_filename) + assert mock_save_config_file.config == EXPECTED_CONFIG + assert mock_save_config_file.path == tmpdir.join(test_filename) - def test_location_not_recognized_error(self, monkeypatch, test_filename, tmpdir): + def test_location_not_recognized_error(self, monkeypatch, tmpdir, test_filename): """Tests that an error is raised if the configuration file is supposed to be created in an unrecognized directory.""" @@ -447,10 +453,41 @@ def test_location_not_recognized_error(self, monkeypatch, test_filename, tmpdir) ): conf.store_account(authentication_token, filename=test_filename, location="UNRECOGNIZED_LOCATION", **DEFAULT_KWARGS) +class TestStoreAccountIntegration: + """Integration tests for the store_account function. + + Mocking takes place only such that writing can be done in the temporary + directory. 
+ """ + + def test_local(self, monkeypatch, tmpdir, test_filename): + """Tests that the functions integrate correctly when storing account + locally.""" + + with monkeypatch.context() as m: + m.setattr(os, "getcwd", lambda: tmpdir) + conf.store_account(authentication_token, filename=test_filename, location="local", **DEFAULT_KWARGS) + + filepath = tmpdir.join(test_filename) + result = toml.load(filepath) + assert result == EXPECTED_CONFIG + + def test_global(self, monkeypatch, tmpdir, test_filename): + """Tests that the functions integrate correctly when storing account + globally.""" + + with monkeypatch.context() as m: + m.setattr(conf, "user_config_dir", lambda *args: tmpdir) + conf.store_account(authentication_token, filename=test_filename, location="user_config", **DEFAULT_KWARGS) + + filepath = tmpdir.join(test_filename) + result = toml.load(filepath) + assert result == EXPECTED_CONFIG + class TestSaveConfigToFile: """Tests for the store_account function.""" - def test_correct(self, test_filename, tmpdir): + def test_correct(self, tmpdir, test_filename): """Test saving a configuration file.""" filepath = str(tmpdir.join(test_filename)) @@ -459,7 +496,7 @@ def test_correct(self, test_filename, tmpdir): result = toml.load(filepath) assert result == OTHER_EXPECTED_CONFIG - def test_file_already_existed(self, test_filename, tmpdir): + def test_file_already_existed(self, tmpdir, test_filename): """Test saving a configuration file even if the file already existed.""" filepath = str(tmpdir.join(test_filename)) From 5c7840cf8197a36e52f37bc292112181de77d537 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 12:33:17 -0500 Subject: [PATCH 255/335] Update configuration.rst --- doc/introduction/configuration.rst | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 12be55f69..4aa738a1f 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -57,34 +57,28 @@ Configuration options Store your account ------------------ -Using the :func:`~.store_account` function, a configuration file can be created easily. It only requires specifying the authentication token. Apart from that, further configuration options can be passed as keyword arguments. +Using the :func:`~.store_account` function, a configuration file can be created easily. It only requires specifying the authentication token. -Default configuration -********************* - -The following is an example for using ``store_account`` with defaults: +In these examples ``"MyToken"`` contains the user-specific authentication token. .. code-block:: python import strawberryfields as sf sf.store_account("MyToken") -where ``"MyToken"`` contains the user-specific authentication token. - -It is advised to execute this code snippet **only once**, separately from any other Python scripts. - .. note:: Using the default options will store the account in the *Strawberry Fields configuration directory* by creating a ``config.toml`` file. -Configuration for a project -*************************** - -The following code snippet can be run in the *same directory* of a Python script or Jupyter Notebook that uses Strawberry Fields to create a configuration file *only for this project*: +There might be cases when you would like to have a configuration different to the default. 
The following code snippet can be run in the *same directory* of a Python script or Jupyter Notebook that uses Strawberry Fields to create a configuration file *only for this project*: .. code-block:: python import strawberryfields as sf sf.store_account("MyToken", location="local") -where ``"MyToken"`` is the user specific authentication token. +.. warning:: + + It is advised to execute this code snippet **separately** from any other Python code, such that the authentication token is not shared accidentally. + +To check out more detailed examples visit the :func:`~.store_account` documentation. From f7d1e56ccd602aa7b53cad203c29d9a14e807da9 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 12:33:26 -0500 Subject: [PATCH 256/335] Update strawberryfields/configuration.py Co-Authored-By: Josh Izaac --- strawberryfields/configuration.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 5e1a65d31..b9dd4bfcf 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -224,8 +224,20 @@ def store_account(authentication_token, filename="config.toml", location="user_c """Stores an account in a configuration file. The configuration file can be created in the following locations: - - user configuration directory ("user_config") - - current working directory ("local") + + - A global user configuration directory ("user_config") + - The current working directory ("local") + + This global user configuration directory differs depending on the operating system: + + * On Linux: ``~/.config/strawberryfields`` + * On Windows: ``~C:\Users\USERNAME\AppData\Local\Xanadu\strawberryfields`` + * On MacOS: ``~/Library/Application Support/strawberryfields`` + + By default, Strawberry Fields will load the configuration and account credentials from the global + user configuration directory, no matter the working directory. However, if there exists a configuration + file in the *local* working directory, this takes precedence. The ``"local"`` option is therefore useful + for maintaining per-project configuration settings. Args: authentication_token (str): the authentication token to use when From 43bba9eebdfdd259b61014ca8ffd1b47ab5c4c97 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 12:40:40 -0500 Subject: [PATCH 257/335] Update configuration.rst --- doc/introduction/configuration.rst | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 4aa738a1f..8e9a953ec 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -39,12 +39,14 @@ Configuration options --------------------- **authentication_token (str)** (*required*) - The authentication token to use when connecting to the API. Will be sent with every request in - the header. Corresponding environment variable: ``SF_API_AUTHENTICATION_TOKEN`` + API token for authentication to the Xanadu Cloud platform. This is required + for submitting remote jobs using :class:`~.StarshipEngine`. Corresponding + environment variable: ``SF_API_AUTHENTICATION_TOKEN`` **hostname (str)** (*optional*) - The hostname of the server to connect to. Defaults to ``localhost``. Must be one of the allowed - hosts. Corresponding environment variable: ``SF_API_HOSTNAME`` + The hostname of the server to connect to. Defaults to ``localhost``. Must + be one of the allowed hosts. 
Corresponding environment variable: + ``SF_API_HOSTNAME`` **use_ssl (bool)** (*optional*) Whether to use SSL or not when connecting to the API. True or False. @@ -57,7 +59,8 @@ Configuration options Store your account ------------------ -Using the :func:`~.store_account` function, a configuration file can be created easily. It only requires specifying the authentication token. +Using the :func:`~.store_account` function, a configuration file can be created +easily. It only requires specifying the authentication token. In these examples ``"MyToken"`` contains the user-specific authentication token. @@ -68,9 +71,13 @@ In these examples ``"MyToken"`` contains the user-specific authentication token. .. note:: - Using the default options will store the account in the *Strawberry Fields configuration directory* by creating a ``config.toml`` file. + By default, the account is stored in a ``config.toml`` file located in the + *Strawberry Fields configuration directory*. -There might be cases when you would like to have a configuration different to the default. The following code snippet can be run in the *same directory* of a Python script or Jupyter Notebook that uses Strawberry Fields to create a configuration file *only for this project*: +There might be cases when you would like to have a configuration different from the +the default. The following code snippet can be run in the *same directory* of a +Python script or Jupyter Notebook that uses Strawberry Fields to create a +configuration file *only for this project*: .. code-block:: python @@ -79,6 +86,8 @@ There might be cases when you would like to have a configuration different to th .. warning:: - It is advised to execute this code snippet **separately** from any other Python code, such that the authentication token is not shared accidentally. + It is advised to execute this code snippet **separately** from any other + Python code, such that the authentication token is not shared accidentally. -To check out more detailed examples visit the :func:`~.store_account` documentation. +To check out more detailed examples visit the :func:`~.store_account` +documentation. From 77e1c7b9723776e7a2507f8b70b9b7d20474704b Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 12:42:04 -0500 Subject: [PATCH 258/335] Update configuration.rst --- doc/introduction/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 8e9a953ec..b946d96bc 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -86,7 +86,7 @@ configuration file *only for this project*: .. warning:: - It is advised to execute this code snippet **separately** from any other + It is advised to call `store_account` **separately** from any other Python code, such that the authentication token is not shared accidentally. 
To check out more detailed examples visit the :func:`~.store_account` From f21a21cb1c77febf44f70d3bc6c4af410daede56 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 12:44:45 -0500 Subject: [PATCH 259/335] Update configuration.rst --- doc/introduction/configuration.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index b946d96bc..fc3cbef3f 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -84,10 +84,10 @@ configuration file *only for this project*: import strawberryfields as sf sf.store_account("MyToken", location="local") -.. warning:: - - It is advised to call `store_account` **separately** from any other - Python code, such that the authentication token is not shared accidentally. - To check out more detailed examples visit the :func:`~.store_account` documentation. + +.. warning:: + + It is advised to call ``store_account`` **separately** from any other + Python code, such that the authentication token is not shared accidentally. \ No newline at end of file From 80f4a724920de4acd9f31cfbf376e4f9b16fd9eb Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 12:44:58 -0500 Subject: [PATCH 260/335] Update strawberryfields/configuration.py Co-Authored-By: Josh Izaac --- strawberryfields/configuration.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index b9dd4bfcf..969d6915a 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -240,9 +240,8 @@ def store_account(authentication_token, filename="config.toml", location="user_c for maintaining per-project configuration settings. Args: - authentication_token (str): the authentication token to use when - connecting to the API; it will be sent with every request in the - header + authentication_token (str): API token for authentication to the Xanadu Cloud platform. + This is required for submitting remote jobs using :class:`~.StarshipEngine`. Kwargs: location (str): determines where the configuration file should be saved From 211af5ad6c44c72028a06c2a74be0e92e80cf320 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 12:45:22 -0500 Subject: [PATCH 261/335] Update strawberryfields/configuration.py Co-Authored-By: Josh Izaac --- strawberryfields/configuration.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 969d6915a..3d1317fa2 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -245,11 +245,10 @@ def store_account(authentication_token, filename="config.toml", location="user_c Kwargs: location (str): determines where the configuration file should be saved - filename (str): the name of the configuration file to look for - Configuration options are detailed in - :doc:`/introduction/configuration` +Additional configuration options are detailed in :doc:`/introduction/configuration` and can be passed +as keyword arguments. 
""" if location == "user_config": directory = user_config_dir("strawberryfields", "Xanadu") From fb4d7dacea769ceb891806fb025c81ba4fe840f9 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 12:47:57 -0500 Subject: [PATCH 262/335] Updating __init__ --- strawberryfields/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/strawberryfields/__init__.py b/strawberryfields/__init__.py index 649f87bae..119bd739e 100644 --- a/strawberryfields/__init__.py +++ b/strawberryfields/__init__.py @@ -27,6 +27,7 @@ from .io import load, save from .program import Program from .parameters import par_funcs as math +from .configuration import store_account __all__ = ["Engine", "StarshipEngine", "Program", "version", "save", "load", "about", "cite"] From 6940dd1b6791f9e8ea730512c27d2bc59d602366 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 13:03:53 -0500 Subject: [PATCH 263/335] Updating docstring to use raw strings --- strawberryfields/configuration.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 3d1317fa2..d769607f7 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -221,7 +221,7 @@ def parse_environment_variable(key, value): return value def store_account(authentication_token, filename="config.toml", location="user_config", **kwargs): - """Stores an account in a configuration file. + r"""Stores an account in a configuration file. The configuration file can be created in the following locations: @@ -247,8 +247,8 @@ def store_account(authentication_token, filename="config.toml", location="user_c location (str): determines where the configuration file should be saved filename (str): the name of the configuration file to look for -Additional configuration options are detailed in :doc:`/introduction/configuration` and can be passed -as keyword arguments. + Additional configuration options are detailed in :doc:`/introduction/configuration` and can be passed + as keyword arguments. """ if location == "user_config": directory = user_config_dir("strawberryfields", "Xanadu") From 23cbcfa54b8bea50979ebf002be7cf082f456c95 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 13:52:03 -0500 Subject: [PATCH 264/335] Change job status property to string instead of enum --- strawberryfields/api/connection.py | 10 +++++----- strawberryfields/api/job.py | 20 ++++++++++---------- strawberryfields/engine.py | 10 +++++----- tests/api/test_connection.py | 6 +++--- tests/api/test_starship_engine.py | 4 ++-- 5 files changed, 25 insertions(+), 25 deletions(-) diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index 93131bdc2..19fa522c9 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -57,14 +57,14 @@ class Connection: >>> job >>> job.status - + "queued" >>> job.result AttributeError >>> job = connection.get_job(known_job_id) >>> job >>> job.status - + "complete" >>> job.result [[0 1 0 2 1 0 0 0]] @@ -201,16 +201,16 @@ def get_job(self, job_id: str) -> Job: "Failed to get job: {}".format(self._format_error_message(response)) ) - def get_job_status(self, job_id: str) -> JobStatus: + def get_job_status(self, job_id: str) -> str: """Returns the status of a job. 
Args: job_id (str): the job ID Returns: - strawberryfields.engine.JobStatus: the job status + str: the job status """ - return JobStatus(self.get_job(job_id).status) + return self.get_job(job_id).status def get_job_result(self, job_id: str) -> Result: """Returns the result of a job. diff --git a/strawberryfields/api/job.py b/strawberryfields/api/job.py index f88699e61..b1b965ad5 100644 --- a/strawberryfields/api/job.py +++ b/strawberryfields/api/job.py @@ -87,13 +87,13 @@ def id(self) -> str: return self._id @property - def status(self) -> JobStatus: + def status(self) -> str: """The job status. Returns: - strawberryfields.api.JobStatus + str """ - return self._status + return self._status.value @property def result(self) -> Result: @@ -105,10 +105,10 @@ def result(self) -> Result: Returns: strawberryfields.api.Result """ - if self.status != JobStatus.COMPLETED: + if self._status != JobStatus.COMPLETED: raise AttributeError( "The result is undefined for jobs that are not completed " - "(current status: {})".format(self.status.value) + "(current status: {})".format(self._status.value) ) return self._result @@ -118,8 +118,8 @@ def refresh(self): Refreshing only has an effect for open or queued jobs. """ - if self.status.is_final: - log.warning("A %s job cannot be refreshed", self.status.value) + if self._status.is_final: + log.warning("A %s job cannot be refreshed", self._status.value) return self._status = self._connection.get_job_status(self.id) if self._status == JobStatus.COMPLETED: @@ -130,15 +130,15 @@ def cancel(self): Only an open or queued job can be cancelled; an exception is raised otherwise. """ - if self.status.is_final: + if self._status.is_final: raise InvalidJobOperationError( - "A {} job cannot be cancelled".format(self.status.value) + "A {} job cannot be cancelled".format(self._status.value) ) self._connection.cancel_job(self.id) def __repr__(self): return "<{}: id={}, status={}>".format( - self.__class__.__name__, self.id, self.status.value + self.__class__.__name__, self.id, self._status.value ) def __str__(self): diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 8ab151b98..fa2d365f5 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -25,7 +25,7 @@ import numpy as np -from strawberryfields.api import Connection, Job, JobStatus, Result +from strawberryfields.api import Connection, Job, Result from strawberryfields.configuration import load_config from strawberryfields.program import Program @@ -487,12 +487,12 @@ class StarshipEngine: >>> job = engine.run_async(program, shots=1) >>> job.status - + "queued" >>> job.result InvalidJobOperationError >>> job.refresh() >>> job.status - + "complete" >>> job.result [[0 1 0 2 1 0 0 0]] @@ -563,9 +563,9 @@ def run(self, program: Program, shots: int = 1) -> Optional[Result]: try: while True: job.refresh() - if job.status == JobStatus.COMPLETED: + if job.status == "complete": return job.result - if job.status == JobStatus.FAILED: + if job.status == "failed": log.warning( "The remote job failed due to an internal server error; " "please try again." 
diff --git a/tests/api/test_connection.py b/tests/api/test_connection.py index ee8c38318..a7ab9526c 100644 --- a/tests/api/test_connection.py +++ b/tests/api/test_connection.py @@ -76,7 +76,7 @@ def test_create_job(self, prog, connection, monkeypatch): job = connection.create_job("chip2", prog, 1) assert job.id == id_ - assert job.status == status + assert job.status == status.value def test_create_job_error(self, prog, connection, monkeypatch): """Tests a failed job creation flow.""" @@ -125,7 +125,7 @@ def test_get_job(self, connection, monkeypatch): job = connection.get_job(id_) assert job.id == id_ - assert job.status == status + assert job.status == status.value def test_get_job_error(self, connection, monkeypatch): """Tests a failed job request.""" @@ -144,7 +144,7 @@ def test_get_job_status(self, connection, monkeypatch): mock_return(MockResponse(200, {"id": id_, "status": status.value})), ) - assert connection.get_job_status(id_) == status + assert connection.get_job_status(id_) == status.value def test_get_job_status_error(self, connection, monkeypatch): """Tests a failed job status request.""" diff --git a/tests/api/test_starship_engine.py b/tests/api/test_starship_engine.py index 8be5357e9..e30a4d17d 100644 --- a/tests/api/test_starship_engine.py +++ b/tests/api/test_starship_engine.py @@ -83,12 +83,12 @@ def test_run_async(self, connection, prog, job_to_complete): engine = StarshipEngine("chip2", connection=connection) job = engine.run_async(prog) - assert job.status == JobStatus.OPEN + assert job.status == JobStatus.OPEN.value for _ in range(MockServer.REQUESTS_BEFORE_COMPLETED): job.refresh() - assert job.status == JobStatus.COMPLETED + assert job.status == JobStatus.COMPLETED.value assert np.array_equal(job.result.samples, np.array([[1, 2], [3, 4]])) with pytest.raises( From 3096218562ac518c5a6b3317eb3992a095cb1e06 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 15:02:46 -0500 Subject: [PATCH 265/335] Update strawberryfields/api/result.py Co-Authored-By: antalszava --- strawberryfields/api/result.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/strawberryfields/api/result.py b/strawberryfields/api/result.py index 5b732fca2..427526d8d 100644 --- a/strawberryfields/api/result.py +++ b/strawberryfields/api/result.py @@ -75,9 +75,7 @@ def __init__(self, samples, is_stateful=True): def samples(self): """Measurement samples. - Returned measurement samples will have shape ``(modes,)``. If multiple - shots are requested during execution, the returned measurement samples - will instead have shape ``(shots, modes)``. + Returned measurement samples will have shape ``(shots, modes)``. Returns: array[array[float, int]]: measurement samples returned from From 3817436b43e19b0c501a859186cbcf562d68c1a8 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 15:02:59 -0500 Subject: [PATCH 266/335] Update strawberryfields/engine.py Co-Authored-By: antalszava --- strawberryfields/engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index fa2d365f5..3b6f17b27 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -468,7 +468,7 @@ def run(self, program, *, args=None, compile_options=None, run_options=None): class StarshipEngine: - """A quantum program executor engine that that provides a simple interface for + """A quantum program executor engine that provides a simple interface for running remote jobs in a blocking or non-blocking manner. 
**Example:** From 2c1a44e50198af48583d149d0e31c47c03c78652 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 15:26:53 -0500 Subject: [PATCH 267/335] Clean up docstrings --- strawberryfields/api/__init__.py | 2 ++ strawberryfields/api/connection.py | 10 +++++----- strawberryfields/api/job.py | 7 ++++--- strawberryfields/engine.py | 2 +- 4 files changed, 12 insertions(+), 9 deletions(-) diff --git a/strawberryfields/api/__init__.py b/strawberryfields/api/__init__.py index fe0c5dcfe..24f9c6c19 100644 --- a/strawberryfields/api/__init__.py +++ b/strawberryfields/api/__init__.py @@ -23,3 +23,5 @@ from .connection import Connection, RequestFailedError from .job import InvalidJobOperationError, Job, JobStatus from .result import Result + +__all__ = ["Connection", "Job", "Result"] diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index 19fa522c9..c59dc4ec6 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -43,7 +43,7 @@ class Connection: For basic usage, it is not necessary to manually instantiate this object; the user is encouraged to use the higher-level interface provided by - :class:`~strawberryfields.engine.StarshipEngine`. + :class:`~strawberryfields.StarshipEngine`. **Example:** @@ -140,7 +140,7 @@ def create_job(self, target: str, program: Program, shots: int) -> Job: shots (int): the number of shots Returns: - strawberryfields.engine.Job: the created job + strawberryfields.api.Job: the created job """ # Serialize a blackbird circuit for network transmission bb = to_blackbird(program) @@ -176,7 +176,7 @@ def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]: then ``after`` are returned Returns: - List[strawberryfields.engine.Job]: the jobs + List[strawberryfields.api.Job]: the jobs """ raise NotImplementedError("This feature is not yet implemented") @@ -187,7 +187,7 @@ def get_job(self, job_id: str) -> Job: job_id (str): the job ID Returns: - strawberryfields.engine.Job: the job + strawberryfields.api.Job: the job """ path = "/jobs/{}".format(job_id) response = requests.get(self._url(path), headers=self._headers) @@ -219,7 +219,7 @@ def get_job_result(self, job_id: str) -> Result: job_id (str): the job ID Returns: - strawberryfields.engine.Result: the job result + strawberryfields.api.Result: the job result """ path = "/jobs/{}/result".format(job_id) response = requests.get( diff --git a/strawberryfields/api/job.py b/strawberryfields/api/job.py index b1b965ad5..7f37e59b0 100644 --- a/strawberryfields/api/job.py +++ b/strawberryfields/api/job.py @@ -62,12 +62,13 @@ class Job: """Represents a remote job that can be queried for its status or result. This object should typically not be instantiated directly, but returned by an - ``Engine`` or ``Connection`` when a job is run. + :class:`strawberryfields.StarshipEngine` or :class:`strawberryfields.api.Connection` + when a job is run. 
Args: id_ (str): the job ID - status (strawberryfields.engine.JobStatus): the job status - connection (strawberryfields.engine.Connection): the connection over which the + status (strawberryfields.api.JobStatus): the job status + connection (strawberryfields.api.Connection): the connection over which the job is managed """ diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 3b6f17b27..67cf8abc2 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -587,7 +587,7 @@ def run_async(self, program: Program, shots: int = 1) -> Job: shots (int): the number of shots for which to run the job Returns: - strawberryfields.engine.Job: the created remote job + strawberryfields.api.Job: the created remote job """ return self._connection.create_job(self.target, program, shots) From bdd6c438199118d19f63554bc4c393b35f368ffd Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 15:34:20 -0500 Subject: [PATCH 268/335] update store_account logic with no directory case --- strawberryfields/configuration.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index d769607f7..7a55fee5e 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -252,6 +252,10 @@ def store_account(authentication_token, filename="config.toml", location="user_c """ if location == "user_config": directory = user_config_dir("strawberryfields", "Xanadu") + + # Create target Directory if it doesn't exist + if not os.path.exists(directory): + os.mkdir(directory) elif location == "local": directory = os.getcwd() else: From 24a1a187f9018403b3f6e7cb6b7143b30a7a26b7 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 15:35:00 -0500 Subject: [PATCH 269/335] Add new api package to docs --- doc/code/sf_api.rst | 14 ++++++++++++++ doc/index.rst | 1 + 2 files changed, 15 insertions(+) create mode 100644 doc/code/sf_api.rst diff --git a/doc/code/sf_api.rst b/doc/code/sf_api.rst new file mode 100644 index 000000000..58ffd0584 --- /dev/null +++ b/doc/code/sf_api.rst @@ -0,0 +1,14 @@ +sf.api +====== + +.. currentmodule:: strawberryfields.api + +.. warning:: + + Unless you are a Strawberry Fields developer, you likely do not need + to use these classes directly. + +.. 
automodapi:: strawberryfields.api + :no-heading: + :no-inheritance-diagram: + :include-all-objects: diff --git a/doc/index.rst b/doc/index.rst index c2a8b438f..2997204c5 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -184,3 +184,4 @@ Strawberry Fields is **free** and **open source**, released under the Apache Lic code/sf_program code/sf_program_utils code/sf_parameters + code/sf_api From 65956cde9a56f0bc134911c22d87842d262b55cf Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 15:38:04 -0500 Subject: [PATCH 270/335] Fixed a couple of outdated docstring types --- strawberryfields/engine.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 67cf8abc2..af8ec5f21 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -498,7 +498,7 @@ class StarshipEngine: Args: target (str): the target device - connection (strawberryfields.engine.Connection): a connection to the remote job + connection (strawberryfields.api.Connection): a connection to the remote job execution platform """ @@ -556,7 +556,7 @@ def run(self, program: Program, shots: int = 1) -> Optional[Result]: shots (int): the number of shots for which to run the job Returns: - [strawberryfields.engine.Result, None]: the job result if successful, and + [strawberryfields.api.Result, None]: the job result if successful, and ``None`` otherwise """ job = self.run_async(program, shots) From 2ca57aaec7f842ed56b5968b130fc75742fe9844 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 15:38:48 -0500 Subject: [PATCH 271/335] Update strawberryfields/engine.py Co-Authored-By: Nathan Killoran --- strawberryfields/engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index af8ec5f21..43ba1da9d 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -474,7 +474,7 @@ class StarshipEngine: **Example:** The following examples instantiate an engine with the default configuration, and - runs both blocking and non-blocking jobs. + run both blocking and non-blocking jobs. 
Run a blocking job: From 175b0353c13e5f0bdca58ddd930b3fafb6190ce9 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 15:57:50 -0500 Subject: [PATCH 272/335] Minor cleanup --- strawberryfields/api/job.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/api/job.py b/strawberryfields/api/job.py index 7f37e59b0..fa5903e84 100644 --- a/strawberryfields/api/job.py +++ b/strawberryfields/api/job.py @@ -122,7 +122,7 @@ def refresh(self): if self._status.is_final: log.warning("A %s job cannot be refreshed", self._status.value) return - self._status = self._connection.get_job_status(self.id) + self._status = JobStatus(self._connection.get_job_status(self.id)) if self._status == JobStatus.COMPLETED: self._result = self._connection.get_job_result(self.id) From a5cf31d974dcd140de61b96ddb97207c7b307e5f Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 16:18:05 -0500 Subject: [PATCH 273/335] Add api package tests to makefile and travis config --- .travis.yml | 2 +- Makefile | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 4a1883ec3..2bff03b77 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ env: - LOGGING=info matrix: include: - - env: OPTIONS="frontend or fock or gaussian or apps" + - env: OPTIONS="frontend or fock or gaussian or apps or api" - env: OPTIONS="tf and pure" - env: OPTIONS="tf and mixed" - env: BATCHED=1 OPTIONS="tf and pure" diff --git a/Makefile b/Makefile index bd0ab1f1c..093a81dd0 100644 --- a/Makefile +++ b/Makefile @@ -53,7 +53,7 @@ docs: clean-docs: make -C doc clean -test: test-frontend test-gaussian test-fock test-tf batch-test-tf test-apps +test: test-frontend test-gaussian test-fock test-tf batch-test-tf test-apps test-api test-%: @echo "Testing $(subst test-,,$@) backend..." @@ -63,7 +63,7 @@ batch-test-%: @echo "Testing $(subst batch-test-,,$@) backend in batch mode..." export BATCHED=1 && $(PYTHON) $(TESTRUNNER) -m $(subst batch-test-,,"$@") -coverage: coverage-frontend coverage-gaussian coverage-fock coverage-tf batch-coverage-tf coverage-apps +coverage: coverage-frontend coverage-gaussian coverage-fock coverage-tf batch-coverage-tf coverage-apps coverage-api coverage-%: @echo "Generating coverage report for $(subst coverage-,,$@)..." 
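
The patches above replace the enum-valued ``Job.status`` with a plain string, document
the ``(shots, modes)`` shape of ``Result.samples``, and wire the new
``strawberryfields.api`` package into the docs, the Makefile and CI. The sketch below
illustrates how these pieces are intended to fit together. It is a minimal sketch, not
part of any patch: it assumes an account has already been stored with
``sf.store_account``, that ``"chip2"`` is an available target for that account, and the
placeholder circuit is not guaranteed to pass the device's compilation checks.

.. code-block:: python

    import strawberryfields as sf
    from strawberryfields import ops

    # Placeholder 8-mode program; a real "chip2" job must respect the
    # device's allowed gate set and connectivity.
    prog = sf.Program(8)
    with prog.context as q:
        ops.S2gate(1.0) | (q[0], q[4])
        ops.MeasureFock() | q

    # Assumes the engine picks up the stored credentials by default; a
    # Connection object can also be passed explicitly via `connection=`.
    eng = sf.StarshipEngine("chip2")

    # Blocking execution: run() polls until the job is "complete" or
    # "failed", returning a Result on success and None otherwise.
    result = eng.run(prog, shots=10)
    if result is not None:
        print(result.samples)  # array of shape (shots, modes)

    # Non-blocking execution: run_async() returns a Job immediately, whose
    # status is now a plain string such as "queued", "complete" or "failed".
    job = eng.run_async(prog, shots=10)
    print(job.status)
    job.refresh()  # query the platform for an updated status
    if job.status == "complete":
        print(job.result.samples)

Note that ``job.result`` raises an error until the job has completed, and
``job.cancel()`` is only valid while the job is still open or queued.
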
From 8ddd9c2105ed47d11a4ce6c24126d4b85be9356b Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 16:25:30 -0500 Subject: [PATCH 274/335] Adding nested directory creation logic; adding tests --- strawberryfields/configuration.py | 3 +- tests/frontend/test_configuration.py | 48 +++++++++++++++++++++++++++- 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 7a55fee5e..c0552c160 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -254,8 +254,7 @@ def store_account(authentication_token, filename="config.toml", location="user_c directory = user_config_dir("strawberryfields", "Xanadu") # Create target Directory if it doesn't exist - if not os.path.exists(directory): - os.mkdir(directory) + os.makedirs(directory, exist_ok=True) elif location == "local": directory = os.getcwd() else: diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index d30cac379..1267511d2 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -189,7 +189,7 @@ def raise_wrapper(ex): with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: "NoConfigFileHere") - m.setenv("SF_CONF", tmpdir) + m.setenv("SF_CONF", str(tmpdir)) m.setattr(conf, "user_config_dir", lambda *args: "NotTheFileName") config_filepath = conf.get_config_filepath(filename=test_filename) @@ -453,6 +453,30 @@ def test_location_not_recognized_error(self, monkeypatch, tmpdir, test_filename) ): conf.store_account(authentication_token, filename=test_filename, location="UNRECOGNIZED_LOCATION", **DEFAULT_KWARGS) + def test_non_existing_directory_does_not_raise_file_not_found_error(self, monkeypatch, tmpdir, test_filename): + """Tests that an error is raised if the configuration file is supposed + to be created in non-existing directory when using user_config_dir and + if os.makedirs does not create the directory.""" + + with monkeypatch.context() as m: + m.setattr(conf, "user_config_dir", lambda *args: tmpdir.join("new_dir")) + conf.store_account(authentication_token, filename=test_filename, location="user_config", **DEFAULT_KWARGS) + + + def test_non_existing_directory_without_makedirs_raises_error(self, monkeypatch, tmpdir, test_filename): + """Tests that an error is raised if the configuration file is supposed + to be created in non-existing directory when using user_config_dir and + if os.makedirs does not create the directory.""" + + with monkeypatch.context() as m: + m.setattr(os, "makedirs", lambda a, **kwargs: None) + m.setattr(conf, "user_config_dir", lambda *args: tmpdir.join("new_dir")) + with pytest.raises( + FileNotFoundError, + match="No such file or directory", + ): + conf.store_account(authentication_token, filename=test_filename, location="user_config", **DEFAULT_KWARGS) + class TestStoreAccountIntegration: """Integration tests for the store_account function. 
@@ -484,6 +508,28 @@ def test_global(self, monkeypatch, tmpdir, test_filename): result = toml.load(filepath) assert result == EXPECTED_CONFIG + def test_directory_is_created(self, monkeypatch, tmpdir, test_filename): + + recursive_dir = tmpdir.join(".new_dir") + with monkeypatch.context() as m: + m.setattr(conf, "user_config_dir", lambda *args: recursive_dir) + conf.store_account(authentication_token, filename=test_filename, location="user_config", **DEFAULT_KWARGS) + + filepath = os.path.join(recursive_dir, test_filename) + result = toml.load(filepath) + assert result == EXPECTED_CONFIG + + def test_nested_directory_is_created(self, monkeypatch, tmpdir, test_filename): + + recursive_dir = tmpdir.join(".new_dir", "new_dir_again") + with monkeypatch.context() as m: + m.setattr(conf, "user_config_dir", lambda *args: recursive_dir) + conf.store_account(authentication_token, filename=test_filename, location="user_config", **DEFAULT_KWARGS) + + filepath = os.path.join(recursive_dir, test_filename) + result = toml.load(filepath) + assert result == EXPECTED_CONFIG + class TestSaveConfigToFile: """Tests for the store_account function.""" From 7b9f687bf7f831fa16d2232548ca4b02e4091e54 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 16:53:16 -0500 Subject: [PATCH 275/335] Reword warning message. --- doc/introduction/configuration.rst | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 4acc7839f..5d264c406 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -75,6 +75,12 @@ In these examples ``"MyToken"`` contains the user-specific authentication token. By default, the account is stored in a ``config.toml`` file located in the *Strawberry Fields configuration directory*. +.. warning:: + Typically, a user should only ever have to execute this code snippet once, when + initially configurating their system to connect to the Xanadu cloud platform. + It is advised to call ``store_account`` **separately** from any other + Python code, such that the authentication token is not shared accidentally. + There might be cases when you would like to have a configuration different from the the default. The following code snippet can be run in the *same directory* of a Python script or Jupyter Notebook that uses Strawberry Fields to create a @@ -88,7 +94,3 @@ configuration file *only for this project*: To check out more detailed examples visit the :func:`~.store_account` documentation. -.. warning:: - - It is advised to call ``store_account`` **separately** from any other - Python code, such that the authentication token is not shared accidentally. \ No newline at end of file From 644814fc80cb6660b29a23b15d39af85eec7eb3e Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 16:58:52 -0500 Subject: [PATCH 276/335] Mention committing the auth token --- doc/introduction/configuration.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 5d264c406..a845f4b38 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -79,7 +79,8 @@ In these examples ``"MyToken"`` contains the user-specific authentication token. Typically, a user should only ever have to execute this code snippet once, when initially configurating their system to connect to the Xanadu cloud platform. 
It is advised to call ``store_account`` **separately** from any other - Python code, such that the authentication token is not shared accidentally. + Python code, such that the authentication token is not shared or committed + accidentally. There might be cases when you would like to have a configuration different from the the default. The following code snippet can be run in the *same directory* of a From 0e04cf4130bea4f079ff404484f6d61bb01521c9 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 17:03:52 -0500 Subject: [PATCH 277/335] Reword description of local config --- doc/introduction/configuration.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index a845f4b38..e809335e4 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -82,10 +82,9 @@ In these examples ``"MyToken"`` contains the user-specific authentication token. Python code, such that the authentication token is not shared or committed accidentally. -There might be cases when you would like to have a configuration different from the -the default. The following code snippet can be run in the *same directory* of a +The following code snippet can be run in the *same directory* of a Python script or Jupyter Notebook that uses Strawberry Fields to create a -configuration file *only for this project*: +configuration file locally: .. code-block:: python From 34d1f0b8fa737990b0e1c9c5a33a78325949389e Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Thu, 27 Feb 2020 17:26:39 -0500 Subject: [PATCH 278/335] Adjust ordering in toctree to be alphabetical --- doc/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/index.rst b/doc/index.rst index 2997204c5..947057ae8 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -174,6 +174,7 @@ Strawberry Fields is **free** and **open source**, released under the Apache Lic code/sf_apps code/sf_ops code/sf_utils + code/sf_api code/sf_backends code/sf_circuitspecs code/sf_circuitdrawer @@ -184,4 +185,3 @@ Strawberry Fields is **free** and **open source**, released under the Apache Lic code/sf_program code/sf_program_utils code/sf_parameters - code/sf_api From a3b841004792c0fa60957c7681aae5a53f9179dd Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 17:55:57 -0500 Subject: [PATCH 279/335] Adding examples to the docstring --- strawberryfields/configuration.py | 44 ++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index c0552c160..66a5e8806 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -221,7 +221,8 @@ def parse_environment_variable(key, value): return value def store_account(authentication_token, filename="config.toml", location="user_config", **kwargs): - r"""Stores an account in a configuration file. + r"""Configure Strawberry Fields for access to the Xanadu cloud platform by + saving your account credentials. The configuration file can be created in the following locations: @@ -239,6 +240,47 @@ def store_account(authentication_token, filename="config.toml", location="user_c file in the *local* working directory, this takes precedence. The ``"local"`` option is therefore useful for maintaining per-project configuration settings. + **Examples:** + + In these examples ``"MyToken"`` contains the user-specific authentication + token. 
+ + >>> import strawberryfields as sf + >>> sf.store_account("MyToken") + + Creating the following ``"config.toml"`` file: + + .. code-block:: toml + + [api] + authentication_token = "MyToken" + hostname = "localhost" + use_ssl = true + port = 443 + + You can also create the configuration file locally (in the **current + working directory**) the following way: + + >>> import strawberryfields as sf + >>> sf.store_account("MyToken", location="local") + + Each of the configuration options (check out the + :doc:`/introduction/configuration` page for a list of options) can be + passed as further keyword arguments as well: + + >>> import strawberryfields as sf + >>> sf.store_account("MyToken", location="local", hostname="MyHost", use_ssl=False, port=123) + + Creating the following ``"config.toml"`` file in the **current working directory**: + + .. code-block:: toml + + [api] + authentication_token = "MyToken" + hostname = "MyHost" + use_ssl = false + port = 123 + Args: authentication_token (str): API token for authentication to the Xanadu Cloud platform. This is required for submitting remote jobs using :class:`~.StarshipEngine`. From 3e0ca150e35454f748e6df7981c4b305a9251581 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 17:57:54 -0500 Subject: [PATCH 280/335] Update docstring --- strawberryfields/configuration.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 66a5e8806..546a8db30 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -245,10 +245,12 @@ def store_account(authentication_token, filename="config.toml", location="user_c In these examples ``"MyToken"`` contains the user-specific authentication token. + The access to the Xanadu Cloud can be configured simply by running. + >>> import strawberryfields as sf >>> sf.store_account("MyToken") - Creating the following ``"config.toml"`` file: + This creates the following ``"config.toml"`` file: .. code-block:: toml @@ -271,7 +273,7 @@ def store_account(authentication_token, filename="config.toml", location="user_c >>> import strawberryfields as sf >>> sf.store_account("MyToken", location="local", hostname="MyHost", use_ssl=False, port=123) - Creating the following ``"config.toml"`` file in the **current working directory**: + This creates the following ``"config.toml"`` file in the **current working directory**: .. 
code-block:: toml From 4c7cce133ea6615ef73c0d0edf07e0e14ee275bb Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 18:35:12 -0500 Subject: [PATCH 281/335] Reverting to having the test data in the tests instead of in fixtures --- tests/conftest.py | 12 ---- tests/frontend/test_configuration.py | 96 ++++++++++++++-------------- 2 files changed, 48 insertions(+), 60 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 4802ab0f6..435ef6732 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -47,18 +47,6 @@ pytest.param("gaussian", marks=pytest.mark.gaussian), ] -TEST_FILENAME = "test_config.toml" - -@pytest.fixture(scope="function") -def test_filename(): - """Using a test filename for the tests.""" - return TEST_FILENAME - -@pytest.fixture(scope="function") -def test_filepath(tmpdir): - """Using a test filepath for the tests.""" - return tmpdir.join(TEST_FILENAME) - if tf_available and tf.__version__[:3] == "1.3": from strawberryfields.backends.tfbackend import TFBackend diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 1267511d2..13b21cc86 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -75,11 +75,11 @@ def test_not_found_warning(self, caplog): conf.load_config(filename='NotAFileName') assert "No Strawberry Fields configuration file found." in caplog.text - def test_keywords_take_precedence_over_everything(self, monkeypatch, tmpdir, test_filepath): + def test_keywords_take_precedence_over_everything(self, monkeypatch, tmpdir): """Test that the keyword arguments passed to load_config take precedence over data in environment variables or data in a configuration file.""" - with open(test_filepath, "w") as f: + with open(tmpdir.join("test_config.toml"), "w") as f: f.write(TEST_FILE) with monkeypatch.context() as m: @@ -98,10 +98,10 @@ def test_keywords_take_precedence_over_everything(self, monkeypatch, tmpdir, tes assert configuration == OTHER_EXPECTED_CONFIG - def test_environment_variables_take_precedence_over_conf_file(self, monkeypatch, tmpdir, test_filepath): + def test_environment_variables_take_precedence_over_conf_file(self, monkeypatch, tmpdir): """Test that the data in environment variables take precedence over data in a configuration file.""" - with open(test_filepath, "w") as f: + with open(tmpdir.join("test_config.toml"), "w") as f: f.write(TEST_FILE) with monkeypatch.context() as m: @@ -117,11 +117,11 @@ def test_environment_variables_take_precedence_over_conf_file(self, monkeypatch, assert configuration == OTHER_EXPECTED_CONFIG - def test_conf_file_loads_well(self, monkeypatch, tmpdir, test_filepath): + def test_conf_file_loads_well(self, monkeypatch, tmpdir): """Test that the load_config function loads a configuration from a TOML file correctly.""" - filename = test_filepath + filename = tmpdir.join("config.toml") with open(filename, "w") as f: f.write(TEST_FILE) @@ -175,13 +175,13 @@ def test_current_directory(self, tmpdir, monkeypatch): assert config_filepath == tmpdir.join(filename) - def test_env_variable(self, monkeypatch, tmpdir, test_filename, test_filepath): + def test_env_variable(self, monkeypatch, tmpdir): """Test that the correct configuration file is found using the correct environment variable (SF_CONF). 
This is a test case for when there is no configuration file in the current directory.""" - with open(test_filepath, "w") as f: + with open(tmpdir.join("config.toml"), "w") as f: f.write(TEST_FILE) def raise_wrapper(ex): @@ -192,11 +192,11 @@ def raise_wrapper(ex): m.setenv("SF_CONF", str(tmpdir)) m.setattr(conf, "user_config_dir", lambda *args: "NotTheFileName") - config_filepath = conf.get_config_filepath(filename=test_filename) + config_filepath = conf.get_config_filepath(filename="config.toml") - assert config_filepath == test_filepath + assert config_filepath == tmpdir.join("config.toml") - def test_user_config_dir(self, monkeypatch, tmpdir, test_filename, test_filepath): + def test_user_config_dir(self, monkeypatch, tmpdir): """Test that the correct configuration file is found using the correct argument to the user_config_dir function. @@ -204,7 +204,7 @@ def test_user_config_dir(self, monkeypatch, tmpdir, test_filename, test_filepath -in the current directory or -in the directory contained in the corresponding environment variable.""" - with open(test_filepath, "w") as f: + with open(tmpdir.join("config.toml"), "w") as f: f.write(TEST_FILE) def raise_wrapper(ex): @@ -215,11 +215,11 @@ def raise_wrapper(ex): m.setenv("SF_CONF", "NoConfigFileHere") m.setattr(conf, "user_config_dir", lambda x, *args: tmpdir if x=="strawberryfields" else "NoConfigFileHere") - config_filepath = conf.get_config_filepath(filename=test_filename) + config_filepath = conf.get_config_filepath(filename="config.toml") - assert config_filepath == test_filepath + assert config_filepath == tmpdir.join("config.toml") - def test_no_config_file_found_returns_none(self, monkeypatch, tmpdir, test_filename): + def test_no_config_file_found_returns_none(self, monkeypatch, tmpdir): """Test that the get_config_filepath returns None if the configuration file is nowhere to be found. 
@@ -236,16 +236,16 @@ def raise_wrapper(ex): m.setenv("SF_CONF", "NoConfigFileHere") m.setattr(conf, "user_config_dir", lambda *args: "NoConfigFileHere") - config_filepath = conf.get_config_filepath(filename=test_filename) + config_filepath = conf.get_config_filepath(filename="config.toml") assert config_filepath is None class TestLoadConfigFile: """Tests the load_config_file function.""" - def test_load_config_file(self, monkeypatch, tmpdir, test_filename, test_filepath): + def test_load_config_file(self, monkeypatch, tmpdir): """Tests that configuration is loaded correctly from a TOML file.""" - filename = test_filepath + filename = tmpdir.join("test_config.toml") with open(filename, "w") as f: f.write(TEST_FILE) @@ -254,10 +254,10 @@ def test_load_config_file(self, monkeypatch, tmpdir, test_filename, test_filepat assert loaded_config == EXPECTED_CONFIG - def test_loading_absolute_path(self, monkeypatch, tmpdir, test_filename, test_filepath): + def test_loading_absolute_path(self, monkeypatch, tmpdir): """Test that the default configuration file can be loaded via an absolute path.""" - filename = os.path.abspath(test_filepath) + filename = tmpdir.join("test_config.toml") with open(filename, "w") as f: @@ -407,7 +407,7 @@ def mock_create_config(authentication_token="", **kwargs): class TestStoreAccount: """Tests for the store_account function.""" - def test_config_created_locally(self, monkeypatch, tmpdir, test_filename): + def test_config_created_locally(self, monkeypatch, tmpdir): """Tests that a configuration file was created in the current directory.""" mock_save_config_file = MockSaveConfigToFile() @@ -420,12 +420,12 @@ def test_config_created_locally(self, monkeypatch, tmpdir, test_filename): m.setattr(conf, "user_config_dir", lambda *args: "NotTheCorrectDir") m.setattr(conf, "create_config", mock_create_config) m.setattr(conf, "save_config_to_file", lambda a, b: mock_save_config_file.update(a, b)) - conf.store_account(authentication_token, filename=test_filename, location="local", **DEFAULT_KWARGS) + conf.store_account(authentication_token, filename="config.toml", location="local", **DEFAULT_KWARGS) assert mock_save_config_file.config == EXPECTED_CONFIG - assert mock_save_config_file.path == tmpdir.join(test_filename) + assert mock_save_config_file.path == tmpdir.join("config.toml") - def test_global_config_created(self, monkeypatch, tmpdir, test_filename): + def test_global_config_created(self, monkeypatch, tmpdir): """Tests that a configuration file was created in the user configuration directory for Strawberry Fields.""" mock_save_config_file = MockSaveConfigToFile() @@ -438,12 +438,12 @@ def test_global_config_created(self, monkeypatch, tmpdir, test_filename): m.setattr(conf, "user_config_dir", lambda *args: tmpdir) m.setattr(conf, "create_config", mock_create_config) m.setattr(conf, "save_config_to_file", lambda a, b: mock_save_config_file.update(a, b)) - conf.store_account(authentication_token, filename=test_filename, location="user_config", **DEFAULT_KWARGS) + conf.store_account(authentication_token, filename="config.toml", location="user_config", **DEFAULT_KWARGS) assert mock_save_config_file.config == EXPECTED_CONFIG - assert mock_save_config_file.path == tmpdir.join(test_filename) + assert mock_save_config_file.path == tmpdir.join("config.toml") - def test_location_not_recognized_error(self, monkeypatch, tmpdir, test_filename): + def test_location_not_recognized_error(self, monkeypatch, tmpdir): """Tests that an error is raised if the configuration file is supposed to be 
created in an unrecognized directory.""" @@ -451,19 +451,19 @@ def test_location_not_recognized_error(self, monkeypatch, tmpdir, test_filename) conf.ConfigurationError, match="This location is not recognized.", ): - conf.store_account(authentication_token, filename=test_filename, location="UNRECOGNIZED_LOCATION", **DEFAULT_KWARGS) + conf.store_account(authentication_token, filename="config.toml", location="UNRECOGNIZED_LOCATION", **DEFAULT_KWARGS) - def test_non_existing_directory_does_not_raise_file_not_found_error(self, monkeypatch, tmpdir, test_filename): + def test_non_existing_directory_does_not_raise_file_not_found_error(self, monkeypatch, tmpdir): """Tests that an error is raised if the configuration file is supposed to be created in non-existing directory when using user_config_dir and if os.makedirs does not create the directory.""" with monkeypatch.context() as m: m.setattr(conf, "user_config_dir", lambda *args: tmpdir.join("new_dir")) - conf.store_account(authentication_token, filename=test_filename, location="user_config", **DEFAULT_KWARGS) + conf.store_account(authentication_token, filename="config.toml", location="user_config", **DEFAULT_KWARGS) - def test_non_existing_directory_without_makedirs_raises_error(self, monkeypatch, tmpdir, test_filename): + def test_non_existing_directory_without_makedirs_raises_error(self, monkeypatch, tmpdir): """Tests that an error is raised if the configuration file is supposed to be created in non-existing directory when using user_config_dir and if os.makedirs does not create the directory.""" @@ -475,7 +475,7 @@ def test_non_existing_directory_without_makedirs_raises_error(self, monkeypatch, FileNotFoundError, match="No such file or directory", ): - conf.store_account(authentication_token, filename=test_filename, location="user_config", **DEFAULT_KWARGS) + conf.store_account(authentication_token, filename="config.toml", location="user_config", **DEFAULT_KWARGS) class TestStoreAccountIntegration: """Integration tests for the store_account function. @@ -484,68 +484,68 @@ class TestStoreAccountIntegration: directory. 
""" - def test_local(self, monkeypatch, tmpdir, test_filename): + def test_local(self, monkeypatch, tmpdir): """Tests that the functions integrate correctly when storing account locally.""" with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: tmpdir) - conf.store_account(authentication_token, filename=test_filename, location="local", **DEFAULT_KWARGS) + conf.store_account(authentication_token, filename="config.toml", location="local", **DEFAULT_KWARGS) - filepath = tmpdir.join(test_filename) + filepath = tmpdir.join("config.toml") result = toml.load(filepath) assert result == EXPECTED_CONFIG - def test_global(self, monkeypatch, tmpdir, test_filename): + def test_global(self, monkeypatch, tmpdir): """Tests that the functions integrate correctly when storing account globally.""" with monkeypatch.context() as m: m.setattr(conf, "user_config_dir", lambda *args: tmpdir) - conf.store_account(authentication_token, filename=test_filename, location="user_config", **DEFAULT_KWARGS) + conf.store_account(authentication_token, filename="config.toml", location="user_config", **DEFAULT_KWARGS) - filepath = tmpdir.join(test_filename) + filepath = tmpdir.join("config.toml") result = toml.load(filepath) assert result == EXPECTED_CONFIG - def test_directory_is_created(self, monkeypatch, tmpdir, test_filename): + def test_directory_is_created(self, monkeypatch, tmpdir): recursive_dir = tmpdir.join(".new_dir") with monkeypatch.context() as m: m.setattr(conf, "user_config_dir", lambda *args: recursive_dir) - conf.store_account(authentication_token, filename=test_filename, location="user_config", **DEFAULT_KWARGS) + conf.store_account(authentication_token, filename="config.toml", location="user_config", **DEFAULT_KWARGS) - filepath = os.path.join(recursive_dir, test_filename) + filepath = os.path.join(recursive_dir, "config.toml") result = toml.load(filepath) assert result == EXPECTED_CONFIG - def test_nested_directory_is_created(self, monkeypatch, tmpdir, test_filename): + def test_nested_directory_is_created(self, monkeypatch, tmpdir): recursive_dir = tmpdir.join(".new_dir", "new_dir_again") with monkeypatch.context() as m: m.setattr(conf, "user_config_dir", lambda *args: recursive_dir) - conf.store_account(authentication_token, filename=test_filename, location="user_config", **DEFAULT_KWARGS) + conf.store_account(authentication_token, filename="config.toml", location="user_config", **DEFAULT_KWARGS) - filepath = os.path.join(recursive_dir, test_filename) + filepath = os.path.join(recursive_dir, "config.toml") result = toml.load(filepath) assert result == EXPECTED_CONFIG class TestSaveConfigToFile: """Tests for the store_account function.""" - def test_correct(self, tmpdir, test_filename): + def test_correct(self, tmpdir): """Test saving a configuration file.""" - filepath = str(tmpdir.join(test_filename)) + filepath = str(tmpdir.join("config.toml")) conf.save_config_to_file(OTHER_EXPECTED_CONFIG, filepath) result = toml.load(filepath) assert result == OTHER_EXPECTED_CONFIG - def test_file_already_existed(self, tmpdir, test_filename): + def test_file_already_existed(self, tmpdir): """Test saving a configuration file even if the file already existed.""" - filepath = str(tmpdir.join(test_filename)) + filepath = str(tmpdir.join("config.toml")) with open(filepath, "w") as f: f.write(TEST_FILE) From 6aea2fab985051e67db2821c962b4e4972928a15 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 18:39:02 -0500 Subject: [PATCH 282/335] cloud -> Cloud --- doc/introduction/configuration.rst | 2 +- 
strawberryfields/configuration.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index e809335e4..48f1fc0f8 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -77,7 +77,7 @@ In these examples ``"MyToken"`` contains the user-specific authentication token. .. warning:: Typically, a user should only ever have to execute this code snippet once, when - initially configurating their system to connect to the Xanadu cloud platform. + initially configurating their system to connect to the Xanadu Cloud platform. It is advised to call ``store_account`` **separately** from any other Python code, such that the authentication token is not shared or committed accidentally. diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 546a8db30..4efb4a621 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -221,7 +221,7 @@ def parse_environment_variable(key, value): return value def store_account(authentication_token, filename="config.toml", location="user_config", **kwargs): - r"""Configure Strawberry Fields for access to the Xanadu cloud platform by + r"""Configure Strawberry Fields for access to the Xanadu Cloud platform by saving your account credentials. The configuration file can be created in the following locations: From f344807e7dd03d10c5d43b912d7340cc06154021 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 18:40:27 -0500 Subject: [PATCH 283/335] Rewording --- doc/introduction/configuration.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 48f1fc0f8..de1386167 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -82,9 +82,9 @@ In these examples ``"MyToken"`` contains the user-specific authentication token. Python code, such that the authentication token is not shared or committed accidentally. -The following code snippet can be run in the *same directory* of a -Python script or Jupyter Notebook that uses Strawberry Fields to create a -configuration file locally: +The following code snippet can be run to create a configuration file locally in +the *same directory* of a Python script or Jupyter Notebook that uses +Strawberry Fields: .. code-block:: python From cdfde5527b6fcb74a045f10622e15ace7df6b784 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 18:47:35 -0500 Subject: [PATCH 284/335] Revert test changes, minor modifications --- strawberryfields/configuration.py | 9 +++++---- tests/frontend/test_configuration.py | 19 ++++++++++++------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 4efb4a621..55eb669ce 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -245,7 +245,8 @@ def store_account(authentication_token, filename="config.toml", location="user_c In these examples ``"MyToken"`` contains the user-specific authentication token. - The access to the Xanadu Cloud can be configured simply by running. 
+ The access to the Xanadu Cloud can be configured simply by running the + following Python code: >>> import strawberryfields as sf >>> sf.store_account("MyToken") @@ -266,9 +267,9 @@ def store_account(authentication_token, filename="config.toml", location="user_c >>> import strawberryfields as sf >>> sf.store_account("MyToken", location="local") - Each of the configuration options (check out the - :doc:`/introduction/configuration` page for a list of options) can be - passed as further keyword arguments as well: + Each of the configuration options can be passed as further keyword + arguments as well (check out the :doc:`/introduction/configuration` page + for a list of options): >>> import strawberryfields as sf >>> sf.store_account("MyToken", location="local", hostname="MyHost", use_ssl=False, port=123) diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 13b21cc86..1b979577c 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -181,7 +181,12 @@ def test_env_variable(self, monkeypatch, tmpdir): This is a test case for when there is no configuration file in the current directory.""" - with open(tmpdir.join("config.toml"), "w") as f: + + filename = "config.toml" + + path_to_write_file = tmpdir.join(filename) + + with open(path_to_write_file, "w") as f: f.write(TEST_FILE) def raise_wrapper(ex): @@ -455,8 +460,8 @@ def test_location_not_recognized_error(self, monkeypatch, tmpdir): def test_non_existing_directory_does_not_raise_file_not_found_error(self, monkeypatch, tmpdir): """Tests that an error is raised if the configuration file is supposed - to be created in non-existing directory when using user_config_dir and - if os.makedirs does not create the directory.""" + to be created in a non-existing directory when using user_config_dir + and if os.makedirs does not create the directory.""" with monkeypatch.context() as m: m.setattr(conf, "user_config_dir", lambda *args: tmpdir.join("new_dir")) @@ -465,8 +470,8 @@ def test_non_existing_directory_does_not_raise_file_not_found_error(self, monkey def test_non_existing_directory_without_makedirs_raises_error(self, monkeypatch, tmpdir): """Tests that an error is raised if the configuration file is supposed - to be created in non-existing directory when using user_config_dir and - if os.makedirs does not create the directory.""" + to be created in a non-existing directory when using user_config_dir + and if os.makedirs does not create the directory.""" with monkeypatch.context() as m: m.setattr(os, "makedirs", lambda a, **kwargs: None) @@ -480,8 +485,8 @@ def test_non_existing_directory_without_makedirs_raises_error(self, monkeypatch, class TestStoreAccountIntegration: """Integration tests for the store_account function. - Mocking takes place only such that writing can be done in the temporary - directory. + Mocking takes place only such that writing can be done in pytest's + temporary directory. 
""" def test_local(self, monkeypatch, tmpdir): From 7a754d5eb2b26cb729d4e2c5e5e86cb5a69c821f Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 21:25:25 -0500 Subject: [PATCH 285/335] Update doc/introduction/configuration.rst Co-Authored-By: Josh Izaac --- doc/introduction/configuration.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index de1386167..f0c984290 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -60,7 +60,7 @@ Store your account ------------------ Using the :func:`~.store_account` function, a configuration file containing your Xanadu Cloud credentials -can be created easily. By default, this configuration file is saved *globally*, and will be used every time +will be created. By default, this configuration file is saved *globally*, and will be used every time a remote job is submitted. In these examples ``"MyToken"`` contains the user-specific authentication token. @@ -93,4 +93,3 @@ Strawberry Fields: To check out more detailed examples visit the :func:`~.store_account` documentation. - From 95e24471a84615caaca0df2ce83cb7a2d797efa8 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 21:33:42 -0500 Subject: [PATCH 286/335] Update doc/introduction/configuration.rst Co-Authored-By: Josh Izaac --- doc/introduction/configuration.rst | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index f0c984290..5eba3b603 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -76,11 +76,12 @@ In these examples ``"MyToken"`` contains the user-specific authentication token. *Strawberry Fields configuration directory*. .. warning:: - Typically, a user should only ever have to execute this code snippet once, when - initially configurating their system to connect to the Xanadu Cloud platform. - It is advised to call ``store_account`` **separately** from any other - Python code, such that the authentication token is not shared or committed - accidentally. + + The ``store_account`` function only needs to be executed once, when + initially configuring your system to connect to the Xanadu cloud platform. + + Take care not to share or publicly commit your authentication token, as it provides + full access to your account. 
The following code snippet can be run to create a configuration file locally in the *same directory* of a Python script or Jupyter Notebook that uses From 5a218810dc4be036e4a15375d1f3dd4cd9a0d601 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 21:34:02 -0500 Subject: [PATCH 287/335] Update strawberryfields/configuration.py Co-Authored-By: Josh Izaac --- strawberryfields/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 55eb669ce..0ee42bb55 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -268,7 +268,7 @@ def store_account(authentication_token, filename="config.toml", location="user_c >>> sf.store_account("MyToken", location="local") Each of the configuration options can be passed as further keyword - arguments as well (check out the :doc:`/introduction/configuration` page + arguments as well (see the :doc:`/introduction/configuration` page for a list of options): >>> import strawberryfields as sf From 0d9a694b65bc21ea3e33cdb31200a116df209c8d Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 21:34:24 -0500 Subject: [PATCH 288/335] Update strawberryfields/configuration.py Co-Authored-By: Josh Izaac --- strawberryfields/configuration.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 0ee42bb55..2e8eac887 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -245,8 +245,7 @@ def store_account(authentication_token, filename="config.toml", location="user_c In these examples ``"MyToken"`` contains the user-specific authentication token. - The access to the Xanadu Cloud can be configured simply by running the - following Python code: + Access to the Xanadu cloud can be configured as follows: >>> import strawberryfields as sf >>> sf.store_account("MyToken") From 5f6aa730debfa3d935ee95f2989a74fdf1937588 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 21:34:45 -0500 Subject: [PATCH 289/335] Update strawberryfields/configuration.py Co-Authored-By: Josh Izaac --- strawberryfields/configuration.py | 1 - 1 file changed, 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 2e8eac887..f5c8524a0 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -247,7 +247,6 @@ def store_account(authentication_token, filename="config.toml", location="user_c Access to the Xanadu cloud can be configured as follows: - >>> import strawberryfields as sf >>> sf.store_account("MyToken") This creates the following ``"config.toml"`` file: From 081264e8902fdede392afb14968cec0f7552d597 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 21:35:01 -0500 Subject: [PATCH 290/335] Update strawberryfields/configuration.py Co-Authored-By: Josh Izaac --- strawberryfields/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index f5c8524a0..a0f48ba45 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -242,7 +242,7 @@ def store_account(authentication_token, filename="config.toml", location="user_c **Examples:** - In these examples ``"MyToken"`` contains the user-specific authentication + In these examples ``"MyToken"`` should be replaced with a valid authentication token. 
Access to the Xanadu cloud can be configured as follows: From aa460462f09d86ba11473f6117075c09383a8d71 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 21:35:16 -0500 Subject: [PATCH 291/335] Update strawberryfields/configuration.py Co-Authored-By: Josh Izaac --- strawberryfields/configuration.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index a0f48ba45..5583843d5 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -226,8 +226,8 @@ def store_account(authentication_token, filename="config.toml", location="user_c The configuration file can be created in the following locations: - - A global user configuration directory ("user_config") - - The current working directory ("local") + - A global user configuration directory (``"user_config"``) + - The current working directory (``"local"``) This global user configuration directory differs depending on the operating system: From 08c83b81f9cdb6423f551032abab83de1aa769c5 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 21:35:33 -0500 Subject: [PATCH 292/335] Update doc/introduction/configuration.rst Co-Authored-By: Josh Izaac --- doc/introduction/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 5eba3b603..f82859c83 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -92,5 +92,5 @@ Strawberry Fields: import strawberryfields as sf sf.store_account("MyToken", location="local") -To check out more detailed examples visit the :func:`~.store_account` +For more detailed examples, visit the :func:`~.store_account` documentation. From 6ee1876177c8650231a0cfa73fa3d70bb89a741d Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 21:35:52 -0500 Subject: [PATCH 293/335] Update doc/introduction/configuration.rst Co-Authored-By: Josh Izaac --- doc/introduction/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index f82859c83..69d880989 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -63,7 +63,7 @@ Using the :func:`~.store_account` function, a configuration file containing your will be created. By default, this configuration file is saved *globally*, and will be used every time a remote job is submitted. -In these examples ``"MyToken"`` contains the user-specific authentication token. +In these examples ``"MyToken"`` should be replaced with a valid authentication token. .. code-block:: python From aff02bc1d9dcd3a629f5e994c73bd941035524c6 Mon Sep 17 00:00:00 2001 From: antalszava Date: Thu, 27 Feb 2020 21:36:47 -0500 Subject: [PATCH 294/335] Modifications from comments --- doc/introduction/configuration.rst | 10 +++++----- strawberryfields/configuration.py | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index de1386167..812ea0cd2 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -29,7 +29,7 @@ and has the following format: .. 
code-block:: toml [api] - # Options for the Strawberry Fields Cloud API + # Options for the Strawberry Fields cloud API authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" hostname = "localhost" use_ssl = true @@ -39,7 +39,7 @@ Configuration options --------------------- **authentication_token (str)** (*required*) - API token for authentication to the Xanadu Cloud platform. This is required + API token for authentication to the Xanadu cloud platform. This is required for submitting remote jobs using :class:`~.StarshipEngine`. Corresponding environment variable: ``SF_API_AUTHENTICATION_TOKEN`` @@ -59,11 +59,11 @@ Configuration options Store your account ------------------ -Using the :func:`~.store_account` function, a configuration file containing your Xanadu Cloud credentials +Using the :func:`~.store_account` function, a configuration file containing your Xanadu cloud credentials can be created easily. By default, this configuration file is saved *globally*, and will be used every time a remote job is submitted. -In these examples ``"MyToken"`` contains the user-specific authentication token. +In these examples ``"MyToken"`` contains the authentication token. .. code-block:: python @@ -77,7 +77,7 @@ In these examples ``"MyToken"`` contains the user-specific authentication token. .. warning:: Typically, a user should only ever have to execute this code snippet once, when - initially configurating their system to connect to the Xanadu Cloud platform. + initially configurating their system to connect to the Xanadu cloud platform. It is advised to call ``store_account`` **separately** from any other Python code, such that the authentication token is not shared or committed accidentally. diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 55eb669ce..9697faf05 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -221,7 +221,7 @@ def parse_environment_variable(key, value): return value def store_account(authentication_token, filename="config.toml", location="user_config", **kwargs): - r"""Configure Strawberry Fields for access to the Xanadu Cloud platform by + r"""Configure Strawberry Fields for access to the Xanadu cloud platform by saving your account credentials. The configuration file can be created in the following locations: @@ -245,7 +245,7 @@ def store_account(authentication_token, filename="config.toml", location="user_c In these examples ``"MyToken"`` contains the user-specific authentication token. - The access to the Xanadu Cloud can be configured simply by running the + The access to the Xanadu cloud can be configured simply by running the following Python code: >>> import strawberryfields as sf @@ -285,7 +285,7 @@ def store_account(authentication_token, filename="config.toml", location="user_c port = 123 Args: - authentication_token (str): API token for authentication to the Xanadu Cloud platform. + authentication_token (str): API token for authentication to the Xanadu cloud platform. This is required for submitting remote jobs using :class:`~.StarshipEngine`. Kwargs: From 6f813e0ce03c95a68772f793497e487f2371f866 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Fri, 28 Feb 2020 13:26:23 +1030 Subject: [PATCH 295/335] Fixes a bug in the MZgate convention by reversing the internal and external phase arguments. 
(#301) * update MZgate * update changelog * Update .github/CHANGELOG.md Co-Authored-By: Josh Izaac * added tests * remove chip0 test * fix failing test * Apply suggestions from code review * swap comment in template * added test with non-standard angles Co-authored-by: Nathan Killoran --- .github/CHANGELOG.md | 16 +- strawberryfields/circuitspecs/__init__.py | 3 +- strawberryfields/circuitspecs/chip0.py | 197 ----------- strawberryfields/circuitspecs/chip2.py | 4 +- strawberryfields/ops.py | 14 +- tests/frontend/test_circuitspecs_chip0.py | 380 ---------------------- tests/frontend/test_circuitspecs_chip2.py | 133 +++++++- tests/frontend/test_engine.py | 24 +- 8 files changed, 161 insertions(+), 610 deletions(-) delete mode 100644 strawberryfields/circuitspecs/chip0.py delete mode 100644 tests/frontend/test_circuitspecs_chip0.py diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index c32c4aee4..95df59f88 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -19,14 +19,14 @@ through decomposition, using The Walrus for fast computation. [#289](https://github.com/XanaduAI/strawberryfields/pull/289) -* Added The Walrus implementations for the displacement, squeezing and beamsplitter +* Added The Walrus implementations for the displacement, squeezing and beamsplitter operations to improve speed. [#287](https://github.com/XanaduAI/strawberryfields/pull/287) - + * Added custom tensor contractions for the beamsplitter and the two-mode squeeze gate as well as faster application of diagonal gate matrices. [#292](https://github.com/XanaduAI/strawberryfields/pull/292) - + * Moved apply-gate functions to `Circuit` class, and removed `apply_gate_einsum` and `Circuits._apply_gate`, since they were no longer used. [#293](https://github.com/XanaduAI/strawberryfields/pull/293/) @@ -49,10 +49,16 @@ * Added `sympy>=1.5` to the list of dependencies. Removed the `sympy.functions.atan2` workaround now that SymPy has been fixed. [#280](https://github.com/XanaduAI/strawberryfields/pull/280) - + * Removed two unnecessary else statements that pylint complained about. [#290](https://github.com/XanaduAI/strawberryfields/pull/290) - + +* Fixed a bug in the `MZgate`, where the internal and external phases were + in the wrong order in both the docstring and the argument list. The new + signature is `MZgate(phase_in, phase_ex)`, matching the existing `rectangular_symmetric` + decomposition. + [(#301)](https://github.com/XanaduAI/strawberryfields/pull/301) + ### Contributors diff --git a/strawberryfields/circuitspecs/__init__.py b/strawberryfields/circuitspecs/__init__.py index abd70d658..93ebabe12 100644 --- a/strawberryfields/circuitspecs/__init__.py +++ b/strawberryfields/circuitspecs/__init__.py @@ -37,7 +37,6 @@ executed on that backend. 
""" from .circuit_specs import CircuitSpecs -from .chip0 import Chip0Specs from .chip2 import Chip2Specs from .fock import FockSpecs from .gaussian import GaussianSpecs @@ -45,7 +44,7 @@ from .tensorflow import TFSpecs from .gaussian_unitary import GaussianUnitary -specs = (Chip0Specs, Chip2Specs, FockSpecs, GaussianSpecs, GBSSpecs, TFSpecs, GaussianUnitary) +specs = (Chip2Specs, FockSpecs, GaussianSpecs, GBSSpecs, TFSpecs, GaussianUnitary) circuit_db = {c.short_name: c for c in specs} """dict[str, ~strawberryfields.circuitspecs.CircuitSpecs]: Map from circuit family short name to the corresponding class.""" diff --git a/strawberryfields/circuitspecs/chip0.py b/strawberryfields/circuitspecs/chip0.py deleted file mode 100644 index 16f971c8b..000000000 --- a/strawberryfields/circuitspecs/chip0.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright 2019 Xanadu Quantum Technologies Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Circuit class specification for the chip0 class of circuits.""" -import textwrap - -import numpy as np -from numpy.linalg import multi_dot -from scipy.linalg import block_diag - -from strawberryfields.program_utils import CircuitError, Command, group_operations -from strawberryfields.parameters import par_evaluate -import strawberryfields.ops as ops - -from .circuit_specs import CircuitSpecs -from .gbs import GBSSpecs - - -class Chip0Specs(CircuitSpecs): - """Circuit specifications for the chip0 class of circuits.""" - - short_name = "chip0" - modes = 4 - remote = True - local = True - interactive = False - - primitives = {"S2gate", "MeasureFock", "Rgate", "BSgate"} - decompositions = { - "Interferometer": {"mesh": "rectangular_symmetric", "drop_identity": False}, - "BipartiteGraphEmbed": {"mesh": "rectangular_symmetric", "drop_identity": False}, - "MZgate": {}, - } - - circuit = textwrap.dedent( - """\ - name template_2x2_chip0 - version 1.0 - target chip0 (shots=10) - - # for n spatial degrees, first n signal modes, then n idler modes, phase zero - S2gate({squeezing_amplitude_0}, 0.0) | [0, 2] - S2gate({squeezing_amplitude_1}, 0.0) | [1, 3] - - # standard 2x2 interferometer for the signal modes (the lower ones in frequency) - Rgate({external_phase_0}) | [0] - BSgate(pi/4, pi/2) | [0, 1] - Rgate({internal_phase_0}) | [0] - BSgate(pi/4, pi/2) | [0, 1] - - #duplicate the interferometer for the idler modes (the higher ones in frequency) - Rgate({external_phase_0}) | [2] - BSgate(pi/4, pi/2) | [2, 3] - Rgate({internal_phase_0}) | [2] - BSgate(pi/4, pi/2) | [2, 3] - - # final local phases - Rgate({final_phase_0}) | 0 - Rgate({final_phase_1}) | 1 - Rgate({final_phase_2}) | 2 - Rgate({final_phase_3}) | 3 - - # Measurement in Fock basis - MeasureFock() | [0, 1, 2, 3] - """ - ) - - def compile(self, seq, registers): - """Try to arrange a quantum circuit into a form suitable for Chip0. 
- - Args: - seq (Sequence[Command]): quantum circuit to modify - registers (Sequence[RegRefs]): quantum registers - Returns: - List[Command]: modified circuit - Raises: - CircuitError: the circuit does not correspond to Chip0 - """ - # pylint: disable=too-many-statements,too-many-branches - # First, check if provided sequence matches the circuit template. - # This will avoid superfluous compilation if the user is using the - # template directly. - try: - seq = super().compile(seq, registers) - except CircuitError: - # failed topology check. Continue to more general - # compilation below. - pass - else: - return seq - - # first do general GBS compilation to make sure - # Fock measurements are correct - # --------------------------------------------- - seq = GBSSpecs().compile(seq, registers) - A, B, C = group_operations(seq, lambda x: isinstance(x, ops.MeasureFock)) - - if len(B[0].reg) != self.modes: - raise CircuitError("All modes must be measured.") - - # Check circuit begins with two mode squeezers - # -------------------------------------------- - A, B, C = group_operations(seq, lambda x: isinstance(x, ops.S2gate)) - - if A: - raise CircuitError("Circuits must start with two S2gates.") - - # get circuit registers - regrefs = {q for cmd in B for q in cmd.reg} - - if len(regrefs) != self.modes: - raise CircuitError("S2gates do not appear on the correct modes.") - - # Compile the unitary: combine and then decompose all unitaries - # ------------------------------------------------------------- - A, B, C = group_operations(seq, lambda x: isinstance(x, (ops.Rgate, ops.BSgate))) - - # begin unitary lists for mode [0, 1] and modes [2, 3] with - # two identity matrices. This is because multi_dot requires - # at least two matrices in the list. - U_list01 = [np.identity(self.modes // 2, dtype=np.complex128)] * 2 - U_list23 = [np.identity(self.modes // 2, dtype=np.complex128)] * 2 - - if not B: - # no interferometer was applied - A, B, C = group_operations(seq, lambda x: isinstance(x, ops.S2gate)) - A = B # move the S2gates to A - else: - for cmd in B: - # calculate the unitary matrix representing each - # rotation gate and each beamsplitter - # Note: this is done separately on modes [0, 1] - # and modes [2, 3] - modes = [i.ind for i in cmd.reg] - params = par_evaluate(cmd.op.p) - U = np.identity(self.modes // 2, dtype=np.complex128) - - if isinstance(cmd.op, ops.Rgate): - m = modes[0] - U[m % 2, m % 2] = np.exp(1j * params[0]) - - elif isinstance(cmd.op, ops.BSgate): - m, n = modes - - t = np.cos(params[0]) - r = np.exp(1j * params[1]) * np.sin(params[0]) - - U[m % 2, m % 2] = t - U[m % 2, n % 2] = -np.conj(r) - U[n % 2, m % 2] = r - U[n % 2, n % 2] = t - - if set(modes).issubset({0, 1}): - U_list01.insert(0, U) - elif set(modes).issubset({2, 3}): - U_list23.insert(0, U) - else: - raise CircuitError( - "Unitary must be applied separately to modes [0, 1] and modes [2, 3]." - ) - - # multiply all unitaries together, to get the final - # unitary representation on modes [0, 1] and [2, 3]. - U01 = multi_dot(U_list01) - U23 = multi_dot(U_list23) - - # check unitaries are equal - if not np.allclose(U01, U23): - raise CircuitError( - "Interferometer on modes [0, 1] must be identical to interferometer on modes [2, 3]." 
- ) - - U = block_diag(U01, U23) - - # replace B with an interferometer - B = [ - Command(ops.Interferometer(U01), registers[:2]), - Command(ops.Interferometer(U23), registers[2:]), - ] - - # decompose the interferometer, using Mach-Zehnder interferometers - B = self.decompose(B) - - # Do a final circuit topology check - # --------------------------------- - seq = super().compile(A + B + C, registers) - return seq diff --git a/strawberryfields/circuitspecs/chip2.py b/strawberryfields/circuitspecs/chip2.py index b960d7e88..dc7388f9c 100644 --- a/strawberryfields/circuitspecs/chip2.py +++ b/strawberryfields/circuitspecs/chip2.py @@ -57,8 +57,8 @@ class Chip2Specs(CircuitSpecs): S2gate({squeezing_amplitude_3}, 0.0) | [3, 7] # standard 4x4 interferometer for the signal modes (the lower ones in frequency) - # even phase indices correspond to external Mach-Zehnder interferometer phases - # odd phase indices correspond to internal Mach-Zehnder interferometer phases + # even phase indices correspond to internal Mach-Zehnder interferometer phases + # odd phase indices correspond to external Mach-Zehnder interferometer phases MZgate({phase_0}, {phase_1}) | [0, 1] MZgate({phase_2}, {phase_3}) | [2, 3] MZgate({phase_4}, {phase_5}) | [1, 2] diff --git a/strawberryfields/ops.py b/strawberryfields/ops.py index 378c52bae..3e21e4e27 100644 --- a/strawberryfields/ops.py +++ b/strawberryfields/ops.py @@ -1100,25 +1100,25 @@ class MZgate(Gate): .. math:: - \mathrm{MZ}(\phi_{ex}, \phi_{in}) = BS\left(\frac{\pi}{4}, \frac{\pi}{2}\right) + \mathrm{MZ}(\phi_{in}, \phi_{ex}) = BS\left(\frac{\pi}{4}, \frac{\pi}{2}\right) (R(\phi_{in})\otimes I) BS\left(\frac{\pi}{4}, \frac{\pi}{2}\right) (R(\phi_{ex})\otimes I) Args: - phi_ex (float): external phase phi_in (float): internal phase + phi_ex (float): external phase """ ns = 2 - def __init__(self, phi_ex, phi_in): - super().__init__([phi_ex, phi_in]) + def __init__(self, phi_in, phi_ex): + super().__init__([phi_in, phi_ex]) def _decompose(self, reg, **kwargs): # into local phase shifts and two 50-50 beamsplitters return [ - Command(Rgate(self.p[0]), reg[0]), - Command(BSgate(np.pi/4, np.pi/2), reg), Command(Rgate(self.p[1]), reg[0]), + Command(BSgate(np.pi/4, np.pi/2), reg), + Command(Rgate(self.p[0]), reg[0]), Command(BSgate(np.pi/4, np.pi/2), reg) ] @@ -1472,7 +1472,7 @@ def _decompose(self, reg, **kwargs): if "symmetric" in mesh: # Mach-Zehnder interferometers - cmds.append(Command(MZgate(np.mod(phi, 2*np.pi), np.mod(theta, 2*np.pi)), (reg[n], reg[m]))) + cmds.append(Command(MZgate(np.mod(theta, 2*np.pi), np.mod(phi, 2*np.pi)), (reg[n], reg[m]))) else: # Clements style beamsplitters diff --git a/tests/frontend/test_circuitspecs_chip0.py b/tests/frontend/test_circuitspecs_chip0.py deleted file mode 100644 index 4fd2a6e6d..000000000 --- a/tests/frontend/test_circuitspecs_chip0.py +++ /dev/null @@ -1,380 +0,0 @@ -# Copyright 2019 Xanadu Quantum Technologies Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-r"""Unit tests for the CircuitSpec class""" -import textwrap - -import pytest -import numpy as np -import networkx as nx - -import blackbird - -import strawberryfields as sf -import strawberryfields.ops as ops - -from strawberryfields.parameters import par_evaluate -from strawberryfields.program_utils import CircuitError, list_to_DAG -from strawberryfields.io import to_program -from strawberryfields.utils import random_interferometer -from strawberryfields.circuitspecs.chip0 import Chip0Specs, CircuitSpecs - - -pytestmark = pytest.mark.frontend - -np.random.seed(42) - - -def program_equivalence(prog1, prog2, compare_params=True, atol=1e-6, rtol=0): - r"""Checks if two programs are equivalent. - - This function converts the program lists into directed acyclic graphs, - and runs the NetworkX `is_isomorphic` graph function in order - to determine if the two programs are equivalent. - - Note: when checking for parameter equality between two parameters - :math:`a` and :math:`b`, we use the following formula: - - .. math:: |a - b| \leq (\texttt{atol} + \texttt{rtol}\times|b|) - - Args: - prog1 (strawberryfields.program.Program): quantum program - prog2 (strawberryfields.program.Program): quantum program - compare_params (bool): Set to ``False`` to turn of comparing - program parameters; equivalency will only take into - account the operation order. - atol (float): the absolute tolerance parameter for checking - quantum operation parameter equality - rtol (float): the relative tolerance parameter for checking - quantum operation parameter equality - - Returns: - bool: returns ``True`` if two quantum programs are equivalent - """ - DAG1 = list_to_DAG(prog1.circuit) - DAG2 = list_to_DAG(prog2.circuit) - - circuit = [] - for G in [DAG1, DAG2]: - # relabel the DAG nodes to integers - circuit.append(nx.convert_node_labels_to_integers(G)) - - # add node attributes to store the operation name and parameters - name_mapping = {i: n.op.__class__.__name__ for i, n in enumerate(G.nodes())} - parameter_mapping = {i: par_evaluate(n.op.p) for i, n in enumerate(G.nodes())} - - # CXgate and BSgate are not symmetric wrt to permuting the order of the two - # modes it acts on; i.e., the order of the wires matter - wire_mapping = {} - for i, n in enumerate(G.nodes()): - if n.op.__class__.__name__ == "CXgate": - if np.allclose(n.op.p[0], 0): - # if the CXgate parameter is 0, wire order doesn't matter - wire_mapping[i] = 0 - else: - # if the CXgate parameter is not 0, order matters - wire_mapping[i] = [j.ind for j in n.reg] - - elif n.op.__class__.__name__ == "BSgate": - if np.allclose([j % np.pi for j in par_evaluate(n.op.p)], [np.pi/4, np.pi/2]): - # if the beamsplitter is *symmetric*, then the order of the - # wires does not matter. - wire_mapping[i] = 0 - else: - # beamsplitter is not symmetric, order matters - wire_mapping[i] = [j.ind for j in n.reg] - - else: - # not a CXgate or a BSgate, order of wires doesn't matter - wire_mapping[i] = 0 - - # TODO: at the moment, we do not check for whether an empty - # wire will match an operation with trivial parameters. - # Maybe we can do this in future, but this is a subgraph - # isomorphism problem and much harder. 
- - nx.set_node_attributes(circuit[-1], name_mapping, name="name") - nx.set_node_attributes(circuit[-1], parameter_mapping, name="p") - nx.set_node_attributes(circuit[-1], wire_mapping, name="w") - - def node_match(n1, n2): - """Returns True if both nodes have the same name and - same parameters, within a certain tolerance""" - name_match = n1["name"] == n2["name"] - p_match = np.allclose(n1["p"], n2["p"], atol=atol, rtol=rtol) - wire_match = n1["w"] == n2["w"] - - if compare_params: - return name_match and p_match and wire_match - - return name_match and wire_match - - # check if circuits are equivalent - return nx.is_isomorphic(circuit[0], circuit[1], node_match) - - -class DummyCircuit(CircuitSpecs): - """Dummy circuit used to instantiate - the abstract base class""" - - modes = 4 - remote = False - local = True - interactive = True - primitives = {"S2gate", "MeasureFock", "Rgate", "BSgate"} - decompositions = {"Interferometer": {}, "MZgate": {}} - - -class TestChip0Compilation: - """Tests for compilation using the Chip0 circuit specification""" - - def test_exact_template(self, tol): - """Test compilation works for the exact circuit""" - bb = blackbird.loads(Chip0Specs.circuit) - bb = bb( - squeezing_amplitude_0=0.43, - squeezing_amplitude_1=0.65, - external_phase_0=0.54, - internal_phase_0=-0.23, - final_phase_0=1.24, - final_phase_1=-0.54, - final_phase_2=4.12, - final_phase_3=0, - ) - - expected = to_program(bb) - res = expected.compile("chip0") - - assert program_equivalence(res, expected, atol=tol) - - def test_not_all_modes_measured(self): - """Test exceptions raised if not all modes are measured""" - prog = sf.Program(4) - U = random_interferometer(2) - - with prog.context as q: - ops.S2gate(0.5) | (q[0], q[2]) - ops.S2gate(0.5) | (q[1], q[3]) - ops.Interferometer(U) | (q[0], q[1]) - ops.Interferometer(U) | (q[2], q[3]) - ops.MeasureFock() | (q[0], q[1]) - - with pytest.raises(CircuitError, match="All modes must be measured"): - res = prog.compile("chip0") - - def test_no_s2gates(self): - """Test exceptions raised if no S2gates are present""" - prog = sf.Program(4) - U = random_interferometer(2) - - with prog.context as q: - ops.Interferometer(U) | (q[0], q[1]) - ops.Interferometer(U) | (q[2], q[3]) - ops.MeasureFock() | q - - with pytest.raises(CircuitError, match="must start with two S2gates"): - res = prog.compile("chip0") - - def test_incorrect_s2gates(self): - """Test exceptions raised if S2gates do not appear on correct modes""" - prog = sf.Program(4) - U = random_interferometer(2) - - with prog.context as q: - ops.S2gate(0.5) | (q[0], q[2]) - ops.Interferometer(U) | (q[0], q[1]) - ops.Interferometer(U) | (q[2], q[3]) - ops.MeasureFock() | q - - with pytest.raises(CircuitError, match="S2gates do not appear on the correct modes"): - res = prog.compile("chip0") - - def test_no_unitary(self, tol): - """Test compilation works with no unitary provided""" - prog = sf.Program(4) - - with prog.context as q: - ops.S2gate(0.5) | (q[0], q[2]) - ops.S2gate(0.5) | (q[1], q[3]) - ops.MeasureFock() | q - - res = prog.compile("chip0") - - for cmd in res.circuit: - print(cmd) - - expected = sf.Program(4) - - with expected.context as q: - ops.S2gate(0.5, 0) | (q[0], q[2]) - ops.S2gate(0.5, 0) | (q[1], q[3]) - - # corresponds to an identity on modes [0, 1] - ops.Rgate(0) | q[0] - ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) - ops.Rgate(np.pi) | q[0] - ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) - ops.Rgate(np.pi) | q[0] - ops.Rgate(0) | q[1] - - # corresponds to an identity on modes 
[2, 3] - ops.Rgate(0) | q[2] - ops.BSgate(np.pi / 4, np.pi / 2) | (q[2], q[3]) - ops.Rgate(np.pi) | q[2] - ops.BSgate(np.pi / 4, np.pi / 2) | (q[3], q[2]) - ops.Rgate(np.pi) | q[2] - ops.Rgate(0) | q[3] - - ops.MeasureFock() | q - - assert program_equivalence(res, expected, atol=tol) - - def test_interferometers(self, tol): - """Test interferometers correctly decompose to MZ gates""" - prog = sf.Program(4) - U = random_interferometer(2) - - with prog.context as q: - ops.S2gate(0.5) | (q[0], q[2]) - ops.S2gate(0.5) | (q[1], q[3]) - ops.Interferometer(U) | (q[0], q[1]) - ops.Interferometer(U) | (q[2], q[3]) - ops.MeasureFock() | q - - res = prog.compile("chip0") - - expected = sf.Program(4) - - with expected.context as q: - ops.S2gate(0.5, 0) | (q[0], q[2]) - ops.S2gate(0.5, 0) | (q[1], q[3]) - ops.Interferometer(U, mesh="rectangular_symmetric", drop_identity=False) | (q[0], q[1]) - ops.Interferometer(U, mesh="rectangular_symmetric", drop_identity=False) | (q[2], q[3]) - ops.MeasureFock() | q - - expected = expected.compile(DummyCircuit()) - - assert program_equivalence(res, expected, atol=tol) - - def test_unitaries_do_not_match(self): - """Test exception raised if the unitary applied to modes [0, 1] is - different to the unitary applied to modes [2, 3]""" - prog = sf.Program(4) - U = random_interferometer(2) - - with prog.context as q: - ops.S2gate(0.5) | (q[0], q[2]) - ops.S2gate(0.5) | (q[1], q[3]) - ops.Interferometer(U) | (q[0], q[1]) - ops.Interferometer(U) | (q[2], q[3]) - ops.BSgate() | (q[2], q[3]) - ops.MeasureFock() | q - - with pytest.raises(CircuitError, match="must be identical to interferometer"): - res = prog.compile("chip0") - - def test_unitary_too_large(self): - """Test exception raised if the unitary is applied to more - than just modes [0, 1] and [2, 3].""" - prog = sf.Program(4) - U = random_interferometer(4) - - with prog.context as q: - ops.S2gate(0.5) | (q[0], q[2]) - ops.S2gate(0.5) | (q[1], q[3]) - ops.Interferometer(U) | q - ops.BSgate() | (q[2], q[3]) - ops.MeasureFock() | q - - with pytest.raises(CircuitError, match="must be applied separately"): - res = prog.compile("chip0") - - def test_mach_zehnder_interferometers(self, tol): - """Test Mach-Zehnder gates correctly compile""" - prog = sf.Program(4) - phi = 0.543 - theta = -1.654 - - with prog.context as q: - ops.S2gate(0.5) | (q[0], q[2]) - ops.S2gate(0.5) | (q[3], q[1]) - ops.MZgate(phi, theta) | (q[0], q[1]) - ops.MZgate(phi, theta) | (q[2], q[3]) - ops.MeasureFock() | q - - res = prog.compile("chip0") - - expected = sf.Program(4) - - with expected.context as q: - ops.S2gate(0.5, 0) | (q[0], q[2]) - ops.S2gate(0.5, 0) | (q[1], q[3]) - - # corresponds to MZgate(phi, theta) on modes [0, 1] - ops.Rgate(np.mod(phi, 2*np.pi)) | q[0] - ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) - ops.Rgate(np.mod(theta, 2*np.pi)) | q[0] - ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) - ops.Rgate(0) | q[0] - ops.Rgate(0) | q[1] - - # corresponds to MZgate(phi, theta) on modes [2, 3] - ops.Rgate(np.mod(phi, 2*np.pi)) | q[2] - ops.BSgate(np.pi / 4, np.pi / 2) | (q[2], q[3]) - ops.Rgate(np.mod(theta, 2*np.pi)) | q[2] - ops.BSgate(np.pi / 4, np.pi / 2) | (q[2], q[3]) - ops.Rgate(0) | q[2] - ops.Rgate(0) | q[3] - - ops.MeasureFock() | (q[0], q[3], q[1], q[2]) - - assert program_equivalence(res, expected, atol=tol) - - def test_50_50_BSgate(self, tol): - """Test 50-50 BSgates correctly compile""" - prog = sf.Program(4) - - with prog.context as q: - ops.S2gate(0.5) | (q[0], q[2]) - ops.S2gate(0.5) | (q[1], q[3]) - ops.BSgate() | 
(q[0], q[1]) - ops.BSgate() | (q[2], q[3]) - ops.MeasureFock() | q - - res = prog.compile("chip0") - - expected = sf.Program(4) - - with expected.context as q: - ops.S2gate(0.5, 0) | (q[0], q[2]) - ops.S2gate(0.5, 0) | (q[1], q[3]) - - # corresponds to BSgate() on modes [0, 1] - ops.Rgate(0) | (q[0]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) - ops.Rgate(3 * np.pi / 2) | (q[0]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[0], q[1]) - ops.Rgate(3 * np.pi / 4) | (q[0]) - ops.Rgate(-np.pi / 4) | (q[1]) - - # corresponds to BSgate() on modes [2, 3] - ops.Rgate(0) | (q[2]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[2], q[3]) - ops.Rgate(3 * np.pi / 2) | (q[2]) - ops.BSgate(np.pi / 4, np.pi / 2) | (q[2], q[3]) - ops.Rgate(3 * np.pi / 4) | (q[2]) - ops.Rgate(-np.pi / 4) | (q[3]) - - ops.MeasureFock() | q - - assert program_equivalence(res, expected, atol=tol) diff --git a/tests/frontend/test_circuitspecs_chip2.py b/tests/frontend/test_circuitspecs_chip2.py index 48c54d5e7..cfd8b761e 100644 --- a/tests/frontend/test_circuitspecs_chip2.py +++ b/tests/frontend/test_circuitspecs_chip2.py @@ -17,6 +17,7 @@ import pytest import numpy as np import networkx as nx +from scipy.linalg import block_diag import blackbird @@ -38,6 +39,33 @@ """float: the allowed squeezing amplitude""" +def TMS(r, phi): + """Two-mode squeezing. + + Args: + r (float): squeezing magnitude + phi (float): rotation parameter + + Returns: + array: symplectic transformation matrix + """ + cp = np.cos(phi) + sp = np.sin(phi) + ch = np.cosh(r) + sh = np.sinh(r) + + S = np.array( + [ + [ch, cp * sh, 0, sp * sh], + [cp * sh, ch, sp * sh, 0], + [0, sp * sh, ch, -cp * sh], + [sp * sh, 0, -cp * sh, ch], + ] + ) + + return S + + def program_equivalence(prog1, prog2, compare_params=True, atol=1e-6, rtol=0): r"""Checks if two programs are equivalent. 
@@ -296,7 +324,7 @@ def test_s2gate_repeated_modes(self): with pytest.raises(CircuitError, match="incompatible topology."): res = prog.compile("chip2") - def test_mzgate(self): + def test_gates_compile(self): """Test that combinations of MZgates, Rgates, and BSgates correctly compile.""" prog = sf.Program(8) @@ -345,7 +373,7 @@ def test_no_unitary(self, tol): ops.MZgate(np.pi, np.pi) | (q[1], q[2]) ops.MZgate(0, 0) | (q[0], q[1]) ops.MZgate(0, 0) | (q[2], q[3]) - ops.MZgate(0, np.pi) | (q[1], q[2]) + ops.MZgate(np.pi, 0) | (q[1], q[2]) ops.Rgate(np.pi) | (q[0]) ops.Rgate(0) | (q[1]) ops.Rgate(np.pi) | (q[2]) @@ -357,7 +385,7 @@ def test_no_unitary(self, tol): ops.MZgate(np.pi, np.pi) | (q[5], q[6]) ops.MZgate(0, 0) | (q[4], q[5]) ops.MZgate(0, 0) | (q[6], q[7]) - ops.MZgate(0, np.pi) | (q[5], q[6]) + ops.MZgate(np.pi, 0) | (q[5], q[6]) ops.Rgate(np.pi) | (q[4]) ops.Rgate(0) | (q[5]) ops.Rgate(np.pi) | (q[6]) @@ -367,6 +395,105 @@ def test_no_unitary(self, tol): assert program_equivalence(res, expected, atol=tol) + # double check that the applied symplectic is correct + + # remove the Fock measurements + res.circuit = res.circuit[:-1] + + # extract the Gaussian symplectic matrix + O = res.compile("gaussian_unitary").circuit[0].op.p[0] + + # construct the expected symplectic matrix corresponding + # to just the initial two mode squeeze gates + S = TMS(SQ_AMPLITUDE, 0) + + expected = np.zeros([2*8, 2*8]) + idx = np.arange(2*8).reshape(4, 4).T + for i in idx: + expected[i.reshape(-1, 1), i.reshape(1, -1)] = S + + assert np.allclose(O, expected, atol=tol) + + def test_mz_gate_standard(self, tol): + """Test that the Mach-Zehnder gate compiles to give the correct unitary + for some specific standard parameters""" + prog = sf.Program(8) + + with prog.context as q: + ops.MZgate(np.pi/2, np.pi) | (q[0], q[1]) + ops.MZgate(np.pi, 0) | (q[2], q[3]) + ops.MZgate(np.pi/2, np.pi) | (q[4], q[5]) + ops.MZgate(np.pi, 0) | (q[6], q[7]) + ops.MeasureFock() | q + + # compile the program using the chip2 spec + res = prog.compile("chip2") + + # remove the Fock measurements + res.circuit = res.circuit[:-1] + + # extract the Gaussian symplectic matrix + O = res.compile("gaussian_unitary").circuit[0].op.p[0] + + # By construction, we know that the symplectic matrix is + # passive, and so represents a unitary matrix + U = O[:8, :8] + 1j*O[8:, :8] + + # the constructed program should implement the following + # unitary matrix + expected = np.array( + [[0.5-0.5j, -0.5+0.5j, 0, 0], + [0.5-0.5j, 0.5-0.5j, 0, 0], + [0, 0, -1, -0], + [0, 0, -0, 1]] + ) + expected = block_diag(expected, expected) + + assert np.allclose(U, expected, atol=tol) + + @pytest.mark.parametrize("theta1", np.linspace(0, 2*np.pi-0.2, 7)) + @pytest.mark.parametrize("phi1", np.linspace(0, 2*np.pi-0.1, 7)) + def test_mz_gate_non_standard(self, theta1, phi1, tol): + """Test that the Mach-Zehnder gate compiles to give the correct unitary + for a variety of non-standard angles""" + prog = sf.Program(8) + + theta2 = np.pi/13 + phi2 = 3*np.pi/7 + + with prog.context as q: + ops.MZgate(theta1, phi1) | (q[0], q[1]) + ops.MZgate(theta2, phi2) | (q[2], q[3]) + ops.MZgate(theta1, phi1) | (q[4], q[5]) + ops.MZgate(theta2, phi2) | (q[6], q[7]) + ops.MeasureFock() | q + + # compile the program using the chip2 spec + res = prog.compile("chip2") + + # remove the Fock measurements + res.circuit = res.circuit[:-1] + + # extract the Gaussian symplectic matrix + O = res.compile("gaussian_unitary").circuit[0].op.p[0] + + # By construction, we know that the symplectic 
matrix is + # passive, and so represents a unitary matrix + U = O[:8, :8] + 1j*O[8:, :8] + + # the constructed program should implement the following + # unitary matrix + expected = np.array([ + [(np.exp(1j * phi1) * (-1 + np.exp(1j * theta1))) / 2.0, 0.5j * (1 + np.exp(1j * theta1)), 0, 0], + [0.5j * np.exp(1j * phi1) * (1 + np.exp(1j * theta1)), (1 - np.exp(1j * theta1)) / 2.0, 0, 0], + [0, 0, (np.exp(1j * phi2) * (-1 + np.exp(1j * theta2))) / 2.0, 0.5j * (1 + np.exp(1j * theta2))], + [0, 0, 0.5j * np.exp(1j * phi2) * (1 + np.exp(1j * theta2)), (1 - np.exp(1j * theta2)) / 2.0], + ]) + expected = block_diag(expected, expected) + + assert np.allclose(U, expected, atol=tol) + + def test_interferometers(self, tol): """Test that the compilation correctly decomposes the interferometer using the rectangular_symmetric mesh""" diff --git a/tests/frontend/test_engine.py b/tests/frontend/test_engine.py index cd1c5e494..aa716b489 100644 --- a/tests/frontend/test_engine.py +++ b/tests/frontend/test_engine.py @@ -58,7 +58,7 @@ def starship_engine(monkeypatch): """ mock_api_client = MagicMock() monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) - engine = StarshipEngine("chip0", polling_delay_seconds=0) + engine = StarshipEngine("chip2", polling_delay_seconds=0) return engine @@ -190,7 +190,7 @@ def test_init(self, monkeypatch): """ mock_api_client = MagicMock() monkeypatch.setattr("strawberryfields.engine.APIClient", mock_api_client) - engine = StarshipEngine("chip0") + engine = StarshipEngine("chip2") assert engine.client == mock_api_client() assert engine.jobs == [] assert engine.REMOTE == True @@ -340,7 +340,7 @@ def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): # NOTE: this is currently more of an integration test, currently a WIP / under development. 
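The two checks used in the updated chip2 tests above can be reproduced with plain NumPy. A minimal sketch, assuming the xxpp ordering used by the `gaussian_unitary` compiler: multiplying out the corrected `MZgate(phi_in, phi_ex)` decomposition reproduces the closed-form matrix asserted in `test_mz_gate_non_standard`, and the symplectic matrix of a passive transformation with unitary U is [[Re U, -Im U], [Im U, Re U]], which is why `U = O[:8, :8] + 1j*O[8:, :8]` recovers the interferometer:

import numpy as np

# symmetric 50:50 beamsplitter BS(pi/4, pi/2)
bs = np.array([[1, 1j], [1j, 1]]) / np.sqrt(2)

def rot(phi):
    # phase shift applied to the first mode only
    return np.diag([np.exp(1j * phi), 1.0])

def mz_unitary(phi_in, phi_ex):
    # corrected convention: R(phi_ex), BS, R(phi_in), BS in circuit order
    return bs @ rot(phi_in) @ bs @ rot(phi_ex)

theta, phi = 0.543, -1.654  # arbitrary internal/external phases
expected = 0.5 * np.array(
    [[np.exp(1j * phi) * (np.exp(1j * theta) - 1), 1j * (1 + np.exp(1j * theta))],
     [1j * np.exp(1j * phi) * (1 + np.exp(1j * theta)), 1 - np.exp(1j * theta)]]
)
assert np.allclose(mz_unitary(theta, phi), expected)

# passive symplectic <-> unitary round trip (2 modes, xxpp ordering)
U = mz_unitary(theta, phi)
O = np.block([[U.real, -U.imag], [U.imag, U.real]])
assert np.allclose(U, O[:2, :2] + 1j * O[2:, :2])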
api_client_params = {"hostname": "localhost"} - engine = StarshipEngine("chip0", polling_delay_seconds=0, **api_client_params) + engine = StarshipEngine("chip2", polling_delay_seconds=0, **api_client_params) # We don't want to actually send any requests, though we should make sure POST was called mock_api_client_post = MagicMock() @@ -361,25 +361,21 @@ def test_engine_with_mocked_api_client_sample_job(self, monkeypatch): monkeypatch.setattr(APIClient, "post", mock_api_client_post) monkeypatch.setattr(APIClient, "get", mock_get) - prog = sf.Program(4) + prog = sf.Program(8) - sqz0 = 1.0 - sqz1 = 1.0 phi0 = 0.574 phi1 = 1.33 pi = 3.14 with prog.context as q: - ops.S2gate(sqz0, 0.0) | (q[0], q[2]) - ops.S2gate(sqz1, 0.0) | (q[1], q[3]) + ops.S2gate(1, 0) | (q[0], q[4]) + ops.S2gate(1, 0) | (q[1], q[5]) + ops.S2gate(1, 0) | (q[2], q[6]) + ops.S2gate(1, 0) | (q[3], q[7]) ops.Rgate(phi0) | q[0] ops.BSgate(pi / 4, pi / 2) | (q[0], q[1]) - ops.Rgate(phi1) | q[0] - ops.BSgate(pi / 4, pi / 2) | (q[0], q[1]) - ops.Rgate(phi0) | q[2] - ops.BSgate(pi / 4, pi / 2) | (q[2], q[3]) - ops.Rgate(phi1) | q[2] - ops.BSgate(pi / 4, pi / 2) | (q[2], q[3]) + ops.Rgate(phi0) | q[4] + ops.BSgate(pi / 4, pi / 2) | (q[4], q[5]) ops.MeasureFock() | q engine.run(prog) From 5275040f6dd4f48a2a9d06b32c6ca3fa34e70b30 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 28 Feb 2020 09:32:54 -0500 Subject: [PATCH 296/335] Update docstring example --- strawberryfields/api/connection.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index c59dc4ec6..9e20677ac 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -59,7 +59,10 @@ class Connection: >>> job.status "queued" >>> job.result - AttributeError + AttributeError Traceback (most recent call last) + in + ... 
+ AttributeError: The result is undefined for jobs that are not completed (current status: queued) >>> job = connection.get_job(known_job_id) >>> job From 4ac548e1f9b750aac2507337a5d3ea8b48dd6d76 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 28 Feb 2020 09:46:19 -0500 Subject: [PATCH 297/335] Remove token from CLI args - load using normal config logic --- starship | 7 +------ strawberryfields/engine.py | 11 +---------- 2 files changed, 2 insertions(+), 16 deletions(-) diff --git a/starship b/starship index 6ba3403b5..0c7f2b91f 100755 --- a/starship +++ b/starship @@ -28,9 +28,6 @@ from strawberryfields.io import load if __name__ == "__main__": parser = argparse.ArgumentParser(description="run a blackbird script") - parser.add_argument( - "--token", "-t", help="the API authentication token", required=True - ) group = parser.add_mutually_exclusive_group(required=True) group.add_argument("--input", "-i", help="the xbb file to run") group.add_argument( @@ -44,8 +41,6 @@ if __name__ == "__main__": args = parser.parse_args() - connection = Connection(token=args.token) - if args.ping: connection.ping() sys.stdout.write("You have successfully authenticated to the platform!\n") @@ -53,7 +48,7 @@ if __name__ == "__main__": program = load(args.input) - eng = StarshipEngine(program.target, connection) + eng = StarshipEngine(program.target) sys.stdout.write("Executing program on remote hardware...\n") result = eng.run(program) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 43ba1da9d..50c3c0830 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -505,22 +505,13 @@ class StarshipEngine: POLLING_INTERVAL_SECONDS = 1 VALID_TARGETS = ("chip2",) - def __init__(self, target: str, connection: Connection = None): + def __init__(self, target: str, connection: Connection = Connection()): if target not in self.VALID_TARGETS: raise ValueError( "Invalid engine target: {} (valid targets: {})".format( target, self.VALID_TARGETS ) ) - if connection is None: - config = load_config()["api"] - connection = Connection( - token=config["authentication_token"], - host=config["hostname"], - port=config["port"], - use_ssl=config["use_ssl"], - ) - self._target = target self._connection = connection From fa8b43d1792e4e5c641f120df6e6952f12ef92ef Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 28 Feb 2020 10:01:15 -0500 Subject: [PATCH 298/335] Re-black with line length 100 --- strawberryfields/api/connection.py | 16 ++++------------ strawberryfields/api/job.py | 4 +--- strawberryfields/engine.py | 23 ++++++----------------- tests/api/test_connection.py | 16 ++++------------ tests/api/test_job.py | 15 ++++----------- 5 files changed, 19 insertions(+), 55 deletions(-) diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index 9e20677ac..dc46baeb9 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -93,9 +93,7 @@ def __init__( self._use_ssl = use_ssl self._verbose = verbose - self._base_url = "http{}://{}:{}".format( - "s" if self.use_ssl else "", self.host, self.port - ) + self._base_url = "http{}://{}:{}".format("s" if self.use_ssl else "", self.host, self.port) self._headers = {"Authorization": self.token} @property @@ -155,9 +153,7 @@ def create_job(self, target: str, program: Program, shots: int) -> Job: path = "/jobs" response = requests.post( - self._url(path), - headers=self._headers, - data=json.dumps({"circuit": circuit}), + self._url(path), headers=self._headers, data=json.dumps({"circuit": 
circuit}), ) if response.status_code == 201: if self._verbose: @@ -247,9 +243,7 @@ def cancel_job(self, job_id: str): """ path = "/jobs/{}".format(job_id) response = requests.patch( - self._url(path), - headers=self._headers, - data={"status": JobStatus.CANCELLED.value}, + self._url(path), headers=self._headers, data={"status": JobStatus.CANCELLED.value}, ) if response.status_code == 204: if self._verbose: @@ -280,9 +274,7 @@ def _format_error_message(response: requests.Response) -> str: ) def __repr__(self): - return "<{}: token={}, host={}>".format( - self.__class__.__name__, self.token, self.host - ) + return "<{}: token={}, host={}>".format(self.__class__.__name__, self.token, self.host) def __str__(self): return self.__repr__() diff --git a/strawberryfields/api/job.py b/strawberryfields/api/job.py index fa5903e84..c2e3897bf 100644 --- a/strawberryfields/api/job.py +++ b/strawberryfields/api/job.py @@ -138,9 +138,7 @@ def cancel(self): self._connection.cancel_job(self.id) def __repr__(self): - return "<{}: id={}, status={}>".format( - self.__class__.__name__, self.id, self._status.value - ) + return "<{}: id={}, status={}>".format(self.__class__.__name__, self.id, self._status.value) def __str__(self): return self.__repr__() diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 50c3c0830..3e5bfac69 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -254,9 +254,7 @@ def _broadcast_nones(val, dim): # signatures of methods in Operations to remain cleaner, since only # Measurements need to know about shots - prev = ( - self.run_progs[-1] if self.run_progs else None - ) # previous program segment + prev = self.run_progs[-1] if self.run_progs else None # previous program segment for p in program: if prev is None: # initialize the backend @@ -265,9 +263,7 @@ def _broadcast_nones(val, dim): # there was a previous program segment if not p.can_follow(prev): raise RuntimeError( - "Register mismatch: program {}, '{}'.".format( - len(self.run_progs), p.name - ) + "Register mismatch: program {}, '{}'.".format(len(self.run_progs), p.name) ) # Copy the latest measured values in the RegRefs of p. @@ -291,8 +287,7 @@ def _broadcast_nones(val, dim): self._run_program(p, **kwargs) shots = kwargs.get("shots", 1) self.samples = [ - _broadcast_nones(p.reg_refs[k].val, shots) - for k in sorted(p.reg_refs) + _broadcast_nones(p.reg_refs[k].val, shots) for k in sorted(p.reg_refs) ] self.run_progs.append(p) @@ -364,9 +359,7 @@ def _run_program(self, prog, **kwargs): except NotApplicableError: # command is not applicable to the current backend type raise NotApplicableError( - "The operation {} cannot be used with {}.".format( - cmd.op, self.backend - ) + "The operation {} cannot be used with {}.".format(cmd.op, self.backend) ) from None except NotImplementedError: # command not directly supported by backend API @@ -432,9 +425,7 @@ def run(self, program, *, args=None, compile_options=None, run_options=None): # check that batching is not used together with shots > 1 if self.backend_options.get("batch_size", 0) and eng_run_options["shots"] > 1: - raise NotImplementedError( - "Batching cannot be used together with multiple shots." 
- ) + raise NotImplementedError("Batching cannot be used together with multiple shots.") # check that post-selection and feed-forwarding is not used together with shots > 1 for p in program_lst: @@ -508,9 +499,7 @@ class StarshipEngine: def __init__(self, target: str, connection: Connection = Connection()): if target not in self.VALID_TARGETS: raise ValueError( - "Invalid engine target: {} (valid targets: {})".format( - target, self.VALID_TARGETS - ) + "Invalid engine target: {} (valid targets: {})".format(target, self.VALID_TARGETS) ) self._target = target self._connection = connection diff --git a/tests/api/test_connection.py b/tests/api/test_connection.py index a7ab9526c..1a34e8eb1 100644 --- a/tests/api/test_connection.py +++ b/tests/api/test_connection.py @@ -68,9 +68,7 @@ def test_create_job(self, prog, connection, monkeypatch): id_, status = "123", JobStatus.QUEUED monkeypatch.setattr( - requests, - "post", - mock_return(MockResponse(201, {"id": id_, "status": status})), + requests, "post", mock_return(MockResponse(201, {"id": id_, "status": status})), ) job = connection.create_job("chip2", prog, 1) @@ -117,9 +115,7 @@ def test_get_job(self, connection, monkeypatch): id_, status = "123", JobStatus.COMPLETED monkeypatch.setattr( - requests, - "get", - mock_return(MockResponse(200, {"id": id_, "status": status.value})), + requests, "get", mock_return(MockResponse(200, {"id": id_, "status": status.value})), ) job = connection.get_job(id_) @@ -139,9 +135,7 @@ def test_get_job_status(self, connection, monkeypatch): id_, status = "123", JobStatus.COMPLETED monkeypatch.setattr( - requests, - "get", - mock_return(MockResponse(200, {"id": id_, "status": status.value})), + requests, "get", mock_return(MockResponse(200, {"id": id_, "status": status.value})), ) assert connection.get_job_status(id_) == status.value @@ -176,9 +170,7 @@ def test_get_job_result(self, connection, result_dtype, monkeypatch): np.save(buf, result_samples) buf.seek(0) monkeypatch.setattr( - requests, - "get", - mock_return(MockResponse(200, binary_body=buf.getvalue())), + requests, "get", mock_return(MockResponse(200, binary_body=buf.getvalue())), ) result = connection.get_job_result("123") diff --git a/tests/api/test_job.py b/tests/api/test_job.py index b18c22a80..1d42a7330 100644 --- a/tests/api/test_job.py +++ b/tests/api/test_job.py @@ -31,8 +31,7 @@ def test_incomplete_job_raises_on_result_access(self, connection): job = Job("abc", status=JobStatus.QUEUED, connection=connection) with pytest.raises( - AttributeError, - match="The result is undefined for jobs that are not completed", + AttributeError, match="The result is undefined for jobs that are not completed", ): job.result @@ -40,25 +39,19 @@ def test_completed_job_raises_on_cancel_request(self, connection): """Tests that `job.cancel()` raises an error for a completed job.""" job = Job("abc", status=JobStatus.COMPLETED, connection=connection) - with pytest.raises( - InvalidJobOperationError, match="A complete job cannot be cancelled" - ): + with pytest.raises(InvalidJobOperationError, match="A complete job cannot be cancelled"): job.cancel() def test_failed_job_raises_on_cancel_request(self, connection): """Tests that `job.cancel()` raises an error for a failed job.""" job = Job("abc", status=JobStatus.FAILED, connection=connection) - with pytest.raises( - InvalidJobOperationError, match="A failed job cannot be cancelled" - ): + with pytest.raises(InvalidJobOperationError, match="A failed job cannot be cancelled"): job.cancel() def 
test_cancelled_job_raises_on_cancel_request(self, connection): """Tests that `job.cancel()` raises an error for a completed job.""" job = Job("abc", status=JobStatus.CANCELLED, connection=connection) - with pytest.raises( - InvalidJobOperationError, match="A cancelled job cannot be cancelled" - ): + with pytest.raises(InvalidJobOperationError, match="A cancelled job cannot be cancelled"): job.cancel() From b840acd7d860e0be0cbf1f3881531d738a6a0097 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 28 Feb 2020 10:23:37 -0500 Subject: [PATCH 299/335] Update changelog --- .github/CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index 95df59f88..15cc0cad8 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -41,6 +41,10 @@ and configuration file. [#298](https://github.com/XanaduAI/strawberryfields/pull/298) +* Refactored the existing `StarshipEngine` to use a new `Connection`/`Job` API + and updated the `starship` CLI to use the new interface. + [#294](https://github.com/XanaduAI/strawberryfields/pull/294) + ### Bug fixes * Symbolic Operation parameters are now compatible with TensorFlow 2.0 objects. From 9de85c31e99151ae40c012fd99b16871875f4d53 Mon Sep 17 00:00:00 2001 From: antalszava Date: Fri, 28 Feb 2020 10:36:43 -0500 Subject: [PATCH 300/335] CHANGELOG --- .github/CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index f1a57b2ed..002bf237a 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -12,6 +12,10 @@ [295](https://github.com/XanaduAI/strawberryfields/pull/295) ### Improvements +* Added the `store_account` user convenience function that helps with + configuring access to the Xanadu cloud platform. + [#306](https://github.com/XanaduAI/strawberryfields/pull/306) + * Added two-mode squeezed operation support as a primitive, rather than simply through decomposition, using The Walrus for fast computation. [#289](https://github.com/XanaduAI/strawberryfields/pull/289) From fb2dd85b67d99feab29d12fac8db3c8c17a0e80b Mon Sep 17 00:00:00 2001 From: antalszava Date: Fri, 28 Feb 2020 10:44:34 -0500 Subject: [PATCH 301/335] Linting --- doc/introduction/configuration.rst | 2 +- strawberryfields/configuration.py | 8 ++++---- tests/frontend/test_configuration.py | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 40e05a9b6..67a42be01 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -79,7 +79,7 @@ In these examples ``"MyToken"`` should be replaced with a valid authentication t The ``store_account`` function only needs to be executed once, when initially configuring your system to connect to the Xanadu cloud platform. - + Take care not to share or publicly commit your authentication token, as it provides full access to your account. diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 2d6dbc2a0..7227d1037 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -225,16 +225,16 @@ def store_account(authentication_token, filename="config.toml", location="user_c saving your account credentials. 
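The refactored remote-execution flow recorded in the changelog above (a `StarshipEngine` driven by a `Connection`, with credentials coming from the stored configuration instead of a `--token` flag) can be sketched end to end as follows. The program mirrors the chip2 pattern used in the updated engine test; it assumes an account has already been stored with `store_account`, and reading `result.samples` is an assumption about the `Result` API rather than something shown in these patches:

import strawberryfields as sf
from strawberryfields import ops

prog = sf.Program(8)
with prog.context as q:
    # chip2-style program: four two-mode squeezers followed by photon counting
    ops.S2gate(1.0, 0.0) | (q[0], q[4])
    ops.S2gate(1.0, 0.0) | (q[1], q[5])
    ops.S2gate(1.0, 0.0) | (q[2], q[6])
    ops.S2gate(1.0, 0.0) | (q[3], q[7])
    ops.MeasureFock() | q

eng = sf.StarshipEngine("chip2")  # default Connection; credentials from the stored configuration
result = eng.run(prog)            # submits the job and polls until it completes
print(result.samples)             # assumed accessor for the returned measurement samples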
The configuration file can be created in the following locations: - + - A global user configuration directory (``"user_config"``) - The current working directory (``"local"``) - + This global user configuration directory differs depending on the operating system: - + * On Linux: ``~/.config/strawberryfields`` * On Windows: ``~C:\Users\USERNAME\AppData\Local\Xanadu\strawberryfields`` * On MacOS: ``~/Library/Application Support/strawberryfields`` - + By default, Strawberry Fields will load the configuration and account credentials from the global user configuration directory, no matter the working directory. However, if there exists a configuration file in the *local* working directory, this takes precedence. The ``"local"`` option is therefore useful diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index 1b979577c..f1d554dde 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -182,9 +182,9 @@ def test_env_variable(self, monkeypatch, tmpdir): This is a test case for when there is no configuration file in the current directory.""" - filename = "config.toml" + filename = "config.toml" - path_to_write_file = tmpdir.join(filename) + path_to_write_file = tmpdir.join(filename) with open(path_to_write_file, "w") as f: f.write(TEST_FILE) From 1d5c75eb29848dcf91d4f597af8443eab061f27c Mon Sep 17 00:00:00 2001 From: antalszava Date: Fri, 28 Feb 2020 10:47:01 -0500 Subject: [PATCH 302/335] Blacking; isort sf/__init__.py --- strawberryfields/__init__.py | 5 +- strawberryfields/configuration.py | 14 +- tests/frontend/test_configuration.py | 221 ++++++++++++++++----------- 3 files changed, 148 insertions(+), 92 deletions(-) diff --git a/strawberryfields/__init__.py b/strawberryfields/__init__.py index 119bd739e..e249ad4e8 100644 --- a/strawberryfields/__init__.py +++ b/strawberryfields/__init__.py @@ -23,12 +23,11 @@ """ from . import apps from ._version import __version__ +from .configuration import store_account from .engine import Engine, LocalEngine, StarshipEngine from .io import load, save -from .program import Program from .parameters import par_funcs as math -from .configuration import store_account - +from .program import Program __all__ = ["Engine", "StarshipEngine", "Program", "version", "save", "load", "about", "cite"] diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 7227d1037..9fc3ee10b 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -35,9 +35,10 @@ "hostname": (str, "localhost"), "use_ssl": (bool, True), "port": (int, 443), - } + } } + class ConfigurationError(Exception): """Exception used for configuration errors""" @@ -83,6 +84,7 @@ def load_config(filename="config.toml", **kwargs): return config + def create_config(authentication_token="", **kwargs): """Create a configuration object that stores configuration related data organized into sections. @@ -110,10 +112,11 @@ def create_config(authentication_token="", **kwargs): "hostname": hostname, "use_ssl": use_ssl, "port": port, - } + } } return config + def get_config_filepath(filename="config.toml"): """Get the filepath of the first configuration file found from the defined configuration directories (if any). @@ -145,6 +148,7 @@ def get_config_filepath(filename="config.toml"): return None + def load_config_file(filepath): """Load a configuration object from a TOML formatted file. 
@@ -159,6 +163,7 @@ def load_config_file(filepath): config_from_file = toml.load(f) return config_from_file + def keep_valid_options(sectionconfig): """Filters the valid options in a section of a configuration dictionary. @@ -172,6 +177,7 @@ def keep_valid_options(sectionconfig): """ return {k: v for k, v in sectionconfig.items() if k in VALID_KEYS} + def update_from_environment_variables(config): """Updates the current configuration object from data stored in environment variables. @@ -192,6 +198,7 @@ def update_from_environment_variables(config): if env in os.environ: config[section][key] = parse_environment_variable(key, os.environ[env]) + def parse_environment_variable(key, value): """Parse a value stored in an environment variable. @@ -220,6 +227,7 @@ def parse_environment_variable(key, value): return value + def store_account(authentication_token, filename="config.toml", location="user_config", **kwargs): r"""Configure Strawberry Fields for access to the Xanadu cloud platform by saving your account credentials. @@ -308,6 +316,7 @@ def store_account(authentication_token, filename="config.toml", location="user_c config = create_config(authentication_token=authentication_token, **kwargs) save_config_to_file(config, filepath) + def save_config_to_file(config, filepath): """Saves a configuration to a TOML file. @@ -319,6 +328,7 @@ def save_config_to_file(config, filepath): with open(filepath, "w") as f: toml.dump(config, f) + VALID_KEYS = set(create_config()["api"].keys()) DEFAULT_CONFIG = create_config() configuration = load_config() diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index f1d554dde..e6657500d 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -59,12 +59,13 @@ } environment_variables = [ - "SF_API_AUTHENTICATION_TOKEN", - "SF_API_HOSTNAME", - "SF_API_USE_SSL", - "SF_API_DEBUG", - "SF_API_PORT" - ] + "SF_API_AUTHENTICATION_TOKEN", + "SF_API_HOSTNAME", + "SF_API_USE_SSL", + "SF_API_DEBUG", + "SF_API_PORT", +] + class TestLoadConfig: """Tests for the load_config function.""" @@ -72,7 +73,7 @@ class TestLoadConfig: def test_not_found_warning(self, caplog): """Test that a warning is raised if no configuration file found.""" - conf.load_config(filename='NotAFileName') + conf.load_config(filename="NotAFileName") assert "No Strawberry Fields configuration file found." 
in caplog.text def test_keywords_take_precedence_over_everything(self, monkeypatch, tmpdir): @@ -90,11 +91,9 @@ def test_keywords_take_precedence_over_everything(self, monkeypatch, tmpdir): m.setenv("SF_API_PORT", "42") m.setattr(os, "getcwd", lambda: tmpdir) - configuration = conf.load_config(authentication_token="SomeAuth", - hostname="SomeHost", - use_ssl=False, - port=56 - ) + configuration = conf.load_config( + authentication_token="SomeAuth", hostname="SomeHost", use_ssl=False, port=56 + ) assert configuration == OTHER_EXPECTED_CONFIG @@ -132,30 +131,35 @@ def test_conf_file_loads_well(self, monkeypatch, tmpdir): assert configuration == EXPECTED_CONFIG + class TestCreateConfigObject: """Test the creation of a configuration object""" def test_empty_config_object(self): """Test that an empty configuration object can be created.""" - config = conf.create_config(authentication_token="", - hostname="", - use_ssl="", - port="") + config = conf.create_config(authentication_token="", hostname="", use_ssl="", port="") - assert all(value=="" for value in config["api"].values()) + assert all(value == "" for value in config["api"].values()) def test_config_object_with_authentication_token(self): """Test that passing only the authentication token creates the expected configuration object.""" - assert conf.create_config(authentication_token="071cdcce-9241-4965-93af-4a4dbc739135") == EXPECTED_CONFIG + assert ( + conf.create_config(authentication_token="071cdcce-9241-4965-93af-4a4dbc739135") + == EXPECTED_CONFIG + ) def test_config_object_every_keyword_argument(self): """Test that passing every keyword argument creates the expected configuration object.""" - assert conf.create_config(authentication_token="SomeAuth", - hostname="SomeHost", - use_ssl=False, - port=56) == OTHER_EXPECTED_CONFIG + assert ( + conf.create_config( + authentication_token="SomeAuth", hostname="SomeHost", use_ssl=False, port=56 + ) + == OTHER_EXPECTED_CONFIG + ) + + class TestGetConfigFilepath: """Tests for the get_config_filepath function.""" @@ -218,7 +222,11 @@ def raise_wrapper(ex): with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: "NoConfigFileHere") m.setenv("SF_CONF", "NoConfigFileHere") - m.setattr(conf, "user_config_dir", lambda x, *args: tmpdir if x=="strawberryfields" else "NoConfigFileHere") + m.setattr( + conf, + "user_config_dir", + lambda x, *args: tmpdir if x == "strawberryfields" else "NoConfigFileHere", + ) config_filepath = conf.get_config_filepath(filename="config.toml") @@ -233,6 +241,7 @@ def test_no_config_file_found_returns_none(self, monkeypatch, tmpdir): -in the directory contained in the corresponding environment variable -in the user_config_dir directory of Strawberry Fields.""" + def raise_wrapper(ex): raise ex @@ -245,6 +254,7 @@ def raise_wrapper(ex): assert config_filepath is None + class TestLoadConfigFile: """Tests the load_config_file function.""" @@ -264,7 +274,6 @@ def test_loading_absolute_path(self, monkeypatch, tmpdir): via an absolute path.""" filename = tmpdir.join("test_config.toml") - with open(filename, "w") as f: f.write(TEST_FILE) @@ -274,47 +283,49 @@ def test_loading_absolute_path(self, monkeypatch, tmpdir): assert loaded_config == EXPECTED_CONFIG -class TestKeepValidOptions: +class TestKeepValidOptions: def test_only_invalid_options(self): - section_config_with_invalid_options = {'NotValid1': 1, - 'NotValid2': 2, - 'NotValid3': 3 - } + section_config_with_invalid_options = {"NotValid1": 1, "NotValid2": 2, "NotValid3": 3} assert 
conf.keep_valid_options(section_config_with_invalid_options) == {} def test_valid_and_invalid_options(self): - section_config_with_invalid_options = { 'authentication_token': 'MyToken', - 'NotValid1': 1, - 'NotValid2': 2, - 'NotValid3': 3 - } - assert conf.keep_valid_options(section_config_with_invalid_options) == {'authentication_token': 'MyToken'} + section_config_with_invalid_options = { + "authentication_token": "MyToken", + "NotValid1": 1, + "NotValid2": 2, + "NotValid3": 3, + } + assert conf.keep_valid_options(section_config_with_invalid_options) == { + "authentication_token": "MyToken" + } def test_only_valid_options(self): section_config_only_valid = { - "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", - "hostname": "localhost", - "use_ssl": True, - "port": 443, - } + "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", + "hostname": "localhost", + "use_ssl": True, + "port": 443, + } assert conf.keep_valid_options(section_config_only_valid) == EXPECTED_CONFIG["api"] + value_mapping = [ - ("SF_API_AUTHENTICATION_TOKEN","SomeAuth"), - ("SF_API_HOSTNAME","SomeHost"), - ("SF_API_USE_SSL","False"), - ("SF_API_PORT","56"), - ("SF_API_DEBUG","True") - ] + ("SF_API_AUTHENTICATION_TOKEN", "SomeAuth"), + ("SF_API_HOSTNAME", "SomeHost"), + ("SF_API_USE_SSL", "False"), + ("SF_API_PORT", "56"), + ("SF_API_DEBUG", "True"), +] parsed_values_mapping = { - "SF_API_AUTHENTICATION_TOKEN": "SomeAuth", - "SF_API_HOSTNAME": "SomeHost", - "SF_API_USE_SSL": False, - "SF_API_PORT": 56, - "SF_API_DEBUG": True, - } + "SF_API_AUTHENTICATION_TOKEN": "SomeAuth", + "SF_API_HOSTNAME": "SomeHost", + "SF_API_USE_SSL": False, + "SF_API_PORT": 56, + "SF_API_DEBUG": True, +} + class TestUpdateFromEnvironmentalVariables: """Tests for the update_from_environment_variables function.""" @@ -335,13 +346,12 @@ def test_all_environment_variables_defined(self, monkeypatch): for v, parsed_value in zip(config["api"].values(), parsed_values_mapping.values()): assert v == parsed_value - environment_variables_with_keys_and_values = [ - ("SF_API_AUTHENTICATION_TOKEN","authentication_token","SomeAuth"), - ("SF_API_HOSTNAME","hostname","SomeHost"), - ("SF_API_USE_SSL","use_ssl","False"), - ("SF_API_PORT","port", "56"), - ] + ("SF_API_AUTHENTICATION_TOKEN", "authentication_token", "SomeAuth"), + ("SF_API_HOSTNAME", "hostname", "SomeHost"), + ("SF_API_USE_SSL", "use_ssl", "False"), + ("SF_API_PORT", "port", "56"), + ] @pytest.mark.parametrize("env_var, key, value", environment_variables_with_keys_and_values) def test_one_environment_variable_defined(self, env_var, key, value, monkeypatch): @@ -358,7 +368,9 @@ def test_one_environment_variable_defined(self, env_var, key, value, monkeypatch conf.update_from_environment_variables(config) assert config["api"][key] == parsed_values_mapping[env_var] - for v, (key, parsed_value) in zip(config["api"].values(), parsed_values_mapping.items()): + for v, (key, parsed_value) in zip( + config["api"].values(), parsed_values_mapping.items() + ): if key != env_var: assert v != parsed_value @@ -385,15 +397,14 @@ def test_parse_environment_variable_integer(self, monkeypatch): monkeypatch.setattr(conf, "DEFAULT_CONFIG_SPEC", {"api": {"some_integer": (int, 123)}}) assert conf.parse_environment_variable("some_integer", "123") == 123 -DEFAULT_KWARGS = { - "hostname": "localhost", - "use_ssl": True, - "port": 443, - } + +DEFAULT_KWARGS = {"hostname": "localhost", "use_ssl": True, "port": 443} + class MockSaveConfigToFile: """A mock class used to contain the state left by the 
save_config_to_file function.""" + def __init__(self): self.config = None self.path = None @@ -403,11 +414,13 @@ def update(self, config, path): self.config = config self.path = path + def mock_create_config(authentication_token="", **kwargs): """A mock version of the create_config function adjusted to the store_account function. """ - return {"api": {'authentication_token': authentication_token, **kwargs}} + return {"api": {"authentication_token": authentication_token, **kwargs}} + class TestStoreAccount: """Tests for the store_account function.""" @@ -425,10 +438,12 @@ def test_config_created_locally(self, monkeypatch, tmpdir): m.setattr(conf, "user_config_dir", lambda *args: "NotTheCorrectDir") m.setattr(conf, "create_config", mock_create_config) m.setattr(conf, "save_config_to_file", lambda a, b: mock_save_config_file.update(a, b)) - conf.store_account(authentication_token, filename="config.toml", location="local", **DEFAULT_KWARGS) + conf.store_account( + authentication_token, filename="config.toml", location="local", **DEFAULT_KWARGS + ) - assert mock_save_config_file.config == EXPECTED_CONFIG - assert mock_save_config_file.path == tmpdir.join("config.toml") + assert mock_save_config_file.config == EXPECTED_CONFIG + assert mock_save_config_file.path == tmpdir.join("config.toml") def test_global_config_created(self, monkeypatch, tmpdir): """Tests that a configuration file was created in the user @@ -443,20 +458,27 @@ def test_global_config_created(self, monkeypatch, tmpdir): m.setattr(conf, "user_config_dir", lambda *args: tmpdir) m.setattr(conf, "create_config", mock_create_config) m.setattr(conf, "save_config_to_file", lambda a, b: mock_save_config_file.update(a, b)) - conf.store_account(authentication_token, filename="config.toml", location="user_config", **DEFAULT_KWARGS) + conf.store_account( + authentication_token, + filename="config.toml", + location="user_config", + **DEFAULT_KWARGS + ) - assert mock_save_config_file.config == EXPECTED_CONFIG - assert mock_save_config_file.path == tmpdir.join("config.toml") + assert mock_save_config_file.config == EXPECTED_CONFIG + assert mock_save_config_file.path == tmpdir.join("config.toml") def test_location_not_recognized_error(self, monkeypatch, tmpdir): """Tests that an error is raised if the configuration file is supposed to be created in an unrecognized directory.""" - with pytest.raises( - conf.ConfigurationError, - match="This location is not recognized.", - ): - conf.store_account(authentication_token, filename="config.toml", location="UNRECOGNIZED_LOCATION", **DEFAULT_KWARGS) + with pytest.raises(conf.ConfigurationError, match="This location is not recognized."): + conf.store_account( + authentication_token, + filename="config.toml", + location="UNRECOGNIZED_LOCATION", + **DEFAULT_KWARGS + ) def test_non_existing_directory_does_not_raise_file_not_found_error(self, monkeypatch, tmpdir): """Tests that an error is raised if the configuration file is supposed @@ -465,8 +487,12 @@ def test_non_existing_directory_does_not_raise_file_not_found_error(self, monkey with monkeypatch.context() as m: m.setattr(conf, "user_config_dir", lambda *args: tmpdir.join("new_dir")) - conf.store_account(authentication_token, filename="config.toml", location="user_config", **DEFAULT_KWARGS) - + conf.store_account( + authentication_token, + filename="config.toml", + location="user_config", + **DEFAULT_KWARGS + ) def test_non_existing_directory_without_makedirs_raises_error(self, monkeypatch, tmpdir): """Tests that an error is raised if the configuration 
file is supposed @@ -476,11 +502,14 @@ def test_non_existing_directory_without_makedirs_raises_error(self, monkeypatch, with monkeypatch.context() as m: m.setattr(os, "makedirs", lambda a, **kwargs: None) m.setattr(conf, "user_config_dir", lambda *args: tmpdir.join("new_dir")) - with pytest.raises( - FileNotFoundError, - match="No such file or directory", - ): - conf.store_account(authentication_token, filename="config.toml", location="user_config", **DEFAULT_KWARGS) + with pytest.raises(FileNotFoundError, match="No such file or directory"): + conf.store_account( + authentication_token, + filename="config.toml", + location="user_config", + **DEFAULT_KWARGS + ) + class TestStoreAccountIntegration: """Integration tests for the store_account function. @@ -495,7 +524,9 @@ def test_local(self, monkeypatch, tmpdir): with monkeypatch.context() as m: m.setattr(os, "getcwd", lambda: tmpdir) - conf.store_account(authentication_token, filename="config.toml", location="local", **DEFAULT_KWARGS) + conf.store_account( + authentication_token, filename="config.toml", location="local", **DEFAULT_KWARGS + ) filepath = tmpdir.join("config.toml") result = toml.load(filepath) @@ -507,7 +538,12 @@ def test_global(self, monkeypatch, tmpdir): with monkeypatch.context() as m: m.setattr(conf, "user_config_dir", lambda *args: tmpdir) - conf.store_account(authentication_token, filename="config.toml", location="user_config", **DEFAULT_KWARGS) + conf.store_account( + authentication_token, + filename="config.toml", + location="user_config", + **DEFAULT_KWARGS + ) filepath = tmpdir.join("config.toml") result = toml.load(filepath) @@ -518,7 +554,12 @@ def test_directory_is_created(self, monkeypatch, tmpdir): recursive_dir = tmpdir.join(".new_dir") with monkeypatch.context() as m: m.setattr(conf, "user_config_dir", lambda *args: recursive_dir) - conf.store_account(authentication_token, filename="config.toml", location="user_config", **DEFAULT_KWARGS) + conf.store_account( + authentication_token, + filename="config.toml", + location="user_config", + **DEFAULT_KWARGS + ) filepath = os.path.join(recursive_dir, "config.toml") result = toml.load(filepath) @@ -529,12 +570,18 @@ def test_nested_directory_is_created(self, monkeypatch, tmpdir): recursive_dir = tmpdir.join(".new_dir", "new_dir_again") with monkeypatch.context() as m: m.setattr(conf, "user_config_dir", lambda *args: recursive_dir) - conf.store_account(authentication_token, filename="config.toml", location="user_config", **DEFAULT_KWARGS) + conf.store_account( + authentication_token, + filename="config.toml", + location="user_config", + **DEFAULT_KWARGS + ) filepath = os.path.join(recursive_dir, "config.toml") result = toml.load(filepath) assert result == EXPECTED_CONFIG + class TestSaveConfigToFile: """Tests for the store_account function.""" From 9cf4467ed396abc5300600925dbdddff81f22a91 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 28 Feb 2020 11:13:12 -0500 Subject: [PATCH 303/335] Update strawberryfields/api/result.py Co-Authored-By: Theodor --- strawberryfields/api/result.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/strawberryfields/api/result.py b/strawberryfields/api/result.py index 427526d8d..de2466548 100644 --- a/strawberryfields/api/result.py +++ b/strawberryfields/api/result.py @@ -47,9 +47,9 @@ class Result: >>> print(results) Result: 3 subsystems state: - samples: [0, 0, 0] + samples: [[0, 0, 0]] >>> results.samples - [0, 0, 0] + np.array([[0, 0, 0]]) >>> results.state.is_pure() True From 
613a9fe22acfca5c52d48a553cbff18d3464c791 Mon Sep 17 00:00:00 2001 From: Paul Tan Date: Fri, 28 Feb 2020 11:30:01 -0500 Subject: [PATCH 304/335] Remove unused import --- strawberryfields/engine.py | 1 - 1 file changed, 1 deletion(-) diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 3e5bfac69..7911aad19 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -26,7 +26,6 @@ import numpy as np from strawberryfields.api import Connection, Job, Result -from strawberryfields.configuration import load_config from strawberryfields.program import Program from .backends import load_backend From ff1e16d35641a67feb91a65f9ea4d0ac929cd134 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Mon, 2 Mar 2020 21:06:40 +1030 Subject: [PATCH 305/335] merge master into demo --- strawberryfields/configuration.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 9fc3ee10b..cdb07ece3 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -237,16 +237,16 @@ def store_account(authentication_token, filename="config.toml", location="user_c - A global user configuration directory (``"user_config"``) - The current working directory (``"local"``) - This global user configuration directory differs depending on the operating system: + This global user configuration directory differs depending on the operating system: - * On Linux: ``~/.config/strawberryfields`` - * On Windows: ``~C:\Users\USERNAME\AppData\Local\Xanadu\strawberryfields`` - * On MacOS: ``~/Library/Application Support/strawberryfields`` + * On Linux: ``~/.config/strawberryfields`` + * On Windows: ``~C:\Users\USERNAME\AppData\Local\Xanadu\strawberryfields`` + * On MacOS: ``~/Library/Application Support/strawberryfields`` - By default, Strawberry Fields will load the configuration and account credentials from the global - user configuration directory, no matter the working directory. However, if there exists a configuration - file in the *local* working directory, this takes precedence. The ``"local"`` option is therefore useful - for maintaining per-project configuration settings. + By default, Strawberry Fields will load the configuration and account credentials from the global + user configuration directory, no matter the working directory. However, if there exists a configuration + file in the *local* working directory, this takes precedence. The ``"local"`` option is therefore useful + for maintaining per-project configuration settings. 
**Examples:** From 913840b9bd34c4af397fe56ff04f1013a6b5ac52 Mon Sep 17 00:00:00 2001 From: antalszava Date: Tue, 3 Mar 2020 19:30:46 -0500 Subject: [PATCH 306/335] Change default hostname (#309) * localhost -> platform.xanadu.ai * Changing hostname occurrences to platform.strawberryfields.ai --- default_config.toml | 2 +- doc/introduction/configuration.rst | 4 ++-- strawberryfields/configuration.py | 6 +++--- tests/frontend/test_configuration.py | 8 ++++---- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/default_config.toml b/default_config.toml index 2d0d55bfc..12682a1c1 100644 --- a/default_config.toml +++ b/default_config.toml @@ -7,6 +7,6 @@ # Fill in your authentication authentication_token = None # example token form: 071cdcce-9241-4965-93af-4a4dbc739135 # Fill in the hostname of the Cloud API -hostname = "localhost" +hostname = "platform.strawberryfields.ai" # Whether Strawberry Fields should use SSL to connect to the API use_ssl = true diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index 67a42be01..b4de977d6 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -31,7 +31,7 @@ and has the following format: [api] # Options for the Strawberry Fields cloud API authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" - hostname = "localhost" + hostname = "platform.strawberryfields.ai" use_ssl = true port = 443 @@ -44,7 +44,7 @@ Configuration options environment variable: ``SF_API_AUTHENTICATION_TOKEN`` **hostname (str)** (*optional*) - The hostname of the server to connect to. Defaults to ``localhost``. Must + The hostname of the server to connect to. Defaults to ``platform.strawberryfields.ai``. Must be one of the allowed hosts. Corresponding environment variable: ``SF_API_HOSTNAME`` diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index cdb07ece3..75f6185e9 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -32,7 +32,7 @@ DEFAULT_CONFIG_SPEC = { "api": { "authentication_token": (str, ""), - "hostname": (str, "localhost"), + "hostname": (str, "platform.strawberryfields.ai"), "use_ssl": (bool, True), "port": (int, 443), } @@ -102,7 +102,7 @@ def create_config(authentication_token="", **kwargs): dict[str, dict[str, Union[str, bool, int]]]: the configuration object """ - hostname = kwargs.get("hostname", "localhost") + hostname = kwargs.get("hostname", DEFAULT_CONFIG_SPEC["api"]["hostname"][1]) use_ssl = kwargs.get("use_ssl", DEFAULT_CONFIG_SPEC["api"]["use_ssl"][1]) port = kwargs.get("port", DEFAULT_CONFIG_SPEC["api"]["port"][1]) @@ -263,7 +263,7 @@ def store_account(authentication_token, filename="config.toml", location="user_c [api] authentication_token = "MyToken" - hostname = "localhost" + hostname = "platform.strawberryfields.ai" use_ssl = true port = 443 diff --git a/tests/frontend/test_configuration.py b/tests/frontend/test_configuration.py index e6657500d..140bc35ab 100644 --- a/tests/frontend/test_configuration.py +++ b/tests/frontend/test_configuration.py @@ -29,7 +29,7 @@ [api] # Options for the Strawberry Fields Cloud API authentication_token = "071cdcce-9241-4965-93af-4a4dbc739135" -hostname = "localhost" +hostname = "platform.strawberryfields.ai" use_ssl = true port = 443 """ @@ -43,7 +43,7 @@ EXPECTED_CONFIG = { "api": { "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", - "hostname": "localhost", + "hostname": "platform.strawberryfields.ai", "use_ssl": True, "port": 443, } @@ -303,7 +303,7 
@@ def test_valid_and_invalid_options(self): def test_only_valid_options(self): section_config_only_valid = { "authentication_token": "071cdcce-9241-4965-93af-4a4dbc739135", - "hostname": "localhost", + "hostname": "platform.strawberryfields.ai", "use_ssl": True, "port": 443, } @@ -398,7 +398,7 @@ def test_parse_environment_variable_integer(self, monkeypatch): assert conf.parse_environment_variable("some_integer", "123") == 123 -DEFAULT_KWARGS = {"hostname": "localhost", "use_ssl": True, "port": 443} +DEFAULT_KWARGS = {"hostname": "platform.strawberryfields.ai", "use_ssl": True, "port": 443} class MockSaveConfigToFile: From ae0aa331487861a869c8cf20541b462d8b0e8012 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 10 Mar 2020 13:02:46 +1030 Subject: [PATCH 307/335] fix tutorial teleportation --- doc/tutorials/tutorial_teleportation.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/tutorials/tutorial_teleportation.rst b/doc/tutorials/tutorial_teleportation.rst index 9ad41e78c..bcd7d2674 100644 --- a/doc/tutorials/tutorial_teleportation.rst +++ b/doc/tutorials/tutorial_teleportation.rst @@ -170,7 +170,7 @@ We can now execute our quantum program ``prog`` on the engine via the :func:`Eng .. code-block:: python - result = eng.run(prog, run_options={shots=1, modes=None}, compile_options={}) + result = eng.run(prog, run_options={"shots":1, "modes":None}, compile_options={}) The :meth:`eng.run ` method accepts the arguments: @@ -239,7 +239,7 @@ Once the engine has been run, we can extract results of measurements and the qua .. code-block:: pycon - >>> results.samples + >>> result.samples [2.9645296452964534, -2.9465294652946525, None] If a mode has not been measured, this attribute simply returns ``None``. From 652cdf875d93497515673a259f03a118bb0c5a7d Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 10 Mar 2020 13:04:55 +1030 Subject: [PATCH 308/335] fix post-selection tutorial --- doc/tutorials/tutorial_post_selection.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/tutorials/tutorial_post_selection.rst b/doc/tutorials/tutorial_post_selection.rst index d0784ebe3..3fcf91898 100644 --- a/doc/tutorials/tutorial_post_selection.rst +++ b/doc/tutorials/tutorial_post_selection.rst @@ -172,11 +172,10 @@ We can simulate this conditional displacement using post-selection. Utilizing th eng = sf.Engine("gaussian") with prog.context as q: - with eng: S2gate(1) | (q[0], q[1]) MeasureHomodyne(0, select=1) | q[0] - state = eng.run('gaussian').state + state = eng.run(prog).state To check the displacement of the second output mode, we can use the :meth:`~.BaseGaussianState.reduced_gaussian` state method to extract the vector of means and the covariance matrix: From 7cf60f255c6f432e0398b0de750d77dbfb0729b5 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 10 Mar 2020 13:47:16 +1030 Subject: [PATCH 309/335] updated more tutorials --- doc/tutorials/tutorial_boson_sampling.rst | 106 +++++++++++++++++++--- examples/boson_sampling.py | 17 +++- setup.py | 5 +- 3 files changed, 111 insertions(+), 17 deletions(-) diff --git a/doc/tutorials/tutorial_boson_sampling.rst b/doc/tutorials/tutorial_boson_sampling.rst index c4fcfed62..97d6e7d28 100644 --- a/doc/tutorials/tutorial_boson_sampling.rst +++ b/doc/tutorials/tutorial_boson_sampling.rst @@ -162,6 +162,90 @@ We find that -0.1566+0.2246i & 0.1100-0.1638i & -0.4212+0.1836i & 0.8188+0.068i \end{matrix}\right] + +.. 
note:: + + While we have done it by hand, Strawberry Fields also supports a Gaussian unitary + compiler, that allows us to compile our program into a single Gaussian unitary. + + Note that we must create a new program without the initial Fock states, + as the Gaussian unitary compiler only works with *Gaussian operations*. + Compiling the program: + + >>> prog_unitary = sf.Program(4) + >>> prog_unitary.circuit = boson_sampling.circuit[4:] + >>> prog_compiled = prog_unitary.compile("gaussian_unitary") + + Printing ``prog_compiled``, we see it now consists of a single + :class:`~.GaussianTransform` operation, consisting of a single + symplectic matrix: + + >>> prog_compiled.print() + GaussianTransform([[ 0.2195 0.6111 -0.1027 -0.0273 0.2565 -0.5242 -0.4745 -0.0373] + [ 0.4513 0.457 0.1316 0.0353 -0.6026 -0.0123 0.4504 0.0532] + [ 0.0387 -0.0192 -0.2408 -0.4584 -0.4927 0.3218 -0.5244 -0.3296] + [-0.1566 0.11 -0.4212 0.8188 -0.2246 0.1638 -0.1836 -0.068 ] + [-0.2565 0.5242 0.4745 0.0373 0.2195 0.6111 -0.1027 -0.0273] + [ 0.6026 0.0123 -0.4504 -0.0532 0.4513 0.457 0.1316 0.0353] + [ 0.4927 -0.3218 0.5244 0.3296 0.0387 -0.0192 -0.2408 -0.4584] + [ 0.2246 -0.1638 0.1836 0.068 -0.1566 0.11 -0.4212 0.8188]]) | (q[0], q[1], q[2], q[3]) + + We can easily extract this symplectic matrix, and rewrite it as a unitary + matrix: + + >>> S = prog_compiled.circuit[0].op.p[0] + >>> U = S[:4, :4] + 1j*S[4:, :4] + >>> U + [[ 0.2195-0.2565j, 0.6111+0.5242j, -0.1027+0.4745j, -0.0273+0.0373j], + [ 0.4513+0.6026j, 0.457 +0.0123j, 0.1316-0.4504j, 0.0353-0.0532j], + [ 0.0387+0.4927j, -0.0192-0.3218j, -0.2408+0.5244j, -0.4584+0.3296j], + [-0.1566+0.2246j, 0.11 -0.1638j, -0.4212+0.1836j, 0.8188+0.068j ]] + + which agrees with the result above. + +.. note:: + + Strawberry Fields supports the :class:`~.Interferometer` operation, which + allows numeric unitary matrices to be directly embedded in programs and + decomposed into beamsplitters and rotation gates: + + .. code-block:: python3 + + boson_sampling = sf.Program(4) + + with boson_sampling.context as q: + # prepare the input fock states + Fock(1) | q[0] + Fock(1) | q[1] + Vac | q[2] + Fock(1) | q[3] + + Interferometer(U) | q + + Compiling this for the Fock backend, and printing the result: + + >>> boson_sampling.compile("fock").print() + Fock(1) | (q[0]) + Fock(1) | (q[1]) + Vac | (q[2]) + Fock(1) | (q[3]) + Rgate(-3.124) | (q[0]) + BSgate(0.9465, 0) | (q[0], q[1]) + Rgate(2.724) | (q[2]) + BSgate(0.09485, 0) | (q[2], q[3]) + Rgate(-0.9705) | (q[1]) + BSgate(0.7263, 0) | (q[1], q[2]) + Rgate(-1.788) | (q[0]) + BSgate(0.8246, 0) | (q[0], q[1]) + Rgate(-0.9397) | (q[0]) + Rgate(2.93) | (q[1]) + Rgate(3.133) | (q[2]) + Rgate(0.07904) | (q[3]) + BSgate(-0.533, 0) | (q[2], q[3]) + Rgate(2.45) | (q[2]) + BSgate(-0.03962, 0) | (q[1], q[2]) + Rgate(2.508) | (q[1]) + Analysis ========= @@ -207,21 +291,21 @@ Comparing this to the result from Strawberry Fields, we can see that they only d They agree with almost negligable error! This is due to the high accuracy of the Fock backend, despite the Fock state truncation/cutoff. 
- This close result stands for any other output Fock state measurement that preserves the photon number, for example: +This close result stands for any other output Fock state measurement that preserves the photon number, for example: - * :math:`\ket{3,0,0,0}\bra{3,0,0,0}`: +* :math:`\ket{3,0,0,0}\bra{3,0,0,0}`: - >>> probs[3,0,0,0] - 0.00094584833471324822 - >>> np.abs(perm(U[:,[0,1,3]][[0,0,0]]))**2 / 6 - 0.00094584833471324887 +>>> probs[3,0,0,0] +0.00094584833471324822 +>>> np.abs(perm(U[:,[0,1,3]][[0,0,0]]))**2 / 6 +0.00094584833471324887 - * :math:`\ket{1,1,0,1}\bra{1,1,0,1}`: +* :math:`\ket{1,1,0,1}\bra{1,1,0,1}`: - >>> probs[1,1,0,1] - 0.17468916048563926 - >>> np.abs(perm(U[:,[0,1,3]][[0,1,3]]))**2 / 1 - 0.17468916048563934 +>>> probs[1,1,0,1] +0.17468916048563926 +>>> np.abs(perm(U[:,[0,1,3]][[0,1,3]]))**2 / 1 +0.17468916048563934 .. note:: diff --git a/examples/boson_sampling.py b/examples/boson_sampling.py index a6971e427..7db95b39b 100644 --- a/examples/boson_sampling.py +++ b/examples/boson_sampling.py @@ -6,6 +6,13 @@ eng = sf.Engine(backend="fock", backend_options={"cutoff_dim": 7}) boson_sampling = sf.Program(4) +import numpy as np +U = np.array( + [[ 0.2195-0.2565j, 0.6111+0.5242j, -0.1027+0.4745j, -0.0273+0.0373j], + [ 0.4513+0.6026j, 0.457 +0.0123j, 0.1316-0.4504j, 0.0353-0.0532j], + [ 0.0387+0.4927j, -0.0192-0.3218j, -0.2408+0.5244j, -0.4584+0.3296j], + [-0.1566+0.2246j, 0.11 -0.1638j, -0.4212+0.1836j, 0.8188+0.068j ]]) + with boson_sampling.context as q: # prepare the input fock states Fock(1) | q[0] @@ -13,11 +20,13 @@ Vac | q[2] Fock(1) | q[3] + Interferometer(U) | q + # rotation gates - Rgate(0.5719) - Rgate(-1.9782) - Rgate(2.0603) - Rgate(0.0644) + Rgate(0.5719) | q[0] + Rgate(-1.9782) | q[1] + Rgate(2.0603) | q[2] + Rgate(0.0644) | q[3] # beamsplitter array BSgate(0.7804, 0.8578) | (q[0], q[1]) diff --git a/setup.py b/setup.py index 8f2182772..c177f230b 100644 --- a/setup.py +++ b/setup.py @@ -26,9 +26,9 @@ "scipy>=1.0.0", "sympy>=1.5", "networkx>=2.0", - "quantum-blackbird>=0.2.0", + "quantum-blackbird>=0.2.3", "python-dateutil>=2.8.0", - "thewalrus>=0.10", + "thewalrus>=0.11", "numba", "toml", "appdirs", @@ -80,6 +80,7 @@ "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3 :: Only", "Topic :: Scientific/Engineering :: Physics", ] From cf9800f23ed9d8e1fd5f2ed4d65718f9851318c7 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 10 Mar 2020 14:14:22 +1030 Subject: [PATCH 310/335] updated build --- doc/conf.py | 5 ++- doc/index.rst | 1 + doc/introduction/configuration.rst | 2 -- doc/introduction/starship.rst | 39 ++++++++++----------- doc/introduction/tutorials.rst | 4 +-- doc/requirements.txt | 1 + doc/tutorials/tutorial_machine_learning.rst | 2 ++ examples/boson_sampling.py | 9 ----- 8 files changed, 28 insertions(+), 35 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index e7541e90b..5a1b5c098 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -58,9 +58,12 @@ def __getattr__(cls, name): 'sphinx_gallery.gen_gallery', "sphinx.ext.intersphinx", "sphinx_automodapi.automodapi", - 'sphinx_copybutton' + 'sphinx_copybutton', + "m2r" ] +source_suffix = ['.rst', '.md'] + from glob import glob import shutil import warnings diff --git a/doc/index.rst b/doc/index.rst index 947057ae8..12869683e 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -164,6 +164,7 @@ Strawberry Fields is **free** and **open source**, released under 
the Apache Lic development/development_guide development/research + development/release_notes.md .. toctree:: :maxdepth: 1 diff --git a/doc/introduction/configuration.rst b/doc/introduction/configuration.rst index b4de977d6..55b1cad68 100644 --- a/doc/introduction/configuration.rst +++ b/doc/introduction/configuration.rst @@ -67,7 +67,6 @@ In these examples ``"MyToken"`` should be replaced with a valid authentication t .. code-block:: python - import strawberryfields as sf sf.store_account("MyToken") .. note:: @@ -89,7 +88,6 @@ Strawberry Fields: .. code-block:: python - import strawberryfields as sf sf.store_account("MyToken", location="local") For more detailed examples, visit the :func:`~.store_account` diff --git a/doc/introduction/starship.rst b/doc/introduction/starship.rst index 940991e32..984b54550 100644 --- a/doc/introduction/starship.rst +++ b/doc/introduction/starship.rst @@ -3,8 +3,6 @@ StarshipEngine ############## -.. sectionauthor:: Zeid Zabaneh - In this section, we provide a tutorial of the **StarshipEngine**, an engine used to connect to the Xanadu cloud platform and execute jobs remotely (e.g., on a quantum chip). @@ -12,33 +10,32 @@ Configuring StarshipEngine -------------------------- Before using StarshipEngine, you need to configure the hostname and authentication token that will provide -you access to the API. The easiest way is to create a configuration file named ``config.toml`` in your -working directory. A typical file looks like this: - -.. code-block:: toml +you access to the API. This can be done in one of two ways: - [api] - hostname = "platform.strawberryfields.ai" - authentication_token = "ElUFm3O6m6q1DXPmpi5g4hWEhYHXFxBc" +1. By using the :func:`~.store_account` function to store your account credentials: -You can generate this file interactively by using the ``starship`` command as follows, answering the questions in the prompts. + >>> sf.store_account("my_api_token") -.. code-block:: text +2. By using the Starship command line interface to configure Strawberry Fields with your + account credentials: - $ starship --reconfigure - Please enter the hostname of the server to connect to: [localhost] platform.strawberryfields.ai - Please enter the authentication token to use when connecting: [] ElUFm3O6m6q1DXPmpi5g4hWEhYHXFxBc - Would you like to save these settings to a local cofiguration file in the current directory? [Y/n] y - Writing configuration file to current working directory... + >>> starship configure --token my_api_token +In both of the above code snippets, ``my_api_token`` should be replaced with your personal +API token! For more details on configuring Strawberry Fields for cloud access, including +creating local per-project configuration files, see the :doc:`/introduction/configuration` +quickstart guide. -To test connectivity, you can use the following command: +To test that your account credentials correctly authenticate against the cloud platform, +you can use the ``ping`` command, from within Strawberry Fields, -.. code-block:: text +>>> sf.cli.ping() +You have successfully authenticated to the platform! - $ starship --hello - You have successfully authenticated to the platform! +or via the command line: +>>> starship --ping +You have successfully authenticated to the platform! .. _first_program: @@ -140,7 +137,7 @@ To execute this file from the command line, use the ``starship`` command as foll .. 
code-block:: console - starship --input test.xbb --output out.txt + starship run test.xbb --output out.txt After executing the above command, the result will be stored in ``out.txt`` in the current working directory. You can also omit the ``--output`` parameter to print the result to the screen. diff --git a/doc/introduction/tutorials.rst b/doc/introduction/tutorials.rst index e9ad51849..06745a0ac 100644 --- a/doc/introduction/tutorials.rst +++ b/doc/introduction/tutorials.rst @@ -134,8 +134,8 @@ Algorithms
-Demo ----- +Hardware +-------- .. toctree:: :hidden: diff --git a/doc/requirements.txt b/doc/requirements.txt index 946a485b2..95636c92a 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1,6 +1,7 @@ appdirs ipykernel sphinx==2.2.2 +m2r nbsphinx networkx>=2.0 numpy>=1.16.3 diff --git a/doc/tutorials/tutorial_machine_learning.rst b/doc/tutorials/tutorial_machine_learning.rst index f8ee821c1..28ab0df94 100644 --- a/doc/tutorials/tutorial_machine_learning.rst +++ b/doc/tutorials/tutorial_machine_learning.rst @@ -3,6 +3,8 @@ Optimization & machine learning ############################### +.. warning:: We are currently making some huge changes to the TensorFlow backend of Strawberry Fields, including upcoming TensorFlow 2.0 support. As a result, this tutorial currently is unavailable. Please bare with us as we finish off the work on our new TensorFlow backend! + .. note:: The content in this page is suited to more advanced users who already have an understanding of Strawberry Fields, e.g., those who have completed the :ref:`teleportation tutorial `. Some basic knowledge of `Tensorflow `_ is also helpful. In this page, we show how the user can carry out optimization and machine learning on quantum diff --git a/examples/boson_sampling.py b/examples/boson_sampling.py index 7db95b39b..3fea9c2f8 100644 --- a/examples/boson_sampling.py +++ b/examples/boson_sampling.py @@ -6,13 +6,6 @@ eng = sf.Engine(backend="fock", backend_options={"cutoff_dim": 7}) boson_sampling = sf.Program(4) -import numpy as np -U = np.array( - [[ 0.2195-0.2565j, 0.6111+0.5242j, -0.1027+0.4745j, -0.0273+0.0373j], - [ 0.4513+0.6026j, 0.457 +0.0123j, 0.1316-0.4504j, 0.0353-0.0532j], - [ 0.0387+0.4927j, -0.0192-0.3218j, -0.2408+0.5244j, -0.4584+0.3296j], - [-0.1566+0.2246j, 0.11 -0.1638j, -0.4212+0.1836j, 0.8188+0.068j ]]) - with boson_sampling.context as q: # prepare the input fock states Fock(1) | q[0] @@ -20,8 +13,6 @@ Vac | q[2] Fock(1) | q[3] - Interferometer(U) | q - # rotation gates Rgate(0.5719) | q[0] Rgate(-1.9782) | q[1] From b7094237e7a0c11f8a8810eb0258b47f98319845 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 10 Mar 2020 14:14:50 +1030 Subject: [PATCH 311/335] added new --- doc/development/release_notes.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 doc/development/release_notes.md diff --git a/doc/development/release_notes.md b/doc/development/release_notes.md new file mode 100644 index 000000000..ec160f655 --- /dev/null +++ b/doc/development/release_notes.md @@ -0,0 +1,6 @@ +Release notes +------------- + +This page contains the release notes for PennyLane. + +.. mdinclude:: ../../.github/CHANGELOG.md From b490bafe39d15c37601caca2964184c78d2073cc Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 10 Mar 2020 14:45:59 +1030 Subject: [PATCH 312/335] documentation fixes --- .github/CHANGELOG.md | 202 ++++++++++++++---------------- doc/code/sf_configuration.rst | 3 + doc/introduction/tutorials.rst | 44 +++++-- strawberryfields/api/job.py | 2 +- strawberryfields/configuration.py | 6 - 5 files changed, 133 insertions(+), 124 deletions(-) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index ab051462b..b6f2977bc 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -1,65 +1,66 @@ -# Release 0.13.0-dev +# Release 0.13.0-dev (development release) -### New features +
<h3>New features since last release</h3>
* Adds the `x_quad_values` and `p_quad_values` methods to the `state` class. This allows calculation of x and p quadrature probability distributions by integrating across the Wigner function. - [#270](https://github.com/XanaduAI/strawberryfields/pull/270) + [(270)](https://github.com/XanaduAI/strawberryfields/pull/270) * Adds support in the applications layer for node-weighted graphs. Users can sample from graphs with node weights using the WAW encoding and input node weights into search algorithms in the `clique` and `subgraph` modules. - [#295](https://github.com/XanaduAI/strawberryfields/pull/295) - [#296](https://github.com/XanaduAI/strawberryfields/pull/296) - [#297](https://github.com/XanaduAI/strawberryfields/pull/297) + [(295)](https://github.com/XanaduAI/strawberryfields/pull/295) + [(296)](https://github.com/XanaduAI/strawberryfields/pull/296) + [(297)](https://github.com/XanaduAI/strawberryfields/pull/297) + +
<h3>Improvements</h3>
-### Improvements * Added the `store_account` user convenience function that helps with configuring access to the Xanadu cloud platform. - [#306](https://github.com/XanaduAI/strawberryfields/pull/306) + [(306)](https://github.com/XanaduAI/strawberryfields/pull/306) * Added two-mode squeezed operation support as a primitive, rather than simply through decomposition, using The Walrus for fast computation. - [#289](https://github.com/XanaduAI/strawberryfields/pull/289) + [(289)](https://github.com/XanaduAI/strawberryfields/pull/289) * Added The Walrus implementations for the displacement, squeezing and beamsplitter operations to improve speed. - [#287](https://github.com/XanaduAI/strawberryfields/pull/287) + [(287)](https://github.com/XanaduAI/strawberryfields/pull/287) * Added custom tensor contractions for the beamsplitter and the two-mode squeeze gate as well as faster application of diagonal gate matrices. - [#292](https://github.com/XanaduAI/strawberryfields/pull/292) + [(292)](https://github.com/XanaduAI/strawberryfields/pull/292) * Moved apply-gate functions to `Circuit` class, and removed `apply_gate_einsum` and `Circuits._apply_gate`, since they were no longer used. - [#293](https://github.com/XanaduAI/strawberryfields/pull/293/) - + [(293)](https://github.com/XanaduAI/strawberryfields/pull/293/) + * Unified backend results returned from running simulators and added checks for using batching, post-selection and feed-fowarding together with multiple shots, which now raises an error. - [#300](https://github.com/XanaduAI/strawberryfields/pull/300) + [(300)](https://github.com/XanaduAI/strawberryfields/pull/300) * Replaced the `Configuration` class with the `load_config` and auxiliary functions to load configuration from keyword arguments, environment variables and configuration file. - [#298](https://github.com/XanaduAI/strawberryfields/pull/298) + [(298)](https://github.com/XanaduAI/strawberryfields/pull/298) * Refactored the existing `StarshipEngine` to use a new `Connection`/`Job` API and updated the `starship` CLI to use the new interface. - [#294](https://github.com/XanaduAI/strawberryfields/pull/294) + [(294)](https://github.com/XanaduAI/strawberryfields/pull/294) -### Bug fixes +
<h3>Bug fixes</h3>
* Symbolic Operation parameters are now compatible with TensorFlow 2.0 objects. - [#282](https://github.com/XanaduAI/strawberryfields/pull/282) + [(282)](https://github.com/XanaduAI/strawberryfields/pull/282) * Added `sympy>=1.5` to the list of dependencies. Removed the `sympy.functions.atan2` workaround now that SymPy has been fixed. - [#280](https://github.com/XanaduAI/strawberryfields/pull/280) + [(280)](https://github.com/XanaduAI/strawberryfields/pull/280) * Removed two unnecessary else statements that pylint complained about. - [#290](https://github.com/XanaduAI/strawberryfields/pull/290) + [(290)](https://github.com/XanaduAI/strawberryfields/pull/290) * Fixed a bug in the `MZgate`, where the internal and external phases were in the wrong order in both the docstring and the argument list. The new @@ -68,53 +69,51 @@ [(#301)](https://github.com/XanaduAI/strawberryfields/pull/301) -### Contributors +
<h3>Contributors</h3>
This release contains contributions from (in alphabetical order): -Ville Bergholm, Jack Ceroni, Theodor Isacsson, Antal Száva +Ville Bergholm, Jack Ceroni, Theodor Isacsson, Antal Száva, Paul Tan ---- -# Release 0.12.1 +# Release 0.12.1 (current release) -### New features +
<h3>New features</h3>
* A new `gaussian_unitary` circuitspec that can be used to compile any sequence of Gaussian transformations into a single `GaussianTransform` gate and a sequence of single mode `Dgate`s. -  [#238](https://github.com/XanaduAI/strawberryfields/pull/238) +  [(238)](https://github.com/XanaduAI/strawberryfields/pull/238) -### Improvements +
<h3>Improvements</h3>
* Add new Strawberry Fields applications paper to documentation - [#274](https://github.com/XanaduAI/strawberryfields/pull/274) + [(274)](https://github.com/XanaduAI/strawberryfields/pull/274) * Update figure for GBS device in documentation - [#275](https://github.com/XanaduAI/strawberryfields/pull/275) + [(275)](https://github.com/XanaduAI/strawberryfields/pull/275) -### Bug fixes +
<h3>Bug fixes</h3>
* Fix installation issue with incorrect minimum version number for `thewalrus` - [#272](https://github.com/XanaduAI/strawberryfields/pull/272) - [#277](https://github.com/XanaduAI/strawberryfields/pull/277) + [(272)](https://github.com/XanaduAI/strawberryfields/pull/272) + [(277)](https://github.com/XanaduAI/strawberryfields/pull/277) * Correct URL for image in `README` - [#273](https://github.com/XanaduAI/strawberryfields/pull/273) + [(273)](https://github.com/XanaduAI/strawberryfields/pull/273) * Add applications data to `MANIFEST.in` - [#278](https://github.com/XanaduAI/strawberryfields/pull/278) + [(278)](https://github.com/XanaduAI/strawberryfields/pull/278) -### Contributors +
<h3>Contributors</h3>
This release contains contributions from (in alphabetical order): Ville Bergholm, Tom Bromley, Nicolás Quesada, Paul Tan ---- # Release 0.12.0 -### New features +
<h3>New features</h3>
* A new applications layer, allowing users to interface samples generated from near-term photonic devices with problems of practical interest. The `apps` package consists of the following @@ -139,39 +138,38 @@ Ville Bergholm, Tom Bromley, Nicolás Quesada, Paul Tan - The `apps.vibronic` module, providing functionality to construct the vibronic absorption spectrum of a molecule from GBS samples. -### Improvements +
<h3>Improvements</h3>
* The documentation was improved and refactored. Changes include: - A brand new theme, now matching PennyLane - [#262](https://github.com/XanaduAI/strawberryfields/pull/262) + [(262)](https://github.com/XanaduAI/strawberryfields/pull/262) - The documentation has been restructured to make it easier to navigate - [#266](https://github.com/XanaduAI/strawberryfields/pull/266) + [(266)](https://github.com/XanaduAI/strawberryfields/pull/266) -### Contributors +
<h3>Contributors</h3>
This release contains contributions from (in alphabetical order): Juan Miguel Arrazola, Tom Bromley, Josh Izaac, Soran Jahangiri, Nicolás Quesada ---- # Release 0.11.2 -### New features +
<h3>New features</h3>
* Adds the MZgate to ops.py, representing a Mach-Zehnder interferometer. This is not a primitive of the existing simulator backends; rather, `_decompose()` is defined, decomposing it into an external phase shift, two 50-50 beamsplitters, and an internal phase shift. - [#127](https://github.com/XanaduAI/strawberryfields/pull/127) + [(127)](https://github.com/XanaduAI/strawberryfields/pull/127) * The `Chip0Spec` circuit class now defines a `compile` method, allowing arbitrary unitaries comprised of `{Interferometer, BSgate, Rgate, MZgate}` operations to be validated and compiled to match the topology of chip0. - [#127](https://github.com/XanaduAI/strawberryfields/pull/127) + [(127)](https://github.com/XanaduAI/strawberryfields/pull/127) * `strawberryfields.ops.BipartiteGraphEmbed` quantum decomposition now added, allowing a bipartite graph to be embedded on a device that allows for @@ -179,21 +177,21 @@ Juan Miguel Arrazola, Tom Bromley, Josh Izaac, Soran Jahangiri, Nicolás Quesada * Added threshold measurements, via the new operation `MeasureThreshold`, and provided implementation of this operation in the Gaussian backend. - [#152](https://github.com/XanaduAI/strawberryfields/pull/152) + [(152)](https://github.com/XanaduAI/strawberryfields/pull/152) * Programs can now have free parameters/arguments which are only bound to numerical values when the Program is executed, by supplying the actual argument values to the `Engine.run` method. - [#163](https://github.com/XanaduAI/strawberryfields/pull/163) + [(163)](https://github.com/XanaduAI/strawberryfields/pull/163) -### API Changes +
<h3>API Changes</h3>
* The `strawberryfields.ops.Measure` shorthand has been deprecated in favour of `strawberryfields.ops.MeasureFock()`. - [#145](https://github.com/XanaduAI/strawberryfields/pull/145) + [(145)](https://github.com/XanaduAI/strawberryfields/pull/145) * Several changes to the `strawberryfields.decompositions` module: - [#127](https://github.com/XanaduAI/strawberryfields/pull/127) + [(127)](https://github.com/XanaduAI/strawberryfields/pull/127) - The name `clements` has been replaced with `rectangular` to correspond with the shape of the resulting decomposition. @@ -203,7 +201,7 @@ Juan Miguel Arrazola, Tom Bromley, Josh Izaac, Soran Jahangiri, Nicolás Quesada `(tlist, diag, tilist)`, so they can easily be swapped. * Several changes to `ops.Interferometer`: - [#127](https://github.com/XanaduAI/strawberryfields/pull/127) + [(127)](https://github.com/XanaduAI/strawberryfields/pull/127) - The calculation of the ops.Interferometer decomposition has been moved from `__init__` to `_decompose()`, allowing the interferometer decomposition type @@ -219,71 +217,70 @@ Juan Miguel Arrazola, Tom Bromley, Josh Izaac, Soran Jahangiri, Nicolás Quesada * Moves the `Program.compile_seq` method to `CircuitSpecs.decompose`. This allows it to be accessed from the `CircuitSpec.compile` method. Furthermore, it now must also be passed the program registers, as compilation may sometimes require this. - [#127](https://github.com/XanaduAI/strawberryfields/pull/127) + [(127)](https://github.com/XanaduAI/strawberryfields/pull/127) * Parameter class is replaced by `MeasuredParameter` and `FreeParameter`, both inheriting from `sympy.Symbol`. Fixed numeric parameters are handled by the built-in Python numeric classes and numpy arrays. - [#163](https://github.com/XanaduAI/strawberryfields/pull/163) + [(163)](https://github.com/XanaduAI/strawberryfields/pull/163) * `Parameter`, `RegRefTransform` and `convert` are removed. - [#163](https://github.com/XanaduAI/strawberryfields/pull/163) + [(163)](https://github.com/XanaduAI/strawberryfields/pull/163) -### Improvements +
<h3>Improvements</h3>
* Photon-counting measurements can now be done in the Gaussian backend for states with nonzero displacement. - [#154](https://github.com/XanaduAI/strawberryfields/pull/154) + [(154)](https://github.com/XanaduAI/strawberryfields/pull/154) * Added a new test for the cubic phase gate - [#160](https://github.com/XanaduAI/strawberryfields/pull/160) + [(160)](https://github.com/XanaduAI/strawberryfields/pull/160) * Added new integration tests for the Gaussian gates that are not primitive, i.e., P, CX, CZ, and S2. - [#173](https://github.com/XanaduAI/strawberryfields/pull/173) + [(173)](https://github.com/XanaduAI/strawberryfields/pull/173) -### Bug fixes +
<h3>Bug fixes</h3>
* Fixed bug in `strawberryfields.decompositions.rectangular_symmetric` so its returned phases are all in the interval [0, 2*pi), and corrects the function docstring. - [#196](https://github.com/XanaduAI/strawberryfields/pull/196) + [(196)](https://github.com/XanaduAI/strawberryfields/pull/196) * When using the `'gbs'` compilation target, the measured registers are now sorted in ascending order in the resulting compiled program. - [#144](https://github.com/XanaduAI/strawberryfields/pull/144) + [(144)](https://github.com/XanaduAI/strawberryfields/pull/144) * Fixed typo in the Gaussian Boson Sampling example notebook. - [#133](https://github.com/XanaduAI/strawberryfields/pull/133) + [(133)](https://github.com/XanaduAI/strawberryfields/pull/133) * Fixed a bug in the function `smeanxp` of the Gaussian Backend simulator. - [#154](https://github.com/XanaduAI/strawberryfields/pull/154) + [(154)](https://github.com/XanaduAI/strawberryfields/pull/154) * Clarified description of matrices that are accepted by graph embed operation. - [#147](https://github.com/XanaduAI/strawberryfields/pull/147) + [(147)](https://github.com/XanaduAI/strawberryfields/pull/147) * Fixed typos in the documentation of the CX gate and BSgate - [#166](https://github.com/XanaduAI/strawberryfields/pull/166) - [#167](https://github.com/XanaduAI/strawberryfields/pull/167) - [#169](https://github.com/XanaduAI/strawberryfields/pull/169) + [(166)](https://github.com/XanaduAI/strawberryfields/pull/166) + [(167)](https://github.com/XanaduAI/strawberryfields/pull/167) + [(169)](https://github.com/XanaduAI/strawberryfields/pull/169) ---- # Release 0.11.1 -### Improvements +
<h3>Improvements</h3>
* Added the `circuit_spec` attribute to `BaseBackend` to denote which CircuitSpecs class should be used to validate programs for each backend - [#125](https://github.com/XanaduAI/strawberryfields/pull/125). + [(125)](https://github.com/XanaduAI/strawberryfields/pull/125). * Removed the `return_state` keyword argument from `LocalEngine.run()`. Now no state object is returned if `modes==[]`. - [#126](https://github.com/XanaduAI/strawberryfields/pull/126) + [(126)](https://github.com/XanaduAI/strawberryfields/pull/126) * Fixed a typo in the boson sampling tutorial. - [#133](https://github.com/XanaduAI/strawberryfields/pull/133) + [(133)](https://github.com/XanaduAI/strawberryfields/pull/133) -### Bug fixes +
<h3>Bug fixes</h3>
* Allows imported Blackbird programs to store `target` options as default run options. During eng.run, if no run options are provided @@ -291,23 +288,22 @@ Juan Miguel Arrazola, Tom Bromley, Josh Izaac, Soran Jahangiri, Nicolás Quesada within the program. This fixes a bug where shots specified in Blackbird scripts were not being passed to `eng.run`. - [#130](https://github.com/XanaduAI/strawberryfields/pull/130) + [(130)](https://github.com/XanaduAI/strawberryfields/pull/130) * Removes `ModuleNotFoundError` from the codebase, replacing all occurrences with `ImportError`. Since `ModuleNotFoundError` was only introduced in Python 3.6+, this fixes a bug where Strawberry Fields was not importable on Python 3.5 - [#124](https://github.com/XanaduAI/strawberryfields/pull/124). + [(124)](https://github.com/XanaduAI/strawberryfields/pull/124). * Updates the Chip0 template to use `MeasureFock() | [0, 1, 2, 3]`, which will allow correct fock measurement behaviour when simulated on the Gaussian backend - [#124](https://github.com/XanaduAI/strawberryfields/pull/124). + [(124)](https://github.com/XanaduAI/strawberryfields/pull/124). * Fixed a bug in the `GraphEmbed` op, which was not correctly determining when a unitary was the identity - [#128](https://github.com/XanaduAI/strawberryfields/pull/128). + [(128)](https://github.com/XanaduAI/strawberryfields/pull/128). ---- # Release 0.11.0 @@ -341,7 +337,7 @@ ket = results.state.ket() print(results.samples[0]) ``` -### New features +
<h3>New features</h3>
- The functionality of the `Engine` class has been divided into two new classes: `Program`, which represents a quantum circuit or a fragment thereof, and `Engine`, which executes `Program` instances. @@ -389,7 +385,7 @@ print(results.samples[0]) - Added a glossary to the documentation. -### API Changes +
<h3>API Changes</h3>
- Added the `circuitspecs` subpackage, containing the `CircuitSpecs` class and a quantum circuit database. @@ -404,7 +400,7 @@ print(results.samples[0]) - `hbar` is now a global, frontend-only variable that the user can set at the beginning of the session. It is used at the `Operation.apply()` level to scale the inputs and outputs of the backend API calls as needed, and inside the `State` objects. - The only backend API calls that need to do hbar scaling for the input parameters are the X, Z, and V gates, the Gaussian state decomposition, and homodyne measurements (both the returned value and postselection argument are scaled). -### Improvements +
<h3>Improvements</h3>
- Removed TensorFlow as an explicit dependency of Strawberry Fields. Advanced users can still install TensorFlow manually using `pip install tensorflow==1.3` and use as before. @@ -422,7 +418,7 @@ print(results.samples[0]) - Typos in documentation fixed. -## Bug fixes +
<h3>Bug fixes</h3>
- Fixed a bug with installation on Windows for certain locales. - Fixed a bug in the `New` operation. @@ -431,18 +427,17 @@ print(results.samples[0]) - Fixed a latent bug in `graph_embed`. - Bugfix for Bloch-Messiah returning non-symplectic matrices when input is passive. -### Contributors +
<h3>Contributors</h3>
This release contains contributions from (in alphabetical order): Ville Bergholm, Tom Bromley, Ish Dhand, Karel Dumon, Xueshi Guo, Josh Izaac, Nathan Killoran, Leonhard Neuhaus, Nicolás Quesada. ---- # Release 0.10 -### New features +
<h3>New features</h3>
- Added two new utility functions to extract a numerical representation of a circuit from an Engine object: `extract_unitary` and `extract_channel`. @@ -456,7 +451,7 @@ Ville Bergholm, Tom Bromley, Ish Dhand, Karel Dumon, Xueshi Guo, Josh Izaac, Nat - Added documentation to the Quantum Algorithms section on CV quantum neural networks -### Improvements +
<h3>Improvements</h3>
- Test suite has been ported to pytest @@ -464,11 +459,11 @@ Ville Bergholm, Tom Bromley, Ish Dhand, Karel Dumon, Xueshi Guo, Josh Izaac, Nat - Made corrections to the Clements decomposition documentation and docstring, and fixed the Clements unit tests to ensure they are deterministic. -## Bug fixes +
<h3>Bug fixes</h3>
- Fixed Bloch-Messiah bug arising when singular values were degenerate. Previously, the Bloch-Messiah decomposition did not return matrices in the canonical symplectic form if one or more of the Bloch-Messiah singular values were degenerate. -### Contributors +
<h3>Contributors</h3>
This release contains contributions from (in alphabetical order): @@ -477,9 +472,7 @@ Shahnawaz Ahmed, Thomas R. Bromley, Ish Dhand, Marcus Edwards, Christian Gogolin # Release 0.9 -## Summary of changes from 0.8 - -### New features +
<h3>New features</h3>
- Updated the [Strawberry Fields gallery](https://strawberryfields.readthedocs.io/en/latest/gallery/gallery.html), featuring community-submitted content (tutorials, notebooks, repositories, blog posts, research papers, etc.) using Strawberry Fields @@ -489,13 +482,13 @@ Shahnawaz Ahmed, Thomas R. Bromley, Ish Dhand, Marcus Edwards, Christian Gogolin - Added a `poly_quad_expectation` method to the `state` objects for Gaussian and Fock backends -### Improvements +
<h3>Improvements</h3>
- New and improved tests - Fixed typos in code/documentation -### Contributors +
<h3>Contributors</h3>
This release contains contributions from: @@ -504,9 +497,7 @@ Juan Leni, Arthur Pesah, Brianna Gopaul, Nicolás Quesada, Josh Izaac, and Natha # Release 0.8 -## Summary of changes from 0.7 - -### New features +

New features

* You can now prepare multimode states in all backends, via the following new quantum operations in `strawberryfields.ops`: - `Ket` - `DensityMatrix` @@ -519,7 +510,7 @@ Juan Leni, Arthur Pesah, Brianna Gopaul, Nicolás Quesada, Josh Izaac, and Natha * States can now be compared directly for equality - this is defined separately for Gaussian states and Fock basis states. -### Improvements +

Improvements
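
To make the multimode preparation entry above concrete, a short sketch using `Ket`, written against the present-day `Program`/`Engine` frontend rather than the 0.8-era engine; the cutoff and amplitudes are illustrative:

```python
import numpy as np
import strawberryfields as sf
from strawberryfields import ops

cutoff = 3

# two-mode ket (|01> + |10>)/sqrt(2) expressed in the Fock basis
ket = np.zeros([cutoff, cutoff], dtype=np.complex128)
ket[0, 1] = ket[1, 0] = 1 / np.sqrt(2)

prog = sf.Program(2)
with prog.context as q:
    ops.Ket(ket) | q  # prepare both modes in the given multimode state

eng = sf.Engine("fock", backend_options={"cutoff_dim": cutoff})
state = eng.run(prog).state
print(state.fock_prob([0, 1]))  # ~0.5
```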

* The engine logic and behaviour has been overhauled, making it simpler to use and understand. - `eng.run()` and `eng.reset()` now allow the user to alter parameters such as `cutoff_dim` between runs. - `eng.reset_backend()` has been renamed to `eng.reset()`, and now also implicitly resets the queue. @@ -528,27 +519,26 @@ Juan Leni, Arthur Pesah, Brianna Gopaul, Nicolás Quesada, Josh Izaac, and Natha * A new parameter class is introduced - this is a developmental change, and does not affect the user-facing parts of Strawberry Fields. All parameters passed to quantum operations are 'wrapped' in this parameter class, which also contains several high level mathematical and array/tensor manipulation functions and methods. -### Contributors +

Contributors

This release contains contributions from: Ville Bergholm, Christian Gogolin, Nicolás Quesada, Josh Izaac, and Nathan Killoran. ---- # Release 0.7.3 -## New features +

New features

* Added Gaussian decompositions to the front-end; these can be accessed via the new quantum operations `Interferometer`, `GaussianTransform`, `CovarianceState`. These allow you to apply interferometers, Gaussian symplectic transformations, and prepare a state based on a covariance matrix respectively. You can also query the engine to determine the CV gate decompositions applied. * Added utilities for creating random covariance, symplectic, and gaussian unitary matrices in `strawberryfields.utils`. -## Improvements +

Improvements
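
A brief sketch of the decomposition operations and random-matrix utilities introduced above, again assuming the current frontend; the squeezing value and matrix size are arbitrary:

```python
import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.utils import random_interferometer

U = random_interferometer(3)  # Haar-random 3x3 unitary

prog = sf.Program(3)
with prog.context as q:
    ops.Sgate(0.3) | q[0]
    # decomposed into beamsplitters and rotations when the program is run
    ops.Interferometer(U) | q

eng = sf.Engine("gaussian")
state = eng.run(prog).state
print(state.mean_photon(0))  # (mean, variance) after mixing the squeezed mode
```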

* Created a separate package `strawberryfields-gpu` that requires `tensorflow-gpu`. * Modified TFBackend to cache non-variable parts of the beamsplitter, to speed up computation. * Minor performance improvement in `fock_prob()` by avoiding inverting a matrix twice. -## Bug fixes +

Bug fixes

* Fixed bug #10 by adding the ability to reset the Fock modeMap and GaussianCircuit class * Fixed bug #11 by reshaping the Fock probabilities if the state happens to be pure states * Fixed Clements decomposition bug where some phase angles weren't applied @@ -556,23 +546,21 @@ Ville Bergholm, Christian Gogolin, Nicolás Quesada, Josh Izaac, and Nathan Kill * Fix to prevent beamsplitter prefactor cache from breaking things if using two graphs * Fix bug #13, GaussianBackend.state() raises an IndexError if all modes in the state have been deleted. ---- # Release 0.7.2 -## Bug fixes +

Bug fixes

* Fixed Tensorflow requirements in `setup.py`, so that installation will now work for versions of tensorflow>=1.3,<1.7 -## Known issues +

Known issues

* Tensorflow version 1.7 introduces some breaking API changes, so is currently not supported by Strawberry Fields. ---- # Release 0.7.1 Initial public release. -### Contributors +

Contributors

This release contains contributions from: Nathan Killoran, Josh Izaac, Nicolás Quesada, Matthew Amy, and Ville Bergholm. diff --git a/doc/code/sf_configuration.rst b/doc/code/sf_configuration.rst index 0dad2eb49..1e189ecdc 100644 --- a/doc/code/sf_configuration.rst +++ b/doc/code/sf_configuration.rst @@ -8,6 +8,9 @@ sf.configuration Unless you are a Strawberry Fields developer, you likely do not need to access this module directly. + See the :doc:`/introduction/configuration` page for more information on + configuring Strawberry Fields. + .. automodapi:: strawberryfields.configuration :no-heading: :skip: user_config_dir diff --git a/doc/introduction/tutorials.rst b/doc/introduction/tutorials.rst index 06745a0ac..95f485bbc 100644 --- a/doc/introduction/tutorials.rst +++ b/doc/introduction/tutorials.rst @@ -99,6 +99,40 @@ Algorithms :description: :doc:`Gaussian boson sampling & the hafnian
` :figure: /_static/gaussian_boson_sampling.png +.. customgalleryitem:: + :tooltip: Scattershot boson sampling + :description: :doc:`/gallery/scattershot-boson-sampling/scattershot-bs` + :figure: /gallery/scattershot-boson-sampling/scattershot-bs.gif + +.. customgalleryitem:: + :tooltip: Photonic gate visualization + :description: :doc:`/gallery/gate_visualisation/GateVisualisation` + :figure: /gallery/gate_visualisation/GateVisualisation.gif + +.. raw:: html + +
+
+ + +Optimization and machine learning --------------------------------- + +.. toctree:: + :hidden: + :maxdepth: 1 + + /tutorials/tutorial_machine_learning + /gallery/minimizing_correlations/minimizing_correlations + /gallery/state_learner/StateLearning + +.. warning:: + + We are currently making some huge changes to the TensorFlow backend + of Strawberry Fields, including upcoming TensorFlow 2.0 support. As a result, + this tutorial is currently unavailable. Please bear with us as we finish off the + work on our new TensorFlow backend! + .. customgalleryitem:: + :tooltip: Optimization and machine learning with TensorFlow + :description: :doc:`Optimization and machine learning with TensorFlow
` @@ -119,16 +153,6 @@ Algorithms :description: :doc:`/gallery/gate_synthesis/GateSynthesis` :figure: /gallery/gate_synthesis/GateSynthesis.gif -.. customgalleryitem:: - :tooltip: Scattershot boson sampling - :description: :doc:`/gallery/scattershot-boson-sampling/scattershot-bs` - :figure: /gallery/scattershot-boson-sampling/scattershot-bs.gif - -.. customgalleryitem:: - :tooltip: Photonic gate visualization - :description: :doc:`/gallery/gate_visualisation/GateVisualisation` - :figure: /gallery/gate_visualisation/GateVisualisation.gif - .. raw:: html
diff --git a/strawberryfields/api/job.py b/strawberryfields/api/job.py index c2e3897bf..c1df2c317 100644 --- a/strawberryfields/api/job.py +++ b/strawberryfields/api/job.py @@ -72,7 +72,7 @@ class Job: job is managed """ - def __init__(self, id_: str, status: JobStatus, connection: "Connection"): + def __init__(self, id_: str, status: JobStatus, connection): self._id = id_ self._status = status self._connection = connection diff --git a/strawberryfields/configuration.py b/strawberryfields/configuration.py index 75f6185e9..fb9184353 100644 --- a/strawberryfields/configuration.py +++ b/strawberryfields/configuration.py @@ -14,12 +14,6 @@ r""" This module contains functions used to load, store, save, and modify configuration options for Strawberry Fields. - -.. warning:: - - See more details regarding Strawberry Fields configuration and available - configuration options on the :doc:`/introduction/configuration` page. - """ import logging as log import os From 02e3acc2dd751704e9f3c5d5309ca0dfeb772c52 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 10 Mar 2020 14:57:46 +1030 Subject: [PATCH 313/335] more docs fixes --- .gitignore | 1 - README.rst | 24 +----------------------- doc/_static/xanadu_gallery.css | 3 ++- doc/development/research.rst | 12 ++---------- doc/index.rst | 4 +++- 5 files changed, 8 insertions(+), 36 deletions(-) diff --git a/.gitignore b/.gitignore index 4ee357004..df104e9b5 100644 --- a/.gitignore +++ b/.gitignore @@ -13,7 +13,6 @@ notebooks/* examples/.ipynb_checkpoints/* .pytest_cache/* pytest/.pytest_cache/ -doc/tutorials_apps/* examples_apps/*.html doc/_static/thumbs/* !doc/_static/code.png diff --git a/README.rst b/README.rst index 392696d84..087e333a5 100644 --- a/README.rst +++ b/README.rst @@ -53,35 +53,13 @@ Features Installation ============ -Strawberry Fields requires Python version 3.5, 3.6, or 3.7 (3.8 is currently not support). Installation of Strawberry Fields, as well as all dependencies, can be done using pip: +Strawberry Fields requires Python version 3.5, 3.6, 3.7, or 3.8. Installation of Strawberry Fields, as well as all dependencies, can be done using pip: .. code-block:: bash pip install strawberryfields -TensorFlow support ------------------- - -To use Strawberry Fields with TensorFlow, version 1.3 of -TensorFlow is required. This can be installed alongside Strawberry Fields -as follows: - -.. code-block:: console - - pip install strawberryfields tensorflow==1.3 - -Or, to install Strawberry Fields and TensorFlow with GPU and CUDA support: - -.. code-block:: console - - pip install strawberryfields tensorflow-gpu==1.3 - - -Note that TensorFlow version 1.3 is only supported on Python versions -less than 3.7. - - Getting started =============== diff --git a/doc/_static/xanadu_gallery.css b/doc/_static/xanadu_gallery.css index fd2a802ae..c630e09ba 100644 --- a/doc/_static/xanadu_gallery.css +++ b/doc/_static/xanadu_gallery.css @@ -227,7 +227,8 @@ p.sphx-glr-signature a.reference.external { .sphx-glr-download-link-note.admonition.note, -.reference.download.internal, +.sphx-glr-download, +.sphx-glr-footer, .sphx-glr-signature { display: none; } diff --git a/doc/development/research.rst b/doc/development/research.rst index 35a5902a7..aca1009c1 100644 --- a/doc/development/research.rst +++ b/doc/development/research.rst @@ -1,7 +1,7 @@ .. 
_research: Research and contribution -=============================== +========================= Research --------------- @@ -34,15 +34,7 @@ If you are having issues, please let us know by posting the issue on our Github To chat directly with the team designing and building Strawberry Fields, as well as members of our community - ranging from professors of quantum physics, to students, to those just interested in being a -part of a rapidly growing industry - you can join our `Slack channel `_. - -Available channels: - -* ``#strawberryfields``: For general discussion regarding Strawberry Fields -* ``#sf_projects``: For discussion of research ideas and projects built on Strawberry Fields -* ``#sf_bugs``: For discussion of any bugs and issues you run into using Strawberry Fields -* ``#sf_interactive``: For discussion relating to the `Strawberry Fields Interactive `_ web application -* ``#sf_docs``: For discussion of the Strawberry Fields `documentation `_ +part of a rapidly growing industry - you can join our `discussion forum `_ and `Slack channel `_. Sometimes, it might take us a couple of hours to reply - please be patient! diff --git a/doc/index.rst b/doc/index.rst index 12869683e..c81928191 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -128,7 +128,9 @@ Support - **Source Code:** https://github.com/XanaduAI/strawberryfields - **Issue Tracker:** https://github.com/XanaduAI/strawberryfields/issues -If you are having issues, please let us know by posting the issue on our Github issue tracker, or by joining our `Strawberry Fields Slack channel `_. +If you are having issues, please let us know by posting the issue on our Github issue tracker. + +To chat directly with the team designing and building Strawberry Fields, as well as members of our community — ranging from quantum computing researchers, to students, to those just interested in being a part of a rapidly growing industry — you can join our `discussion forum `_ and `Slack channel `_. For more details on contributing or performing research with Strawberry Fields, please see :ref:`research`. 
From 484108b6beb70def23cb3ff67192c123bd94dabc Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 10 Mar 2020 14:58:13 +1030 Subject: [PATCH 314/335] temporarily add tutorial cache --- .../sphx_glr_run_tutorial_dense_thumb.png | Bin 0 -> 27537 bytes ...sphx_glr_run_tutorial_max_clique_thumb.png | Bin 0 -> 27537 bytes .../sphx_glr_run_tutorial_points_thumb.png | Bin 0 -> 27537 bytes .../sphx_glr_run_tutorial_sample_thumb.png | Bin 0 -> 27537 bytes ...sphx_glr_run_tutorial_similarity_thumb.png | Bin 0 -> 27537 bytes .../sphx_glr_run_tutorial_vibronic_thumb.png | Bin 0 -> 27537 bytes doc/tutorials_apps/index.rst | 160 +++++ doc/tutorials_apps/run_tutorial_dense.ipynb | 165 +++++ doc/tutorials_apps/run_tutorial_dense.py | 116 ++++ doc/tutorials_apps/run_tutorial_dense.py.md5 | 1 + doc/tutorials_apps/run_tutorial_dense.rst | 229 +++++++ .../run_tutorial_max_clique.ipynb | 287 +++++++++ doc/tutorials_apps/run_tutorial_max_clique.py | 164 +++++ .../run_tutorial_max_clique.py.md5 | 1 + .../run_tutorial_max_clique.rst | 354 +++++++++++ doc/tutorials_apps/run_tutorial_points.ipynb | 208 ++++++ doc/tutorials_apps/run_tutorial_points.py | 156 +++++ doc/tutorials_apps/run_tutorial_points.py.md5 | 1 + doc/tutorials_apps/run_tutorial_points.rst | 264 ++++++++ doc/tutorials_apps/run_tutorial_sample.ipynb | 151 +++++ doc/tutorials_apps/run_tutorial_sample.py | 154 +++++ doc/tutorials_apps/run_tutorial_sample.py.md5 | 1 + doc/tutorials_apps/run_tutorial_sample.rst | 276 ++++++++ .../run_tutorial_similarity.ipynb | 392 ++++++++++++ doc/tutorials_apps/run_tutorial_similarity.py | 314 +++++++++ .../run_tutorial_similarity.py.md5 | 1 + .../run_tutorial_similarity.rst | 596 ++++++++++++++++++ .../run_tutorial_vibronic.ipynb | 161 +++++ doc/tutorials_apps/run_tutorial_vibronic.py | 134 ++++ .../run_tutorial_vibronic.py.md5 | 1 + doc/tutorials_apps/run_tutorial_vibronic.rst | 219 +++++++ doc/tutorials_apps/sg_execution_times.rst | 15 + doc/tutorials_apps/tutorials_apps_jupyter.zip | Bin 0 -> 68075 bytes doc/tutorials_apps/tutorials_apps_python.zip | Bin 0 -> 52079 bytes 34 files changed, 4521 insertions(+) create mode 100644 doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_dense_thumb.png create mode 100644 doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_max_clique_thumb.png create mode 100644 doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_points_thumb.png create mode 100644 doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_sample_thumb.png create mode 100644 doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_similarity_thumb.png create mode 100644 doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_vibronic_thumb.png create mode 100644 doc/tutorials_apps/index.rst create mode 100644 doc/tutorials_apps/run_tutorial_dense.ipynb create mode 100644 doc/tutorials_apps/run_tutorial_dense.py create mode 100644 doc/tutorials_apps/run_tutorial_dense.py.md5 create mode 100644 doc/tutorials_apps/run_tutorial_dense.rst create mode 100644 doc/tutorials_apps/run_tutorial_max_clique.ipynb create mode 100644 doc/tutorials_apps/run_tutorial_max_clique.py create mode 100644 doc/tutorials_apps/run_tutorial_max_clique.py.md5 create mode 100644 doc/tutorials_apps/run_tutorial_max_clique.rst create mode 100644 doc/tutorials_apps/run_tutorial_points.ipynb create mode 100644 doc/tutorials_apps/run_tutorial_points.py create mode 100644 doc/tutorials_apps/run_tutorial_points.py.md5 create mode 100644 doc/tutorials_apps/run_tutorial_points.rst create mode 100644 doc/tutorials_apps/run_tutorial_sample.ipynb 
create mode 100644 doc/tutorials_apps/run_tutorial_sample.py create mode 100644 doc/tutorials_apps/run_tutorial_sample.py.md5 create mode 100644 doc/tutorials_apps/run_tutorial_sample.rst create mode 100644 doc/tutorials_apps/run_tutorial_similarity.ipynb create mode 100644 doc/tutorials_apps/run_tutorial_similarity.py create mode 100644 doc/tutorials_apps/run_tutorial_similarity.py.md5 create mode 100644 doc/tutorials_apps/run_tutorial_similarity.rst create mode 100644 doc/tutorials_apps/run_tutorial_vibronic.ipynb create mode 100644 doc/tutorials_apps/run_tutorial_vibronic.py create mode 100644 doc/tutorials_apps/run_tutorial_vibronic.py.md5 create mode 100644 doc/tutorials_apps/run_tutorial_vibronic.rst create mode 100644 doc/tutorials_apps/sg_execution_times.rst create mode 100644 doc/tutorials_apps/tutorials_apps_jupyter.zip create mode 100644 doc/tutorials_apps/tutorials_apps_python.zip
diff --git a/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_dense_thumb.png b/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_dense_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..59370dd40a3a2bbad477dc433467dfbbe0ac56cf GIT binary patch literal 27537 [base85-encoded PNG thumbnail data omitted]
literal 0 HcmV?d00001
diff --git a/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_max_clique_thumb.png b/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_max_clique_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..59370dd40a3a2bbad477dc433467dfbbe0ac56cf GIT binary patch literal 27537 [base85-encoded PNG thumbnail data omitted]
literal 0 HcmV?d00001
diff --git a/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_points_thumb.png b/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_points_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..59370dd40a3a2bbad477dc433467dfbbe0ac56cf GIT binary patch literal 27537
z&*DVSqbt^hinDKmy>sT9%21hZjr^de;=Wxd$JUY=-k^nOZ!}(Y8Ok~d5uNX(X8D+Z z&d6fC>ijeOt2l%|UP|uBPhg3w<_L>y{vyR_x75idvE^=#wtWnlP7(s-XZ$T36fC}B z^wFUajjOmmfKcV=!FqaHi9r6gxZU0H=3H{#;>r-f$^t)xq>aW|4ilKw%#7W-PdDF! z$4w)45|a~ULOj+H#gVN%tCDky@hJWRKh=b@TFHEKvT|(EUOibdAO*YaHVNB27#~iK zWf~Sp(KwQ~a_g{Gf{$TIdWZ3zaZgcYyRV6X-XQIj5riN_P=)g)=?hGVuI7n)TjQSE zNfO&4ggR8s**vl!PHr8IJ}IoUXETT7VNzJGmia$6VT@^7{}JDA`^m%bHNNyx^TdWD zG}DkpfD@4octCsGTM3b962QxYUWI=FrE?DD>66C; z_UBgGkCsQe1{B7h)J=a!TN*vLr`aNGtYf*KlW@@lG9+t_isg!>#cLJUD@M;(sx2Qx zw_aD+-zM70BcxwtB{1nG6^oxik(a)qq`Jgl)<&8ha*zQScAeEkW}Id-(7A zpC2map1?b9FdaUXU7g2Tft@#k}HcKgOdKbK0eXM(M+mwuOmZoi-T;Q}%d(Cs!a#5H@H>${*6} zQ+_TtGk6+=VI8f-5gQ#*1-jlvX-^ERk_U@|iT(@+axbUqWHI$bL`8PO^YQ-6rdmwG zxh01OdnGW<4x}@-y;yNEHI}n-2ZL2iU6H>=qu|irGX<7Nk&y{D+Ki&#Qd`6_ipPRE z%yPd^RNZzwUh;aoam*{wwQY!Uh-xpvF> zqZrFYDWv)lgK=CRNxA0rV^&AFFRtqJAQs?dm}R-dC3y_T?Hm^**2YzPmS~wQC8QkObD+Matuge8`|>gr=)H? z;1~|5;OKq;ARP9oVpJP#YCD< z_TtHYbW%##p`^e{!YReXfEBj=P~ElZf2Ti}w`Rl{9#JXpG+vnR^>()XZ5c?V`i|D^ zhPN5oMSG@MP8PLUieXxPg&*VMqmCIDi!9~4IJz5Y>~1o4^1QBf^7=SqqqRY&l!{yQ1-Oz$;*()*AUwCRpPU0X#dtR& zV<+gZg?+z>@p{Fgl}ZlH{ZTC*7rWnAD+lowQAMV2Qqhd8Lu%&va<;17-Xx@cP{vLe zaVnOLW^1`{`u&U%?+%0H^#ogVllp;Y& zad-FP8r&hcTW}Ao+|N7j_w@tT$ILa8$(+}mhwj^+dFRau9iRR38z0KM$$JzDY>Y33 z){cDb(RTr$!9J09{>Pual{^GKgfeJQdM9NFyEb~tXf}(zS_+S$Fd@Ktj@!Y))Cry2Wv3n>~119^xHRNxRPj4Ex0Zs ztlzC%(4k|l&b88BdtVd3Dn8~YiKNyBh&6O-u-I#V`eLb4v^&YP%COj8yV~|r1bsi6 z;TG!kH&wT*Q}xg55%^M#PZu{0I3*1lAtaxRs@RLhZP z>Z<93B;Z^5b}_6tG!pMGP44n;Cn_sXk(7?E8lfl3;j25aRj)-n4HPFz2h>;op_78R z%ZkpYE`Cm$i3aJ{XyykM(u`*m_~zSZp7W|Q<@^5DbQ>YvIOX||=ij05mVqR4zC>0H z;bDWU(b>`1NuzdP^V0)UjqPJ)#{=bcfiCBAO`;+T0m#`}4t)!l9Q=spYGox_Y)fs) zXkcO9Ldm{<DQtb|Zo6o;OoZMx8|I8jtI%n<)or-HTxfT3!kS9Ji~bgUG^=?o z=R!fL>zFDpfxYDvi*^!}=2ItWi2}k;^**D`O0a8o`Mj`D$L*Y4l+K)O#I!xeN62T? zG99 zxU4<@Ik{)|fUs!j&#)4&6>oE}yd!Cro$rv2UR?M5VYWqG{DpDr+4h&Zm5+6A2O}V} zgK)*t(~R)$W47a!kdbt00zOF>&G<~Da+$)v=R~E@=KavX<4@!H-o6|1%Lj0!cTmQ= z{=dI{E~ytamqEB3F%^NrTdBG$2|->9T>@xqqr*@@u@F9rztb zG8u1aTw2lG<|+B8k)t@_xo~*Iu7uRr+8t8#veZ$R-!?d@y{aatib{I;Y-rDfOuSG zRV~}+8qCf*l}j8$J!Y%b(=L@{D`bZ?Jw1*G4%Yb+PYrH_q&ETnwIDAG-&diSEI78k z{BoJo__ALrETs$L`C#+Uy^ASzUi*Hau>XZx!w5kDkGpRL= zPL%H24Q1g)nv03eOm~_1gC@W%Ns6YXa7#|_@xo5k-A}4Ys=j$Zh(qfX{j~af9|KP#K-ut+(NLHGu9$YpW=_91prqnsNH>}E;{#Vk z#cZku$DNC?$=z};5q89rVtGH3t}=ourNS*M#OK_t5ta!YyQj9@p*!Ua<_p|CP{XmeQwpO3=??#x!#@M0Ee`N{e^9Ad&U8s}Tu4yTK--aR2)Mv8&j( z2E6Ofugf=Lu*yiWukb|~oltIRQ?}*Ie+dhuF5`dviKS zOt?f}W?&LX=ij0@;znSbm=1`%qunr$_10`ua_1YXE`+R#S39 zCp>%^N}m`WcRnPn75a2IEXo5C-c($F5Td(r(()}sbAG)G3rW}6lVtdtdsxBEste`F zblZ=r99|Ubs`q;EuJRZ+_FJ^`nJu6$@37>|TNa+o^nYZc^Xb`4gVPPjtQD=xU)?NL zx?lA_&>?dh`@dSWQ_;Lf#;`}mLy>pU@;P zypIgfiLuO-C(5~9tA7=vPRf3b7GC>xY>W4eDAhP4rzYX<$?Y3=6?5HgzDSJNl^tv> z5fW0nk>3S%TV~c^#%tLdrl*E!7q~Hn&Jrx49PTw({df~&l&9ZSx;s)V&yVelE}Rb( zlG?-G!Yg)U@20(rLPrd@KBOs-a;=*UAI)=luY$RT`fwzl-D^i|s|pysn^6)w|FjwZ*Xpq&bIt{k^bOdM*B<9B}2jt_ZIdvh>yzl<%p%5{` zTIj+Lus2-53Q9y66il{)*U1ohIAS_< zgjt39d#XGk^b*N8v1G>MO4RU9{MY7EDe-%NGcXHa?p&~Lr+*5(E_1ba{|Z;@((cWx zHO+i+=ZN{0B~Co^x4VPwYa(W%02SUR>g#HpNu;2Ik5A}+63iYRPN~0CKK8WO=nWceqNcxEgI@B-O zM9Q!e9NY+2^P*Bc4>r{xcjZKdxqfuFPGz{cE#DQQdGyajhvMX=qzl2H)AH4t+TipM zWZ{ioX!DPG!c3&*c#K~(gQA-o=y+GZVky!gO=!8yQ#{w;(dn>}Znd37G32+>R(YW7 zI8syzlh{^!@AM}K^?zVNQ`D}*RJKv$%jIByuPmUxQtKz;(B$lchs7}tDu94}&Xn@` z#f3^4^T@C7X$zpbq^pp+HH6zD)J(1Lht|dKYOaiQH-HFh37nXj4ZJTCcUe63)yS7? 
zKRyw3ze;GnhIRrP8lw-mnQ586A9rq1-BA40zE%0$65yF|bRD^NCAg74T<+Hc!K1@B zSso*GV7)VhWqr>I*gt<;V)*zUZnMy$(jPeaj~I!#m+1%?0lx)c5p{yCV1V1o->&A^ z2se$jI~m%^6}rkFBxUi}(x{*hGarT=AE@Ym`urotOLZc2@GxbsGHqQyt>+GHSQ*P` zrugeu&i=kIs4;SOPy}A6i0hkIBN^1maU#9H_Mgy{NFEku#F>)*M6Li=ej0!|vQ~1O zJdTcjapMlGkYG~`36MY-#)!;$<=rLW01Ei?;5I)6GhkVqTJV!!I;NIE`)Geho^^z; zZS-&w`}_2wpeTDst~wJmdP$G9fB`c!(BI-B>?)*t&j5?R-)G z&~)9+u+QH?@F!55l_N!=(3m6)>JpvD5Kyfdyk} zYQg7{k!Go{^ru+lDc-~mBC*BHX)4w~owPuet1{vT$n!;W`cjeI-rUqGNw4 zkH;(U)fo_y_VKewSOV&G_FLr+*>2I^)VnrG>)YE}H((kf#Jgluvl^3lR_1J4v(=g` zVtEK2W@4{H_Wy-pCSt@Q0nKASh`VdRaYWubvL9rGNL}knOiM0?0<3ZaZj$b5z#_2? z!jFHYalh{!ecjVwTa!1_dWxAf)1)m{Oq+D(qE^az-xoS-enyQ|nh`^*1N6;@AF}y(M1G_Pw)>6W*wh@Wv%S!6_d&^@)?M zbwyBl!n8w1XctJ<*h*`CCb1gQw(b$|?a~;I9l5c+ReRk(jIaHc*48$IG+$%epI7zQ z1phh+F;YDFyY_J^D^i*G;PJ2KVSg3IeTesuuh6dlp|)K5{|EqZgM&@dYjH;@U-k0> z+=a>;`*wE}j51+k9KhcU6Fwt7mMRUlrhf~#k2Jr$sh+}UXgeyg&H zGH|;6=p5Y*0j~tvNY(Yt0=Nc=on*7Sh)u)9Jl{B9CwZ%Jt9oO|64o&dvu(ETn(90D zF%X20S(TVic|E!fE@okigpH_QGAZmASa-?ziVDetf-*YSDvM z^v}xx4V}BgVXeW_06&IoLBUe#d3uI8w~b0 zw5tI~n+e{6{{py@B96;Y)^!h4_xpE?J%yG26mf~r={azzo<~OK@&6joch?lm9NzAS zeAeyrldrg|d`CDrKvXIlBu^pS-V`WBoN1401c_}Y09OY87$BNq`?-?{CzkE;o$x(W zYbxxNhSyd`sPMsKBKE=S`OBU{u4R0aCr1$hejnc@dpKyeQ(VN*u$pPMwiV@+cQD@h zpTr9V7u6d5#}z`XBCj32hn@(T6x^q>S`sQVzqD}~4Xe$3jr^XzkbD^k^ z!C0K7Zq&hzbth_b{w%SSS@`yEDuW}X++l157pgX8g~0P%|j zuX1!9?mn`dwa16vkiYuB_4*l8RuIs{Ex;%@2)5^A$hw4hc;KujMm{kQRZz9EmD+qX zM=M~=gD@1k20R=&XU`z_{Xf6zvtVv3&Bw`14eqU&JW-`^LZ4dkZK=!6eq_7 zmS%%wQ+>CcmWi1(@*qdYqOPpsk9h)=urZB1FO$4VgvSJmyVA&kwW@WP^Rje*iZee4-ndrLRD zdI&9?9FY!iubs?3lkxE>`28-UAenk7KQf;i+V6r+f;XqqeZa+_b8uCCOq&X*sxk3?SZNAL6 zb2I&HXWVbfy*k6(4&jwsfLc!Fs7rt3%U##dW)!J8Er!2ukir3NYr0lUOj=d22W}NA z{qcW5)cm~Caw$=_*6NltyHJ&Mk5DA*nKbE_UA3m@s+-s*^1ZzNPLS8gY+GBtq3!OO ztCJBF#BQW}@EOe{>A#r$A9Y-sR9`68lap;!&_#CM95^Fs4!7Dd&{zjxHqiqrf5!ce zr4S+_QRgZ?c??Mk;?a=FCS{M;KLdgd`#bL_! zGNZ)l!Jq=7KqR&fnb!;~-+i)TT1d4!{Sg7y`y62+$-GJ3j3i*wac~q+@J>ic zN?e0!)VaY3=n%WJ)WA4+bz;qxfnFLBG?PzNYX+|@n@YwJxU9{jdWTYDj|rUwaKXx< z-0y0y07tN3AOX3Nd)1ozHPBAkKePP7hW=!Zhh5@EE|7LRtV{P3`yHebd9K%@ON}#(pT-4E@n2ZTpuxWV(R>e%`BA#4APT_ zDAaq@gMz?JEi1SGA$6wYo)s~bN0ECGof1R4?V#RK;VUS2dP{I7Pj4J$QoA1FR}s2UL&K`QRE1Xs z+*i-SOVtD?4FFJsqmH}gCGaTNBv?Q8x-izB)vfz!;#Is(nuJLP| zcZ4Xxz+`_UK}7Cwx1f^sb`R%C0d8TbwKMO4D}sbw(%{v}83}e!JlrhGWB7yR)jyZIxbthqeO*~b(j~P}2jG;E z+qPzG4fL!yyj~B6gsTcbj;X1&X4&)i&x49tqqZ!nfRN(9JX0Ld!V{{ z<==$}IQ9EKY*dmI|Di76wK{?PWUD1({khZ(nb5Oq;x&;FpXMVCSvfh zVCt5>M_5&34HCl7NA7=`ax#*94%UW_{w`a`RHqaHJlz}7){p<)gxeNAQN7*e`WDZJ zt<2lQ7f`+UaA`|lrS##E#qt)95IgK=19Elu`}J#IRnlugjwZFxu+Alqh1}=vt2|xd zIkp%3%3I`vnO^(-Jh(2bV*vq%JiI+(nJ8pDt z{MzD_S$wtMNVhKt6_cXdL0B;|^&-y+xgSq(mvD{PIvkTc6-dX4f8g zMCjE_8M$@{^7AVoGzze=xQbZd>kt7&bp1wojm>2Qx(beRvC8;5Dhc~9NOueY0F$hF z>rgxQyjOvtQN|S=H|O#+ax8C#PevM_!Xe9p-M!D@KFFa~-0!~it}e9KT(4+kVFCWs z5aGzx&zr&e4h_-1A$gRo!crnTkIwCmS$%n8DO!||hpdKm15&+{!M*;~e|{8;a;{8@ zdD{e6`N6k_Xp|syuW)1+w5HsX)Vz%Z*q3#pyg?#zlVQlj?)z~HTZ?QmoX3n@#e?2} z&%)gBOeI$Bxim6YHPz9gaJS448<$u(VM;uSv;@s&j65g)!EZ4z{)*gJb5JSCr>oz0 zlB*6=DLq?|{C;9O=kLt@ol;0@C;;z%j+)P}C=2anaL(TsmRtZP-50VbBj{nwkPu@U z2mK{eGtmHq6mQx80DkkF!D-v%jOl4o#YFoRZ+} zMNqiZa@zfrP_>m#MiPZMMEEx+J}Jq!Tl{QF%ON8@9vZfTccNE3cckn zILF=v#Qtf3(Cirst}^F!t+MTmL?xaixm287A;!UThSP8OkBI_~UZ=kF@m4|pHgRuS z>{o|6_6)~|{SDzXcvBZ4`g`@z3G;={7|1=CO1;*zMIZ4sJ;A*^4O@rPKdu}mMZ!h< z?-lWW3pb;SFy-G%c9ud{i$=1n&s28XT-1^zF;gD-7?()NICUsUQOboSxuutu%Ynh! 
ziem&kHb(fBKf6pn3*Gt3m}Nog^n6%zGBtcmB=zeBmTJ$lSx}O^Vv=|iy zyPG!qPcC95NjXTT_iv5bGSv5b$yFQQd|Bdf?!}?xGcEPGDXzf#+HT`up_j-XjMG1Z z#LGu1%2PkCYgavgMO!i1Zn@Zcr>^746d8|VA!b49S^l4eD!o*l&Lo~Ln$@Sag_4vi z)nqY*`2GV~qju**v zp6DD8I9G1c-!Oidlqg&q&ZMQxuvT6hGTWokVV~AXxV0zV{*}BNi;hq8Yde(E;OdD( zpH{D|yz#xcZtUJx$AKu0Gt;MioCc{oPZr9Ik!x+d>&a@p=jN~Q&T+W<=>2?~JakWM zy9a08wd~Kd>GeLY$SDUoNtm&_PYLvNGsbW4&+tWC?Y?(x+Dc(FYfu)Wd*|k_^5pmr>|N7q`yn5N;AyU!7)SFf<3 zM)MuO0_jeAm3=$Y!!8@EB7~qKL`m{^ta_!Bl8H=OfJBAl4N3Pnv2+tDT)nd!wqfBa zmXnKSRz3jf!1%z7?4i*}Gve;=T8ShBuuN ze_nc2LE)Y~<)9r{#BURhu*rK|zS~=13zPA0{(Sl4<8?KE1YpOJ!5PrUSjQ?0aBAF>H?Bq{+8EFFNE#C9 zP)io9tL={j0$oi?zOenawDOq>2)sP2GmC54K0QgI^>Qy%5gm9Lce&O6&%XOxHm!q5 z(dGuj?sTeDz>SFH6!)dK@oERRZ+a~m%CHmuPZ6+>)3=`_*Sx}rrtv=V1~I-M+)A^} zXjHb13mS`CY}&659FKh%b`-u8b4F`rykay1?{=@GX@!8rzQ?D@CpT{^6L!^dT8}(& z7}H}~^qNYy&<%Mz*c^fTHmk?i6CpYC-OAoEZbG?21_UWil~!BOFkzpS4)2Dk$#+&V zSKFt;&$T_9B6i=Mjo^0a=bq%pexTBoUT?-oNak5X)Ar+y+k&Gaw;28Vcz)-}FefJx z>QUr}M86gfF}FvN?Do-b%|*{cwR)kg5u4b1ZuBA-?~9<-G!#{BOKwH&2qr+;1d0XI zM+|NwsXFQWW@&W3#CgopuD8MsA%n+G(|mR7$Nl#(HOaDQ_h#;FHiJUG(q{J(GY2*x z5Y$@}NQqS%!IemS-pCJ{?Uy|gWYNpGZA?L2Z{WVS7dtGhnQLo?Jm49?r;n%!U5Lb}uk@zNzeB zn<>QZ|9r;j1$%S?8`6vD%qgWk-&;v#hHR+UMB7DzuoVi{xSnC16If!KP}Jo0e-cLs zAHbM*Zxi3%sU58iq;D)Z2}C%j*ls_W<7dE$YaWvA3A1T{g-V}RT&o3>$JFg^@r&-) zn^Y32V&>>((5+&p&U+)#rpXR|f?6x7^ZgG3+xJe!B&ob-SbU=pQwHloiNx-ZfrNJy z(EVG{4|sj#`{RI4P)i$Wslho9z&`4p9_Y28xSk0exl7pfI^IbWT_3sL&tbbRcS~S` zl-=gQ)ncBX#c$XT%bzC~g#)#U4nBYG2WPzNelq(>>EcS#9%ro{ih7gpv*}4K7w>t- z7Swjz;$SCRw*EpDi6hFrnkU61x8Co=FM=o7?1~G8M-&31f2Amwss&yL)qAwj=KH4e zZ1j|{T-z6xUSaEm91p&+7l<=)RcbON3wYoD=47D9vVqJ*I79$Q>~@C-SZ;2TuE$uz zy_3{%jvxa-4A8uq?eC<~8b#d!vEDg=!!vqjzs&bJ?_41r$86uHTJ}sAtkR`LGe$QP zS3Pse)85L3EaH;65X=LW<~c1d56VyZMhz_Bh{{@+%L z8j12?3(R+BX>oa9-hK(cFK+wD|3=~nP1z%~d)c_e0o|TWrPW6ZFz=}8q^XiJrz7Dj zkk0O-!`AlM5X?7jxP86unKGaB`kDHbRc;+u3yU_tAl0YBs(MRi_or;rO@aS2+Uv{m zo`Vro^8sY(-iofcdz4fv7{yt_j8BvlnGiFSwoTjo0bK%cys=r>{En)z;q4^n_3Qc? zYu7Dsz^tlTLgwQTkz5PH7P5X|Ag!Y%4ILd>p!vs(K_Q4VhZ(KJExoZP zh0QF1+ZS4x#+-rrHk->UTqJ0H27Y1Z+Nb7Op!;qamr2-MsvLtUP^AvE_+~*_bXCEo<|bgPe_D4| z%Q`q_VOie5YPNJMs1Y~v*l$s}=9Qw~p1q^Gbubz*xhqG}^@unhi7LAV!PYn;9|c8HQ2_E&Dj4D8025C+soWn1%k&) zom5Mt=u^$c+Qxp2YB!CfsODkod~eYXXt~>Ye4H{7){_;L8zA*ad1p!W%CpY+Tj-vJ9SO2_&3^{QG>zY~w@t8n_Ng67I pdUSMz&xnZs@8kdNX#^ldF?SSP9Nsy>|7r_CURoJYDQO)1e*nt+?5h9( literal 0 HcmV?d00001 diff --git a/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_sample_thumb.png b/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_sample_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..59370dd40a3a2bbad477dc433467dfbbe0ac56cf GIT binary patch literal 27537 zcmeEuRaYHNur_YNHtw(?!GpWY#sa}LXpkM;-Q6{~6Wl$x+r~AxySw|xyViI9!nryZ zv(|K9%+yp@)zke{)r2X^OQE3O3y)se`-1yA@oS0qr(-t;F9rzgQ1BQqvU%_lOfJa@#c1Y#87kh8C8U8? 
zw4Z~-3}b;xWfaYyMg0+?p}<|xV3r0*eQJZ6?ph%?_j&t%+e9Au_!wJ2YGvigXq%SW zc~M?VZ7sb`hue7bK5vx=nVk!b%uHLy>#%gpJjdoo^egeBV+{k<1CBH)+2l2K$XeR)F z$T!0=Q{Z3U>g(G(quXE`@{4y>tRixqIQgy<5wN&^l1R;d_U=O6<7#oZiup21q6=Ejf zIZAb^@TiSlT0E&SBW*SNh$QGzlpGi(JN#BsxIRp}#OkmE$ybJQszn5%7SpwUlf)dt zzzLX6IrmVn;)3{lB>E^9>$~Cgr;zo|w>>Eg`B8b?#rl$%^k_DZ%-T-$>dX&+}XvkB{y%AZ5d55?v`t3kBkckXff2G*Y@(lAo%Ztc5eN zGbDa(`#a6=aMCqste>J=UTOv)l<<^`v{NI*zqboi8colIbJ6IMu>Ca&OXwL1P&((g z6yrP|{N|{Qo17@rVvI{{Q+exDLxas;Q7)_=IPb8zDllmNq5JAAI^GMcizy&^9c1kL zMml)*J0q8xWnVdwN(_~{^~&Vi6>WO~drhM&hqN$lJDpNyJuNoRHa*+fH~|w{EUG9x z01jHkMOCh=+YURfgI9%iMpGcL&i~oGd~RNRa<+7}Ewy4InU#M&cJG9t)D8Y;m?FpI z3yv*L3fUP@R;ZXG@Ro(W)$roUJkrOp0b5`9Slz*GFzZ}`e=>#(NEzRxU>FJH(D?%S zf?b0;Xp8K-A(Zlou&q*h^|5%BY|b76Dp@H3<-@q66BAg3S0R@png~u9>qe$a6yg(8 z&Xcx#x|f^A?B2QHyjICXaNk8QHclROo>#Qrhg&-reznc_BmG+{5Uhdi$_XPB4qW^!=W+kY-YTc7KxpQ#@Ql$jVjEINe*9gU zRdCWda&Tz3gy>CXr^NMt8=B3$J;c>&Y~sNhbA-$noA;8*%g6cwY~Rtfj6Z8lfed;{NG2W z^kfEIPdARpAfa)V!%#ZVN{2rroucg_?xA&{8vr2q_FoM%scnNtRc86Fmt^5U*lc zx1)&Lud?;e`xz1!OT6p1S84N1j8QMOLKZ35U^w9IT`(wfZ;N{aTA$US?pN?^T~<~B zVg6DX!?9Rx7V?lp+!cKIN`&3L(7kkQv_piQmEz$<@za+)x9WGa0~LM;N38NEpBep^ zeaAgmE!Tl3VwD67__cIL9{)iSYL(DPlxdi7QD!O!B>G%R{!W&oyT35O(m@^*Vd(@T zlq-(NBV^%dV@sc++r(Md;`ie8r`vAlo!eN4qGbeQy~Yj5Qn>33#s?*Lx!0BtDzbGO zdD3L^{*1!ys4DVCvIDLdi|N7Y_e@9JXM$CaITNVf6AOWv% zcpQPz7s_M*47`^mbFIuw=GW6iw!0+$^4FI?cP}M1wl#_t=9AI?EX4O!XZns1|G`!U zII&o4AnacKy*eUi-f!PkwP-O)@%U8q64b?UUn;P z$`6>L+mL4d*y`KMQ&!bb))tZZOQ#j}l~V0Alk3X7*U602oX2!ys(o7OY~ap2?k&ip z79*c*@NEZylwEr<^67QGrRk|cIaMg-S*~&s+RnQaaFc>C2I|=W)@JqB6WmwExxnX> z-;+@Kh8t7Y3#$xZzWct`+BD}=fq_5nb zmbVzAVpxMX%ga^il|Ax)$qM!{H3dQNzU?m)f? z#=e&5r@cPkvU#7kkGbCVNY)cfWi+gN$ytQ0<ds?mGm zWR_N*m-+9#6T;fLDkYmA--Bu4W4#8B#`|^)^d5bl^N!1Wl1%!g2H0NVFMi?j`aQ!@ zl+t5dm&A9s7_3nBl=9B|-E$efxUCW5^m5Z2}^f-g)JU2= zu^>z7Z{F@l_*qrWr&f3M%@TF?6AK`)o4$ zXZTTNe%wVp{X$DLP2T~Ofi@;D8LeDDNb-3Z{&6oRBFwrrD0Db(W+zN*icXDJ_LKWs z+`(l_*MSp;lJYc^+yXum{=K9?2w!9Pjo;d{lkK_tlS==kc*Tip;ZqZQEL!RKyyk27 z`Sh}Y!H*Y?Erm~a!hZ--9onK)kU}Bt>GKQU8w1uhh>$7KbkOtud>8W0=~+ui7Va8sDmc`5J_4YYLR?Twr+mz8^23|(d*yo82OzmjUQU#G!$It#w;R>V?IZ4*w_JAD_A5;-L@o0% zN<*yibAYkBUd-q~M&j1?-@^;p4hH1ELq!xtIZStFqJK5DLe5MMb@*kyH@=F!d6+t* z+&unlytj+r6S;#95bmrYeDBsC$&{bJbwC&XvZWC2KwAtJO0n?FSD2mihVB|AiJKnf zE=hjXQ=6)j7{`d_@XsPMdr6d>WJYc5ZvDDed)6a7JO0RBcD6*Q=F|N>dfQ%6Rnm?q zH;|tYK6q(C*XevfGHl8UHwGW%I)m(kCmF4m2_(z*rQV(y6trnf(rQyMf6w0b5K004URrjJED|Np{ z1=R@XWsmb_mo>2em6!SO!Z-E%h0NUU-}(j?D%vJsi#=~GwaNzZCJqhL`wMAjFlDkr z7ZrQPUs1;O+O??jspgu)*&|@hRcIarAczSH@9uW9^oOh(TfpW9gh@7<2NCF_Z>>jj ztfR*(Fe;9|;Q-$~inEBEJ;)JD5ZMH#E1E7L=U>}vrJZ*cr0+ZnuVh@JgE5KDktcT&|=)ViiPp^vwk$0=JqoN1>kV zHSxH??q}Axr}&3@LQWoY{%A3CLd9U*qqJf4A|9%P;LwP2hd!qX+N5Z@yqUY6D|8Oe z`1rtqTY9aJw<+OcH{*s+7j|5?`@JV(@~+CaA*!a<1<*1_>A@*LB;ZJXxE&k*JDtIv z^-IcHPW9jdY#*yKaiLXLL+Dif)aUel>Pj)p$5_A7)P z!TmdwCTrXb%kDcTD=i7JPHVoxp>!ZlY`;7+cXBI;tm<$c>3HpYUARd!+bzPSF7^OE zJBiKuVbyWA-G8-yTO+S#6UK)q)L+^nAx}{CD^-gv966!34n0HW#Np52X2Fq{u@-N+ zLm;WLaYnsck>?yjy5A?II3jl`tr`VG+!`ZCCx5MRb01|>?R7xZ{s=2*3hgo3(rOZF z;^n2tF7kw`LBU5sT0?7EXO7vVWAD4F06nEyeb{l-)^=e(=uU+bujI9Yx)krgyA>6l z?EK0YHhh0B5f_1)&AhF%5EM}&c^+{jO`lza%yggf(`LK;2Sy_0t$y7Em_ag!gYx9N zt9!f_{pI?HMet>lDfw+$ z=lU5k@Po$j9U=B(MG{{>@e+rsKRnZPA>8@%TS~ll$-DIN~NOQIsm#z9#mBh zjNh&^pY;3J7(syWv8x~NUkmFhY-P3r-4{#`sHRxM6%t2*_0-gew!NV!#j<{3txB$( z5Kz+B9PQ>NudjrD+Lm}3cBLw(_Y7>od-fhoc7#1yR()+;Y_iV_zlPL#zI@>pV#+PD z%SLv?V^NszfyD%fWtAk`^|WOqHSx;$&|zU%<@B@N{$7(ZDY@_X{UO02EO(wmyGsAz zl$pDL zWeb-=2<9hOGxIk)cSP0GXV`X`&r>VSK=1iUR{5pqIXVNMulzH+#=I@7SXrG!i|KmD`~lK2bb2wN;Gv{E(z#D7MY4%#w0`@#ZHo 
zb#LM(u9F7A(vMgflTyrnINax*xK*8be$J7c?QQ{|ZkOfj?uR%eCezn7 z%M#C)ne@A0{OwWrdwn4*ZkDsC_Vmf8Yi~?wcj>xvkx+Ec_e5Ui$`Q?zz~#!Mq(wTO z)*&~Fo#JDcEU>dDSo?v+USB3lR;+KvinLR~P2}Bwzm#Imkkyc~FR%kAX=_hx%MG_B zCpuj6mc1A-BKD@8;;dJn4?4z{9=Q+nNU6wQ#bQr5zS+utqH!LbV-G~Z z;sC|TlE`)K!{;95Mm{1?mZCA&Sn?SyLSxhPnp~8hbC8T@4)cQ+pNEIvayb;h4#_-U%vX(Uk_JVpH18^m7LA~YP=nxe4l%Q zJHP?oW=Nrs@lTY*R7m?BDAkxxtLwj`SFbx{w-iNcB#DxD9s7>J_kOg z%w0l#ZyZjPn*I3Kak|Z2H`&b1Tq_OQ&(m5{jyg%9=YOs?jBuGfR&awWq@PzMocj$_ z3ZZg;NEj3gpMElO0 z+RoK)KE>*5&z+FJbyXq)Va5u+cxIiDv(}=6gwl;Q+h20_q>ThKDCML{FoR4EgU{f2 zF~NJ_Vaj<)MUReuB%OWzKHJ?Zr*=z&(is2AK#J={Q?vPer+emFEfonKS{((U*;HRv zsY-e*6(9B4%r{G$#4vehNur)5lfAX#$wSOO5*zeVN@z1IFXn%c1*KU=BJ3mY7lZK# z>U#S9uINr^RfvCL(e}ZLEYlNCp~LT>;3kW2Da}&WH0p4Ha z=pS2SdTdre+Tqixkv4yVcq|1DfPVp1V4eH?K6eaX6oNb8JB9I>`OP3mlgTJkGKo9e z;d%z$2ng4&-14>=3&kd1uN6ua>rTP{!T9TfnIt%}?R%BX!V59kP_3h>XxUQBgxl2z z8mpX#eZ1(6wZwa>O0OKque4XXN`1aJKSf)u@dO^TZ$P^1mV>H=ZWQ7?F#{~&Zr@kg zGwd9))ygDg94Pqdqaf#O4)!Y$Rby1z>05phZx6Go7FjF_^0`H-v_G^HHbA(lPt(g83YcF$?@s>@J*vP$-&4 zndT2+3G)RgvVoKz7JkzCoty8l6?6a}krl$&&pUNTtOqqSy5NBE4ZPB%W=axTjc$l<%=<f1_8&N9)%E|R;NwP{b^;XXU8SDTP^PvhwDgN7KJs8>RL&9u)g3^r zY|5pPuaNF%5&S=(nTJdHZ_vic$O#)|(FBtU9x!+KF7|$APW)J_7;tD9^%#9CVAU5E zV1kJD=Px5G8iBNfd=ItX4)P1y_!&ODwUDx%qSjQe8N0=M!Wt9kR1)&j7Uf8*M9yEX zc_!*%A)qtC1bhj_G$pY_A6t8lJ|6?RDDI&@5J;d*#?mp%zhdS^3xVQek;MUm-oP1;WOOWl>O4k7NSZ+3`HYD-H) z8;2BlQM585wWI2Dpj}5hVm1q6{6^ppE?V2)=LwWEMu@v)$h=zq;O*a^r(YX=3+{d3 zpSe~O-y^CzpC^jBxCH}YDNgyTvQWj_6&lCM19?Ml7%brt2GM0dWTZ=QqaFd< z4XtwfdbfY!TEu`;%{i6y=+OT9PQKpu6x-SU330M|&Q@}W3zKP@Y) zVDkBwi!OX7E}@VmMt0Hs&jdxK(zUp<0w{<#7(^66%3kR|uRQQnBO>6~nFs5u-R*{+ zrC{rP2Ve@ihD2^U^y5!DL@_{8Ta3Q)&`n!6hCYxUgvE ztq$Z5SnQ1D+mx^cV$e&NCAF^75QH*N!&kze5pJPm$sc%Az_^C><4#q7W91(otWhZ* zkM)0v{x;(3vd$XNX4ax=+iTTfNKJ@=j4d250i%8!z);+7I@^)rnO++4 zy)(_ovj+t0`AOTcqvnApk}v%9q?3Y_)hmk4Vo3OQd+>ZU^vA|TW;;g3`QshLaP})i zQ-jS4P)a&>3?e8i?6^~+6qFS>r2e9-a1m3hZ8-P1+^s}{I%2ijz1C}a^dCWxvUF8_ zuxf4TpcDU+;jH6iGJqph$TDD?oP}<*eN$}Dd;Ja6RnBscAV$qc;HeNSZhS1DzH!|3 zy>aXsc;F`1Ah0qZ*zyt7si2>}S6IJ|_dIzBMkHfehY#{Ojol>78zZf>cU%@z#uDlk z2ltGfEyJ?j`>OcjH-U@Ml}+>6D0(RxVMA{uj76)<8thI5^z@X+=7U!|q|{I-Jc<@^ zcwc?o@49B#qevWDEm_VDuDVQA<1;y2?bZA?tee+u@ZG=XX-7*VkdYN5snuRL^t3+* z8zyt&$nxDci;sHVXicX6X*dNWssB996vx^*&mchV4E?pgeyVydX+)_ZAO4Rw0d&+7 zG_(e=M#PNkmS?w{dbYY!^$TSP4L{21+tjP)X~k!~bQvU^)s5_Sl!S9El4s^bo@vXhtC zfJ)S#^qSgDyGdO}NmN3u7Z8h&euhe|P^J7Fm}7M3^JqOEZ-#YY?muP2(x=MKC}h3n z2J*vk1h0-u>3+bk2MmXrQyblMS+z#XKpCY@jJnep<5|Who$E!HIWdcN4c%xKpgeX8ne#T;hdT$G!+ZJ;I|ax%625r2 z&?J6d;dff9!D_Vd?-Uo(#e?X%vf<6phOJ4i!|&Ja zcRYUX+(0iuyy*g8C>FXonrgX~p$N&Kqv3hDlFU-9jG38TPJUC&kDbP<3DM{JEfea+ zud-atwGpKW_nCP8w)C0QF}FnP`uZO-b_9_7 zwkH1U+jB4D#iit6R=T;du0RHM@+Tzj6|0%{HR9hVC{p=YfA-~>rPmm)6xX=%lMs3( zOBr_rl@7zDJJ*J?PMVgrh1&Y82ph|Co#(FOEOw~sx)fj)pv-P($zGK*3=Ki+7zOGqcg>N2}<32q);2FF3`Bp-xZ5i1Jj$Ly?hpULl{zgQl6G@7JZJa{EwxG@j#4@M$ z7y}#~8{z~nI$f5hlS+40=Y&6X2p9(+*R%IxQ&va$dj zAlJG#2j3pHtWD;zKJ9q|vv?rN!7obM6%YK_Rr-`IN$@o4J!MxVUs^=JLrr}`U-iId zDm1vF03ozL(4|IURpG!s+a z_9FT-LyLu8HrYPACT`zw7hdB9SMk2TN#I57biYvy$<_5m*K14 zYi(pN^--;(jqozs?JSg8=Dp8A~3TE(AFQW(-~|gkraGmwmuSQZak*a-P^EBrSqG3 zq<_u_`KnX8#s0K(2aJ2m9*8>-7KKz(ia~OB=}SvEHeqc;2w3mYx|u245H4qjdxOf| z9`>iUY@lg^S4VgB=zIk}v+WucK^}?o`VTy8D)02O&+qZ1YAx9VVw#uhMYK6K^kTY; ze6O~VVRA(xdCL5m;p!4gat}Dzzut=4JeJl>J2TG=q~%4RL@;rH@}u&RuHskNg+Gh5 zk9ew8N2oh@GH$IKouo<%xt|Ds;Sd%*ayyYV!+PDk)G2)7W=DIBim&k89FTl00>7b2 z=-nT1ueR^sK_5{Fo`Do`K?mE6v@0YQJF}N@Z55L{e)|a=@p}i-Y9zyP)&9?Dc5}z) ziEDcIyYl6^=?~#*mjZa53O=_AWsIK84KV_MtGIXc*-p7MjiqE|jGfImSU14U<96Ot 
zbEm!MQ|2{sI)3ugozzi>CY;yMX6S~BRucACF;%x1-EX^fnUtC$}DmcGRi`bg?Bzr@F^6_bY=rX_@`Q89`Z(T7lY8krU96 zhR58mIYD6Fr56m>bKN$(fx)@ia(IC`wJ+x<^i?sU91eXR%4)|=UvoEe1Ls<^zJJ}% zD;gjabi*<8!kL}vwQ{=>&HoT04b`vHscH3PORmn{qT*%*pZg?zo_`lZ(6*57jWuJN zi49QOwzg;i7$qC|`E`+q&8|#D-B-2@w@M*NsOsPrTsM{qYpnrdR|ClPcxDdw+EsBj8p~R0`jel&ON3Uuy1&WQRscko}~AhFlJX z0;5W2(@iaFg45zJ+i}U!t#8FwsZNrSQozsefqCtEF5lEX&LAQHfZM3OSs2n@H1Dys zvr=3c}>O^$K7a9fFCw zoK~3m``yy8KXLqWuaAzlSmT3zA#t+%y8{H4pzh~&S^Mi9rd!>NSrybbdN=)hz5A) z&!Jol{M4_2tG`$DGOFOZ*OAX@(Ner$$>RwsZb*E(0PTURTMT53eTy;10>_ZJcU9qbw9mZV5 zygY7xnuQ%JPZ_Ptrc1zg-bpW!htd~G*Cj&qmUaKMerJ9iPSI%C=mK?5feE^q;vZK` zPUmz0MIJEa0v5Ww0Q)ULCvD)Uy6y*{P_4-Gf&)&TFj^!=F7y?U`>di)r&A(Q^;MG4I<=|LZIoTeq{m!B+ zKt;6Yn*>f>Su;NPhd6hhMRmFC!)e**f!`=>r=ehUJcCX~b+!hp?@;HqQiOo}Y(7pkd>@a0>E9hXi;ma^H?>Aon^mK_f8ARDOIt-s zGBi$N9^AQv|eqU zQ`gKcLdaVwywMsw>6Eo__PQIt!?OCHY=WCEP^c6priAWz#V)UJ@k@>HJ|O5%@`uht zqJ~e_%cb6}X`YNZCBCJ4S~j~yp380cU;*=fIrs z4^BQNB&qc6iQuKjv>IwhMdDhYNqslDldH4lZd!Hwk;F$V5F1+9f8?iHHm>8!WgJ0S zVR$J<)`AW5L||)HHey4eN&~6%8C+P!{Hi-0KWXS06Bb-udsmrr1}aVqQvq4^4Q&seegE>dq+`1aY0iPEe=8AXgMjYO`={z7y zfu9*``q-Cr*z^(5KFIjTECk+cTbmoKL2k?sqY4KPaM1t&AQDF-;}U%c_y(&}m~Cg^ z4azNbNNhAA15?Mzcx@|l`ZHU#F?gi#r(J=*o-T0ZNj82_Nk8%y&`hld$Nje_n*4j2+gz`NefY++}3 zIB3*KyjXq6NZnC#|7IzW4k%(%0-9UlX(+MUJBB3eP%jUpamS3vRDbYlF=46wt-=g| z&`OWiD(*F_sk;3W7#s@bN(YX@AfCS}g=FEnkj!@5&wA{g<~2mv)iN93>_3~5ImDQh zmr9!eYC^SD8^8C;l?$J*#Wc~G24gGYG(zR3Fwlp~iX2Tugtj(q5}YZ47-FW7l*42l z8DVg(0b_oQpVHK*f5v0xwx|<@qligNg@(Et)uPCe+laD$B5B({Kz~)F@u0gE`Av@N zM^4nXZ-7^En%msWNqwLdno_!!<sqW0^L z#ZO1nc}n9zir*5+sKDOOy+fF2NYHAHnuKBnOkWJY$m4>BP1RdmQ{5Iz%I7B}<;kk| zree%lpKz2jD0Yl;bU7^jZ!k~umf#NqUxEIhc5ETq_6T|E&!zEDahKo2_EEM zJuuyaGqWuZ4CQ&&!`2u;5VZo4vbG~Nc(<%(lT=Nz;32%5 zjS_&$jsFi5l|?DaoF^r0y0@BfCIDqZX?=3QO**eHq#k(`*2ZX7t2zt#o%55ykMk@J zcpMi_MKw4Fcsoay#vMcIlw3fcVwCTnX-KHklcOL!Nl|6*rBFAY%>QrpI<)glll|!W zauz|M_^%nPAs^~v^xIPQe}*P)L)Nwiu$pjlzF4D2Ukao%EoCyFps5jw{xNtjbq#Jz z480QG+MS_#Tfuy})gwdfgMx45%lr=YgJ6bsJAdo@P)GudbS?3>4VVj)4MeqB?a>_V zb;xI`>n8*anfP*6Xc8`07*{M;_00>+k!0R*}Vj*zGNS^jocn;7YG zA&Y9OirTBRl|wUwFtAYZMCOqPTTTdy+(_{so;&|H%Z>1Cib-OpS$KXs%zrxgm>tOS4Gd)G@t^=+f08k4Fe!Qo-2Gr;u1YDO-7<=xW7xsj-O*AZ~54!ir=dfSzx+laUk!PPIGm{mn* zY_;TxVPOgWykgAg70Y&XwlAO*v{IHMnu`ywRrW>8!M{AkmJTd7C}|fKoFt;aX$hk% z)?iktJ6jzV_?<)BR{$$~0=jdkSC{P8!epoMnT7nTd~#w`sx*7cU;27vjy$)K%R>Gj zzns|~B7@8lV=)0m0M;n-a-z1*s zZ*W>f+u|cCEne+--2AZ@n)9ee))~bo7kK3!$AJB?k(H%dH$d;mv3IfNv}GmURj~ zb@}IB0IpF>!h5XzPivwkQ3UgOgK1^urOQNQ^9Su7sbl36Gw*J&q_6NG$;pZ$(^YpC zl#^^4aM4?{SGBu@zMS&(OM(H2MBw+#s|(jsb71)hKGdY~_|VDkh?G}=x^Az# zifhKec9Rxwv|8@kxn{HT7wa{?BDErBqC%z2KEhuXJ=z>q*bjiN?;b#v%szEySHtfzi{O!Q^J5Iu2qAL5b0$}Vn@4^b;Z-2y_QvjcnTnF z8&k5+3#7J0Icyr!CfmpgXWcs>sUL9>945~G%JZTLg`#qL0J02anTPX zTQ`m=ARF8sKM_bDc@HPn_aHUPDxBqs8U)buM?Pfmsz`e>6?^-8=ATl2zo3>5XxWJS zcT00tNrgl!EyxWMunzL6|F^MPz>6UMrah~J*x#tT(%(-KlY3)(m@bV`&dkwDZ8@#` z!^B}1FHKkUMbOYG&iv4dTF;X8atF!ynbH?jXHiz!c}+7v*BReTLN;_O1I+5#$?w)7 z+p!rB@`SrDlvYofa;zu7IUNQl<;zccKTFumM_zdx;i0>e-Cu};keyZ&mm$rSTLnVJ zBWEzU^s8-I&qsVM&DI??9ZFWABW%iFix!WrSsX8eXUE;?`wBfmiuA9uT!&RF)|AsDu1gIpm)tQ`Bc#NTSF; zEv)a?(i-lQ(%Y}`eg5>F#+2B2-;*kMb1V>mESd?eozMUm2lC|i5;fj470IdEn+e-D z+>o9nEq1!x25`rxMHryIWF1vbwgJZQQre=Hl>TX}jPO%;6ahYSxkdEzQKtG z4O^mk6ZxnrI{Z#fXbch@CNA(3P#P(`culW>rD@9HzNWoMvK@oZ%_Yv`8@!Ddqf2Hm zzt1_?EwTnzknb$mbzN@G@CG*_DV2M*+=_mC&#pAm?Yf;TvqoNcobx)FJx-p?krP54 zaK!$v>}WcxPD+ue$K8|Xf=q3{@lf{(tLzgM>xe!kGlte|I$lg2CENb(iX^o0hMVq8 zfZSw7qK4Cu>SiVJ65{Kf@KJY%_wl#Y7BFHqYamLVSU&TXuFEk}M|q_z-j#`%-OQ#3 z(4wvPAa&w*%)3j#@3PiXj^}+1u2XXTAIB!8Z-)mNCxkOo#@_;E0$DSXw&4{m(S&Co 
z&*DVSqbt^hinDKmy>sT9%21hZjr^de;=Wxd$JUY=-k^nOZ!}(Y8Ok~d5uNX(X8D+Z z&d6fC>ijeOt2l%|UP|uBPhg3w<_L>y{vyR_x75idvE^=#wtWnlP7(s-XZ$T36fC}B z^wFUajjOmmfKcV=!FqaHi9r6gxZU0H=3H{#;>r-f$^t)xq>aW|4ilKw%#7W-PdDF! z$4w)45|a~ULOj+H#gVN%tCDky@hJWRKh=b@TFHEKvT|(EUOibdAO*YaHVNB27#~iK zWf~Sp(KwQ~a_g{Gf{$TIdWZ3zaZgcYyRV6X-XQIj5riN_P=)g)=?hGVuI7n)TjQSE zNfO&4ggR8s**vl!PHr8IJ}IoUXETT7VNzJGmia$6VT@^7{}JDA`^m%bHNNyx^TdWD zG}DkpfD@4octCsGTM3b962QxYUWI=FrE?DD>66C; z_UBgGkCsQe1{B7h)J=a!TN*vLr`aNGtYf*KlW@@lG9+t_isg!>#cLJUD@M;(sx2Qx zw_aD+-zM70BcxwtB{1nG6^oxik(a)qq`Jgl)<&8ha*zQScAeEkW}Id-(7A zpC2map1?b9FdaUXU7g2Tft@#k}HcKgOdKbK0eXM(M+mwuOmZoi-T;Q}%d(Cs!a#5H@H>${*6} zQ+_TtGk6+=VI8f-5gQ#*1-jlvX-^ERk_U@|iT(@+axbUqWHI$bL`8PO^YQ-6rdmwG zxh01OdnGW<4x}@-y;yNEHI}n-2ZL2iU6H>=qu|irGX<7Nk&y{D+Ki&#Qd`6_ipPRE z%yPd^RNZzwUh;aoam*{wwQY!Uh-xpvF> zqZrFYDWv)lgK=CRNxA0rV^&AFFRtqJAQs?dm}R-dC3y_T?Hm^**2YzPmS~wQC8QkObD+Matuge8`|>gr=)H? z;1~|5;OKq;ARP9oVpJP#YCD< z_TtHYbW%##p`^e{!YReXfEBj=P~ElZf2Ti}w`Rl{9#JXpG+vnR^>()XZ5c?V`i|D^ zhPN5oMSG@MP8PLUieXxPg&*VMqmCIDi!9~4IJz5Y>~1o4^1QBf^7=SqqqRY&l!{yQ1-Oz$;*()*AUwCRpPU0X#dtR& zV<+gZg?+z>@p{Fgl}ZlH{ZTC*7rWnAD+lowQAMV2Qqhd8Lu%&va<;17-Xx@cP{vLe zaVnOLW^1`{`u&U%?+%0H^#ogVllp;Y& zad-FP8r&hcTW}Ao+|N7j_w@tT$ILa8$(+}mhwj^+dFRau9iRR38z0KM$$JzDY>Y33 z){cDb(RTr$!9J09{>Pual{^GKgfeJQdM9NFyEb~tXf}(zS_+S$Fd@Ktj@!Y))Cry2Wv3n>~119^xHRNxRPj4Ex0Zs ztlzC%(4k|l&b88BdtVd3Dn8~YiKNyBh&6O-u-I#V`eLb4v^&YP%COj8yV~|r1bsi6 z;TG!kH&wT*Q}xg55%^M#PZu{0I3*1lAtaxRs@RLhZP z>Z<93B;Z^5b}_6tG!pMGP44n;Cn_sXk(7?E8lfl3;j25aRj)-n4HPFz2h>;op_78R z%ZkpYE`Cm$i3aJ{XyykM(u`*m_~zSZp7W|Q<@^5DbQ>YvIOX||=ij05mVqR4zC>0H z;bDWU(b>`1NuzdP^V0)UjqPJ)#{=bcfiCBAO`;+T0m#`}4t)!l9Q=spYGox_Y)fs) zXkcO9Ldm{<DQtb|Zo6o;OoZMx8|I8jtI%n<)or-HTxfT3!kS9Ji~bgUG^=?o z=R!fL>zFDpfxYDvi*^!}=2ItWi2}k;^**D`O0a8o`Mj`D$L*Y4l+K)O#I!xeN62T? zG99 zxU4<@Ik{)|fUs!j&#)4&6>oE}yd!Cro$rv2UR?M5VYWqG{DpDr+4h&Zm5+6A2O}V} zgK)*t(~R)$W47a!kdbt00zOF>&G<~Da+$)v=R~E@=KavX<4@!H-o6|1%Lj0!cTmQ= z{=dI{E~ytamqEB3F%^NrTdBG$2|->9T>@xqqr*@@u@F9rztb zG8u1aTw2lG<|+B8k)t@_xo~*Iu7uRr+8t8#veZ$R-!?d@y{aatib{I;Y-rDfOuSG zRV~}+8qCf*l}j8$J!Y%b(=L@{D`bZ?Jw1*G4%Yb+PYrH_q&ETnwIDAG-&diSEI78k z{BoJo__ALrETs$L`C#+Uy^ASzUi*Hau>XZx!w5kDkGpRL= zPL%H24Q1g)nv03eOm~_1gC@W%Ns6YXa7#|_@xo5k-A}4Ys=j$Zh(qfX{j~af9|KP#K-ut+(NLHGu9$YpW=_91prqnsNH>}E;{#Vk z#cZku$DNC?$=z};5q89rVtGH3t}=ourNS*M#OK_t5ta!YyQj9@p*!Ua<_p|CP{XmeQwpO3=??#x!#@M0Ee`N{e^9Ad&U8s}Tu4yTK--aR2)Mv8&j( z2E6Ofugf=Lu*yiWukb|~oltIRQ?}*Ie+dhuF5`dviKS zOt?f}W?&LX=ij0@;znSbm=1`%qunr$_10`ua_1YXE`+R#S39 zCp>%^N}m`WcRnPn75a2IEXo5C-c($F5Td(r(()}sbAG)G3rW}6lVtdtdsxBEste`F zblZ=r99|Ubs`q;EuJRZ+_FJ^`nJu6$@37>|TNa+o^nYZc^Xb`4gVPPjtQD=xU)?NL zx?lA_&>?dh`@dSWQ_;Lf#;`}mLy>pU@;P zypIgfiLuO-C(5~9tA7=vPRf3b7GC>xY>W4eDAhP4rzYX<$?Y3=6?5HgzDSJNl^tv> z5fW0nk>3S%TV~c^#%tLdrl*E!7q~Hn&Jrx49PTw({df~&l&9ZSx;s)V&yVelE}Rb( zlG?-G!Yg)U@20(rLPrd@KBOs-a;=*UAI)=luY$RT`fwzl-D^i|s|pysn^6)w|FjwZ*Xpq&bIt{k^bOdM*B<9B}2jt_ZIdvh>yzl<%p%5{` zTIj+Lus2-53Q9y66il{)*U1ohIAS_< zgjt39d#XGk^b*N8v1G>MO4RU9{MY7EDe-%NGcXHa?p&~Lr+*5(E_1ba{|Z;@((cWx zHO+i+=ZN{0B~Co^x4VPwYa(W%02SUR>g#HpNu;2Ik5A}+63iYRPN~0CKK8WO=nWceqNcxEgI@B-O zM9Q!e9NY+2^P*Bc4>r{xcjZKdxqfuFPGz{cE#DQQdGyajhvMX=qzl2H)AH4t+TipM zWZ{ioX!DPG!c3&*c#K~(gQA-o=y+GZVky!gO=!8yQ#{w;(dn>}Znd37G32+>R(YW7 zI8syzlh{^!@AM}K^?zVNQ`D}*RJKv$%jIByuPmUxQtKz;(B$lchs7}tDu94}&Xn@` z#f3^4^T@C7X$zpbq^pp+HH6zD)J(1Lht|dKYOaiQH-HFh37nXj4ZJTCcUe63)yS7? 
zKRyw3ze;GnhIRrP8lw-mnQ586A9rq1-BA40zE%0$65yF|bRD^NCAg74T<+Hc!K1@B zSso*GV7)VhWqr>I*gt<;V)*zUZnMy$(jPeaj~I!#m+1%?0lx)c5p{yCV1V1o->&A^ z2se$jI~m%^6}rkFBxUi}(x{*hGarT=AE@Ym`urotOLZc2@GxbsGHqQyt>+GHSQ*P` zrugeu&i=kIs4;SOPy}A6i0hkIBN^1maU#9H_Mgy{NFEku#F>)*M6Li=ej0!|vQ~1O zJdTcjapMlGkYG~`36MY-#)!;$<=rLW01Ei?;5I)6GhkVqTJV!!I;NIE`)Geho^^z; zZS-&w`}_2wpeTDst~wJmdP$G9fB`c!(BI-B>?)*t&j5?R-)G z&~)9+u+QH?@F!55l_N!=(3m6)>JpvD5Kyfdyk} zYQg7{k!Go{^ru+lDc-~mBC*BHX)4w~owPuet1{vT$n!;W`cjeI-rUqGNw4 zkH;(U)fo_y_VKewSOV&G_FLr+*>2I^)VnrG>)YE}H((kf#Jgluvl^3lR_1J4v(=g` zVtEK2W@4{H_Wy-pCSt@Q0nKASh`VdRaYWubvL9rGNL}knOiM0?0<3ZaZj$b5z#_2? z!jFHYalh{!ecjVwTa!1_dWxAf)1)m{Oq+D(qE^az-xoS-enyQ|nh`^*1N6;@AF}y(M1G_Pw)>6W*wh@Wv%S!6_d&^@)?M zbwyBl!n8w1XctJ<*h*`CCb1gQw(b$|?a~;I9l5c+ReRk(jIaHc*48$IG+$%epI7zQ z1phh+F;YDFyY_J^D^i*G;PJ2KVSg3IeTesuuh6dlp|)K5{|EqZgM&@dYjH;@U-k0> z+=a>;`*wE}j51+k9KhcU6Fwt7mMRUlrhf~#k2Jr$sh+}UXgeyg&H zGH|;6=p5Y*0j~tvNY(Yt0=Nc=on*7Sh)u)9Jl{B9CwZ%Jt9oO|64o&dvu(ETn(90D zF%X20S(TVic|E!fE@okigpH_QGAZmASa-?ziVDetf-*YSDvM z^v}xx4V}BgVXeW_06&IoLBUe#d3uI8w~b0 zw5tI~n+e{6{{py@B96;Y)^!h4_xpE?J%yG26mf~r={azzo<~OK@&6joch?lm9NzAS zeAeyrldrg|d`CDrKvXIlBu^pS-V`WBoN1401c_}Y09OY87$BNq`?-?{CzkE;o$x(W zYbxxNhSyd`sPMsKBKE=S`OBU{u4R0aCr1$hejnc@dpKyeQ(VN*u$pPMwiV@+cQD@h zpTr9V7u6d5#}z`XBCj32hn@(T6x^q>S`sQVzqD}~4Xe$3jr^XzkbD^k^ z!C0K7Zq&hzbth_b{w%SSS@`yEDuW}X++l157pgX8g~0P%|j zuX1!9?mn`dwa16vkiYuB_4*l8RuIs{Ex;%@2)5^A$hw4hc;KujMm{kQRZz9EmD+qX zM=M~=gD@1k20R=&XU`z_{Xf6zvtVv3&Bw`14eqU&JW-`^LZ4dkZK=!6eq_7 zmS%%wQ+>CcmWi1(@*qdYqOPpsk9h)=urZB1FO$4VgvSJmyVA&kwW@WP^Rje*iZee4-ndrLRD zdI&9?9FY!iubs?3lkxE>`28-UAenk7KQf;i+V6r+f;XqqeZa+_b8uCCOq&X*sxk3?SZNAL6 zb2I&HXWVbfy*k6(4&jwsfLc!Fs7rt3%U##dW)!J8Er!2ukir3NYr0lUOj=d22W}NA z{qcW5)cm~Caw$=_*6NltyHJ&Mk5DA*nKbE_UA3m@s+-s*^1ZzNPLS8gY+GBtq3!OO ztCJBF#BQW}@EOe{>A#r$A9Y-sR9`68lap;!&_#CM95^Fs4!7Dd&{zjxHqiqrf5!ce zr4S+_QRgZ?c??Mk;?a=FCS{M;KLdgd`#bL_! zGNZ)l!Jq=7KqR&fnb!;~-+i)TT1d4!{Sg7y`y62+$-GJ3j3i*wac~q+@J>ic zN?e0!)VaY3=n%WJ)WA4+bz;qxfnFLBG?PzNYX+|@n@YwJxU9{jdWTYDj|rUwaKXx< z-0y0y07tN3AOX3Nd)1ozHPBAkKePP7hW=!Zhh5@EE|7LRtV{P3`yHebd9K%@ON}#(pT-4E@n2ZTpuxWV(R>e%`BA#4APT_ zDAaq@gMz?JEi1SGA$6wYo)s~bN0ECGof1R4?V#RK;VUS2dP{I7Pj4J$QoA1FR}s2UL&K`QRE1Xs z+*i-SOVtD?4FFJsqmH}gCGaTNBv?Q8x-izB)vfz!;#Is(nuJLP| zcZ4Xxz+`_UK}7Cwx1f^sb`R%C0d8TbwKMO4D}sbw(%{v}83}e!JlrhGWB7yR)jyZIxbthqeO*~b(j~P}2jG;E z+qPzG4fL!yyj~B6gsTcbj;X1&X4&)i&x49tqqZ!nfRN(9JX0Ld!V{{ z<==$}IQ9EKY*dmI|Di76wK{?PWUD1({khZ(nb5Oq;x&;FpXMVCSvfh zVCt5>M_5&34HCl7NA7=`ax#*94%UW_{w`a`RHqaHJlz}7){p<)gxeNAQN7*e`WDZJ zt<2lQ7f`+UaA`|lrS##E#qt)95IgK=19Elu`}J#IRnlugjwZFxu+Alqh1}=vt2|xd zIkp%3%3I`vnO^(-Jh(2bV*vq%JiI+(nJ8pDt z{MzD_S$wtMNVhKt6_cXdL0B;|^&-y+xgSq(mvD{PIvkTc6-dX4f8g zMCjE_8M$@{^7AVoGzze=xQbZd>kt7&bp1wojm>2Qx(beRvC8;5Dhc~9NOueY0F$hF z>rgxQyjOvtQN|S=H|O#+ax8C#PevM_!Xe9p-M!D@KFFa~-0!~it}e9KT(4+kVFCWs z5aGzx&zr&e4h_-1A$gRo!crnTkIwCmS$%n8DO!||hpdKm15&+{!M*;~e|{8;a;{8@ zdD{e6`N6k_Xp|syuW)1+w5HsX)Vz%Z*q3#pyg?#zlVQlj?)z~HTZ?QmoX3n@#e?2} z&%)gBOeI$Bxim6YHPz9gaJS448<$u(VM;uSv;@s&j65g)!EZ4z{)*gJb5JSCr>oz0 zlB*6=DLq?|{C;9O=kLt@ol;0@C;;z%j+)P}C=2anaL(TsmRtZP-50VbBj{nwkPu@U z2mK{eGtmHq6mQx80DkkF!D-v%jOl4o#YFoRZ+} zMNqiZa@zfrP_>m#MiPZMMEEx+J}Jq!Tl{QF%ON8@9vZfTccNE3cckn zILF=v#Qtf3(Cirst}^F!t+MTmL?xaixm287A;!UThSP8OkBI_~UZ=kF@m4|pHgRuS z>{o|6_6)~|{SDzXcvBZ4`g`@z3G;={7|1=CO1;*zMIZ4sJ;A*^4O@rPKdu}mMZ!h< z?-lWW3pb;SFy-G%c9ud{i$=1n&s28XT-1^zF;gD-7?()NICUsUQOboSxuutu%Ynh! 
ziem&kHb(fBKf6pn3*Gt3m}Nog^n6%zGBtcmB=zeBmTJ$lSx}O^Vv=|iy zyPG!qPcC95NjXTT_iv5bGSv5b$yFQQd|Bdf?!}?xGcEPGDXzf#+HT`up_j-XjMG1Z z#LGu1%2PkCYgavgMO!i1Zn@Zcr>^746d8|VA!b49S^l4eD!o*l&Lo~Ln$@Sag_4vi z)nqY*`2GV~qju**v zp6DD8I9G1c-!Oidlqg&q&ZMQxuvT6hGTWokVV~AXxV0zV{*}BNi;hq8Yde(E;OdD( zpH{D|yz#xcZtUJx$AKu0Gt;MioCc{oPZr9Ik!x+d>&a@p=jN~Q&T+W<=>2?~JakWM zy9a08wd~Kd>GeLY$SDUoNtm&_PYLvNGsbW4&+tWC?Y?(x+Dc(FYfu)Wd*|k_^5pmr>|N7q`yn5N;AyU!7)SFf<3 zM)MuO0_jeAm3=$Y!!8@EB7~qKL`m{^ta_!Bl8H=OfJBAl4N3Pnv2+tDT)nd!wqfBa zmXnKSRz3jf!1%z7?4i*}Gve;=T8ShBuuN ze_nc2LE)Y~<)9r{#BURhu*rK|zS~=13zPA0{(Sl4<8?KE1YpOJ!5PrUSjQ?0aBAF>H?Bq{+8EFFNE#C9 zP)io9tL={j0$oi?zOenawDOq>2)sP2GmC54K0QgI^>Qy%5gm9Lce&O6&%XOxHm!q5 z(dGuj?sTeDz>SFH6!)dK@oERRZ+a~m%CHmuPZ6+>)3=`_*Sx}rrtv=V1~I-M+)A^} zXjHb13mS`CY}&659FKh%b`-u8b4F`rykay1?{=@GX@!8rzQ?D@CpT{^6L!^dT8}(& z7}H}~^qNYy&<%Mz*c^fTHmk?i6CpYC-OAoEZbG?21_UWil~!BOFkzpS4)2Dk$#+&V zSKFt;&$T_9B6i=Mjo^0a=bq%pexTBoUT?-oNak5X)Ar+y+k&Gaw;28Vcz)-}FefJx z>QUr}M86gfF}FvN?Do-b%|*{cwR)kg5u4b1ZuBA-?~9<-G!#{BOKwH&2qr+;1d0XI zM+|NwsXFQWW@&W3#CgopuD8MsA%n+G(|mR7$Nl#(HOaDQ_h#;FHiJUG(q{J(GY2*x z5Y$@}NQqS%!IemS-pCJ{?Uy|gWYNpGZA?L2Z{WVS7dtGhnQLo?Jm49?r;n%!U5Lb}uk@zNzeB zn<>QZ|9r;j1$%S?8`6vD%qgWk-&;v#hHR+UMB7DzuoVi{xSnC16If!KP}Jo0e-cLs zAHbM*Zxi3%sU58iq;D)Z2}C%j*ls_W<7dE$YaWvA3A1T{g-V}RT&o3>$JFg^@r&-) zn^Y32V&>>((5+&p&U+)#rpXR|f?6x7^ZgG3+xJe!B&ob-SbU=pQwHloiNx-ZfrNJy z(EVG{4|sj#`{RI4P)i$Wslho9z&`4p9_Y28xSk0exl7pfI^IbWT_3sL&tbbRcS~S` zl-=gQ)ncBX#c$XT%bzC~g#)#U4nBYG2WPzNelq(>>EcS#9%ro{ih7gpv*}4K7w>t- z7Swjz;$SCRw*EpDi6hFrnkU61x8Co=FM=o7?1~G8M-&31f2Amwss&yL)qAwj=KH4e zZ1j|{T-z6xUSaEm91p&+7l<=)RcbON3wYoD=47D9vVqJ*I79$Q>~@C-SZ;2TuE$uz zy_3{%jvxa-4A8uq?eC<~8b#d!vEDg=!!vqjzs&bJ?_41r$86uHTJ}sAtkR`LGe$QP zS3Pse)85L3EaH;65X=LW<~c1d56VyZMhz_Bh{{@+%L z8j12?3(R+BX>oa9-hK(cFK+wD|3=~nP1z%~d)c_e0o|TWrPW6ZFz=}8q^XiJrz7Dj zkk0O-!`AlM5X?7jxP86unKGaB`kDHbRc;+u3yU_tAl0YBs(MRi_or;rO@aS2+Uv{m zo`Vro^8sY(-iofcdz4fv7{yt_j8BvlnGiFSwoTjo0bK%cys=r>{En)z;q4^n_3Qc? zYu7Dsz^tlTLgwQTkz5PH7P5X|Ag!Y%4ILd>p!vs(K_Q4VhZ(KJExoZP zh0QF1+ZS4x#+-rrHk->UTqJ0H27Y1Z+Nb7Op!;qamr2-MsvLtUP^AvE_+~*_bXCEo<|bgPe_D4| z%Q`q_VOie5YPNJMs1Y~v*l$s}=9Qw~p1q^Gbubz*xhqG}^@unhi7LAV!PYn;9|c8HQ2_E&Dj4D8025C+soWn1%k&) zom5Mt=u^$c+Qxp2YB!CfsODkod~eYXXt~>Ye4H{7){_;L8zA*ad1p!W%CpY+Tj-vJ9SO2_&3^{QG>zY~w@t8n_Ng67I pdUSMz&xnZs@8kdNX#^ldF?SSP9Nsy>|7r_CURoJYDQO)1e*nt+?5h9( literal 0 HcmV?d00001 diff --git a/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_similarity_thumb.png b/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_similarity_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..59370dd40a3a2bbad477dc433467dfbbe0ac56cf GIT binary patch literal 27537 zcmeEuRaYHNur_YNHtw(?!GpWY#sa}LXpkM;-Q6{~6Wl$x+r~AxySw|xyViI9!nryZ zv(|K9%+yp@)zke{)r2X^OQE3O3y)se`-1yA@oS0qr(-t;F9rzgQ1BQqvU%_lOfJa@#c1Y#87kh8C8U8? 
zw4Z~-3}b;xWfaYyMg0+?p}<|xV3r0*eQJZ6?ph%?_j&t%+e9Au_!wJ2YGvigXq%SW zc~M?VZ7sb`hue7bK5vx=nVk!b%uHLy>#%gpJjdoo^egeBV+{k<1CBH)+2l2K$XeR)F z$T!0=Q{Z3U>g(G(quXE`@{4y>tRixqIQgy<5wN&^l1R;d_U=O6<7#oZiup21q6=Ejf zIZAb^@TiSlT0E&SBW*SNh$QGzlpGi(JN#BsxIRp}#OkmE$ybJQszn5%7SpwUlf)dt zzzLX6IrmVn;)3{lB>E^9>$~Cgr;zo|w>>Eg`B8b?#rl$%^k_DZ%-T-$>dX&+}XvkB{y%AZ5d55?v`t3kBkckXff2G*Y@(lAo%Ztc5eN zGbDa(`#a6=aMCqste>J=UTOv)l<<^`v{NI*zqboi8colIbJ6IMu>Ca&OXwL1P&((g z6yrP|{N|{Qo17@rVvI{{Q+exDLxas;Q7)_=IPb8zDllmNq5JAAI^GMcizy&^9c1kL zMml)*J0q8xWnVdwN(_~{^~&Vi6>WO~drhM&hqN$lJDpNyJuNoRHa*+fH~|w{EUG9x z01jHkMOCh=+YURfgI9%iMpGcL&i~oGd~RNRa<+7}Ewy4InU#M&cJG9t)D8Y;m?FpI z3yv*L3fUP@R;ZXG@Ro(W)$roUJkrOp0b5`9Slz*GFzZ}`e=>#(NEzRxU>FJH(D?%S zf?b0;Xp8K-A(Zlou&q*h^|5%BY|b76Dp@H3<-@q66BAg3S0R@png~u9>qe$a6yg(8 z&Xcx#x|f^A?B2QHyjICXaNk8QHclROo>#Qrhg&-reznc_BmG+{5Uhdi$_XPB4qW^!=W+kY-YTc7KxpQ#@Ql$jVjEINe*9gU zRdCWda&Tz3gy>CXr^NMt8=B3$J;c>&Y~sNhbA-$noA;8*%g6cwY~Rtfj6Z8lfed;{NG2W z^kfEIPdARpAfa)V!%#ZVN{2rroucg_?xA&{8vr2q_FoM%scnNtRc86Fmt^5U*lc zx1)&Lud?;e`xz1!OT6p1S84N1j8QMOLKZ35U^w9IT`(wfZ;N{aTA$US?pN?^T~<~B zVg6DX!?9Rx7V?lp+!cKIN`&3L(7kkQv_piQmEz$<@za+)x9WGa0~LM;N38NEpBep^ zeaAgmE!Tl3VwD67__cIL9{)iSYL(DPlxdi7QD!O!B>G%R{!W&oyT35O(m@^*Vd(@T zlq-(NBV^%dV@sc++r(Md;`ie8r`vAlo!eN4qGbeQy~Yj5Qn>33#s?*Lx!0BtDzbGO zdD3L^{*1!ys4DVCvIDLdi|N7Y_e@9JXM$CaITNVf6AOWv% zcpQPz7s_M*47`^mbFIuw=GW6iw!0+$^4FI?cP}M1wl#_t=9AI?EX4O!XZns1|G`!U zII&o4AnacKy*eUi-f!PkwP-O)@%U8q64b?UUn;P z$`6>L+mL4d*y`KMQ&!bb))tZZOQ#j}l~V0Alk3X7*U602oX2!ys(o7OY~ap2?k&ip z79*c*@NEZylwEr<^67QGrRk|cIaMg-S*~&s+RnQaaFc>C2I|=W)@JqB6WmwExxnX> z-;+@Kh8t7Y3#$xZzWct`+BD}=fq_5nb zmbVzAVpxMX%ga^il|Ax)$qM!{H3dQNzU?m)f? z#=e&5r@cPkvU#7kkGbCVNY)cfWi+gN$ytQ0<ds?mGm zWR_N*m-+9#6T;fLDkYmA--Bu4W4#8B#`|^)^d5bl^N!1Wl1%!g2H0NVFMi?j`aQ!@ zl+t5dm&A9s7_3nBl=9B|-E$efxUCW5^m5Z2}^f-g)JU2= zu^>z7Z{F@l_*qrWr&f3M%@TF?6AK`)o4$ zXZTTNe%wVp{X$DLP2T~Ofi@;D8LeDDNb-3Z{&6oRBFwrrD0Db(W+zN*icXDJ_LKWs z+`(l_*MSp;lJYc^+yXum{=K9?2w!9Pjo;d{lkK_tlS==kc*Tip;ZqZQEL!RKyyk27 z`Sh}Y!H*Y?Erm~a!hZ--9onK)kU}Bt>GKQU8w1uhh>$7KbkOtud>8W0=~+ui7Va8sDmc`5J_4YYLR?Twr+mz8^23|(d*yo82OzmjUQU#G!$It#w;R>V?IZ4*w_JAD_A5;-L@o0% zN<*yibAYkBUd-q~M&j1?-@^;p4hH1ELq!xtIZStFqJK5DLe5MMb@*kyH@=F!d6+t* z+&unlytj+r6S;#95bmrYeDBsC$&{bJbwC&XvZWC2KwAtJO0n?FSD2mihVB|AiJKnf zE=hjXQ=6)j7{`d_@XsPMdr6d>WJYc5ZvDDed)6a7JO0RBcD6*Q=F|N>dfQ%6Rnm?q zH;|tYK6q(C*XevfGHl8UHwGW%I)m(kCmF4m2_(z*rQV(y6trnf(rQyMf6w0b5K004URrjJED|Np{ z1=R@XWsmb_mo>2em6!SO!Z-E%h0NUU-}(j?D%vJsi#=~GwaNzZCJqhL`wMAjFlDkr z7ZrQPUs1;O+O??jspgu)*&|@hRcIarAczSH@9uW9^oOh(TfpW9gh@7<2NCF_Z>>jj ztfR*(Fe;9|;Q-$~inEBEJ;)JD5ZMH#E1E7L=U>}vrJZ*cr0+ZnuVh@JgE5KDktcT&|=)ViiPp^vwk$0=JqoN1>kV zHSxH??q}Axr}&3@LQWoY{%A3CLd9U*qqJf4A|9%P;LwP2hd!qX+N5Z@yqUY6D|8Oe z`1rtqTY9aJw<+OcH{*s+7j|5?`@JV(@~+CaA*!a<1<*1_>A@*LB;ZJXxE&k*JDtIv z^-IcHPW9jdY#*yKaiLXLL+Dif)aUel>Pj)p$5_A7)P z!TmdwCTrXb%kDcTD=i7JPHVoxp>!ZlY`;7+cXBI;tm<$c>3HpYUARd!+bzPSF7^OE zJBiKuVbyWA-G8-yTO+S#6UK)q)L+^nAx}{CD^-gv966!34n0HW#Np52X2Fq{u@-N+ zLm;WLaYnsck>?yjy5A?II3jl`tr`VG+!`ZCCx5MRb01|>?R7xZ{s=2*3hgo3(rOZF z;^n2tF7kw`LBU5sT0?7EXO7vVWAD4F06nEyeb{l-)^=e(=uU+bujI9Yx)krgyA>6l z?EK0YHhh0B5f_1)&AhF%5EM}&c^+{jO`lza%yggf(`LK;2Sy_0t$y7Em_ag!gYx9N zt9!f_{pI?HMet>lDfw+$ z=lU5k@Po$j9U=B(MG{{>@e+rsKRnZPA>8@%TS~ll$-DIN~NOQIsm#z9#mBh zjNh&^pY;3J7(syWv8x~NUkmFhY-P3r-4{#`sHRxM6%t2*_0-gew!NV!#j<{3txB$( z5Kz+B9PQ>NudjrD+Lm}3cBLw(_Y7>od-fhoc7#1yR()+;Y_iV_zlPL#zI@>pV#+PD z%SLv?V^NszfyD%fWtAk`^|WOqHSx;$&|zU%<@B@N{$7(ZDY@_X{UO02EO(wmyGsAz zl$pDL zWeb-=2<9hOGxIk)cSP0GXV`X`&r>VSK=1iUR{5pqIXVNMulzH+#=I@7SXrG!i|KmD`~lK2bb2wN;Gv{E(z#D7MY4%#w0`@#ZHo 
zb#LM(u9F7A(vMgflTyrnINax*xK*8be$J7c?QQ{|ZkOfj?uR%eCezn7 z%M#C)ne@A0{OwWrdwn4*ZkDsC_Vmf8Yi~?wcj>xvkx+Ec_e5Ui$`Q?zz~#!Mq(wTO z)*&~Fo#JDcEU>dDSo?v+USB3lR;+KvinLR~P2}Bwzm#Imkkyc~FR%kAX=_hx%MG_B zCpuj6mc1A-BKD@8;;dJn4?4z{9=Q+nNU6wQ#bQr5zS+utqH!LbV-G~Z z;sC|TlE`)K!{;95Mm{1?mZCA&Sn?SyLSxhPnp~8hbC8T@4)cQ+pNEIvayb;h4#_-U%vX(Uk_JVpH18^m7LA~YP=nxe4l%Q zJHP?oW=Nrs@lTY*R7m?BDAkxxtLwj`SFbx{w-iNcB#DxD9s7>J_kOg z%w0l#ZyZjPn*I3Kak|Z2H`&b1Tq_OQ&(m5{jyg%9=YOs?jBuGfR&awWq@PzMocj$_ z3ZZg;NEj3gpMElO0 z+RoK)KE>*5&z+FJbyXq)Va5u+cxIiDv(}=6gwl;Q+h20_q>ThKDCML{FoR4EgU{f2 zF~NJ_Vaj<)MUReuB%OWzKHJ?Zr*=z&(is2AK#J={Q?vPer+emFEfonKS{((U*;HRv zsY-e*6(9B4%r{G$#4vehNur)5lfAX#$wSOO5*zeVN@z1IFXn%c1*KU=BJ3mY7lZK# z>U#S9uINr^RfvCL(e}ZLEYlNCp~LT>;3kW2Da}&WH0p4Ha z=pS2SdTdre+Tqixkv4yVcq|1DfPVp1V4eH?K6eaX6oNb8JB9I>`OP3mlgTJkGKo9e z;d%z$2ng4&-14>=3&kd1uN6ua>rTP{!T9TfnIt%}?R%BX!V59kP_3h>XxUQBgxl2z z8mpX#eZ1(6wZwa>O0OKque4XXN`1aJKSf)u@dO^TZ$P^1mV>H=ZWQ7?F#{~&Zr@kg zGwd9))ygDg94Pqdqaf#O4)!Y$Rby1z>05phZx6Go7FjF_^0`H-v_G^HHbA(lPt(g83YcF$?@s>@J*vP$-&4 zndT2+3G)RgvVoKz7JkzCoty8l6?6a}krl$&&pUNTtOqqSy5NBE4ZPB%W=axTjc$l<%=<f1_8&N9)%E|R;NwP{b^;XXU8SDTP^PvhwDgN7KJs8>RL&9u)g3^r zY|5pPuaNF%5&S=(nTJdHZ_vic$O#)|(FBtU9x!+KF7|$APW)J_7;tD9^%#9CVAU5E zV1kJD=Px5G8iBNfd=ItX4)P1y_!&ODwUDx%qSjQe8N0=M!Wt9kR1)&j7Uf8*M9yEX zc_!*%A)qtC1bhj_G$pY_A6t8lJ|6?RDDI&@5J;d*#?mp%zhdS^3xVQek;MUm-oP1;WOOWl>O4k7NSZ+3`HYD-H) z8;2BlQM585wWI2Dpj}5hVm1q6{6^ppE?V2)=LwWEMu@v)$h=zq;O*a^r(YX=3+{d3 zpSe~O-y^CzpC^jBxCH}YDNgyTvQWj_6&lCM19?Ml7%brt2GM0dWTZ=QqaFd< z4XtwfdbfY!TEu`;%{i6y=+OT9PQKpu6x-SU330M|&Q@}W3zKP@Y) zVDkBwi!OX7E}@VmMt0Hs&jdxK(zUp<0w{<#7(^66%3kR|uRQQnBO>6~nFs5u-R*{+ zrC{rP2Ve@ihD2^U^y5!DL@_{8Ta3Q)&`n!6hCYxUgvE ztq$Z5SnQ1D+mx^cV$e&NCAF^75QH*N!&kze5pJPm$sc%Az_^C><4#q7W91(otWhZ* zkM)0v{x;(3vd$XNX4ax=+iTTfNKJ@=j4d250i%8!z);+7I@^)rnO++4 zy)(_ovj+t0`AOTcqvnApk}v%9q?3Y_)hmk4Vo3OQd+>ZU^vA|TW;;g3`QshLaP})i zQ-jS4P)a&>3?e8i?6^~+6qFS>r2e9-a1m3hZ8-P1+^s}{I%2ijz1C}a^dCWxvUF8_ zuxf4TpcDU+;jH6iGJqph$TDD?oP}<*eN$}Dd;Ja6RnBscAV$qc;HeNSZhS1DzH!|3 zy>aXsc;F`1Ah0qZ*zyt7si2>}S6IJ|_dIzBMkHfehY#{Ojol>78zZf>cU%@z#uDlk z2ltGfEyJ?j`>OcjH-U@Ml}+>6D0(RxVMA{uj76)<8thI5^z@X+=7U!|q|{I-Jc<@^ zcwc?o@49B#qevWDEm_VDuDVQA<1;y2?bZA?tee+u@ZG=XX-7*VkdYN5snuRL^t3+* z8zyt&$nxDci;sHVXicX6X*dNWssB996vx^*&mchV4E?pgeyVydX+)_ZAO4Rw0d&+7 zG_(e=M#PNkmS?w{dbYY!^$TSP4L{21+tjP)X~k!~bQvU^)s5_Sl!S9El4s^bo@vXhtC zfJ)S#^qSgDyGdO}NmN3u7Z8h&euhe|P^J7Fm}7M3^JqOEZ-#YY?muP2(x=MKC}h3n z2J*vk1h0-u>3+bk2MmXrQyblMS+z#XKpCY@jJnep<5|Who$E!HIWdcN4c%xKpgeX8ne#T;hdT$G!+ZJ;I|ax%625r2 z&?J6d;dff9!D_Vd?-Uo(#e?X%vf<6phOJ4i!|&Ja zcRYUX+(0iuyy*g8C>FXonrgX~p$N&Kqv3hDlFU-9jG38TPJUC&kDbP<3DM{JEfea+ zud-atwGpKW_nCP8w)C0QF}FnP`uZO-b_9_7 zwkH1U+jB4D#iit6R=T;du0RHM@+Tzj6|0%{HR9hVC{p=YfA-~>rPmm)6xX=%lMs3( zOBr_rl@7zDJJ*J?PMVgrh1&Y82ph|Co#(FOEOw~sx)fj)pv-P($zGK*3=Ki+7zOGqcg>N2}<32q);2FF3`Bp-xZ5i1Jj$Ly?hpULl{zgQl6G@7JZJa{EwxG@j#4@M$ z7y}#~8{z~nI$f5hlS+40=Y&6X2p9(+*R%IxQ&va$dj zAlJG#2j3pHtWD;zKJ9q|vv?rN!7obM6%YK_Rr-`IN$@o4J!MxVUs^=JLrr}`U-iId zDm1vF03ozL(4|IURpG!s+a z_9FT-LyLu8HrYPACT`zw7hdB9SMk2TN#I57biYvy$<_5m*K14 zYi(pN^--;(jqozs?JSg8=Dp8A~3TE(AFQW(-~|gkraGmwmuSQZak*a-P^EBrSqG3 zq<_u_`KnX8#s0K(2aJ2m9*8>-7KKz(ia~OB=}SvEHeqc;2w3mYx|u245H4qjdxOf| z9`>iUY@lg^S4VgB=zIk}v+WucK^}?o`VTy8D)02O&+qZ1YAx9VVw#uhMYK6K^kTY; ze6O~VVRA(xdCL5m;p!4gat}Dzzut=4JeJl>J2TG=q~%4RL@;rH@}u&RuHskNg+Gh5 zk9ew8N2oh@GH$IKouo<%xt|Ds;Sd%*ayyYV!+PDk)G2)7W=DIBim&k89FTl00>7b2 z=-nT1ueR^sK_5{Fo`Do`K?mE6v@0YQJF}N@Z55L{e)|a=@p}i-Y9zyP)&9?Dc5}z) ziEDcIyYl6^=?~#*mjZa53O=_AWsIK84KV_MtGIXc*-p7MjiqE|jGfImSU14U<96Ot 
zbEm!MQ|2{sI)3ugozzi>CY;yMX6S~BRucACF;%x1-EX^fnUtC$}DmcGRi`bg?Bzr@F^6_bY=rX_@`Q89`Z(T7lY8krU96 zhR58mIYD6Fr56m>bKN$(fx)@ia(IC`wJ+x<^i?sU91eXR%4)|=UvoEe1Ls<^zJJ}% zD;gjabi*<8!kL}vwQ{=>&HoT04b`vHscH3PORmn{qT*%*pZg?zo_`lZ(6*57jWuJN zi49QOwzg;i7$qC|`E`+q&8|#D-B-2@w@M*NsOsPrTsM{qYpnrdR|ClPcxDdw+EsBj8p~R0`jel&ON3Uuy1&WQRscko}~AhFlJX z0;5W2(@iaFg45zJ+i}U!t#8FwsZNrSQozsefqCtEF5lEX&LAQHfZM3OSs2n@H1Dys zvr=3c}>O^$K7a9fFCw zoK~3m``yy8KXLqWuaAzlSmT3zA#t+%y8{H4pzh~&S^Mi9rd!>NSrybbdN=)hz5A) z&!Jol{M4_2tG`$DGOFOZ*OAX@(Ner$$>RwsZb*E(0PTURTMT53eTy;10>_ZJcU9qbw9mZV5 zygY7xnuQ%JPZ_Ptrc1zg-bpW!htd~G*Cj&qmUaKMerJ9iPSI%C=mK?5feE^q;vZK` zPUmz0MIJEa0v5Ww0Q)ULCvD)Uy6y*{P_4-Gf&)&TFj^!=F7y?U`>di)r&A(Q^;MG4I<=|LZIoTeq{m!B+ zKt;6Yn*>f>Su;NPhd6hhMRmFC!)e**f!`=>r=ehUJcCX~b+!hp?@;HqQiOo}Y(7pkd>@a0>E9hXi;ma^H?>Aon^mK_f8ARDOIt-s zGBi$N9^AQv|eqU zQ`gKcLdaVwywMsw>6Eo__PQIt!?OCHY=WCEP^c6priAWz#V)UJ@k@>HJ|O5%@`uht zqJ~e_%cb6}X`YNZCBCJ4S~j~yp380cU;*=fIrs z4^BQNB&qc6iQuKjv>IwhMdDhYNqslDldH4lZd!Hwk;F$V5F1+9f8?iHHm>8!WgJ0S zVR$J<)`AW5L||)HHey4eN&~6%8C+P!{Hi-0KWXS06Bb-udsmrr1}aVqQvq4^4Q&seegE>dq+`1aY0iPEe=8AXgMjYO`={z7y zfu9*``q-Cr*z^(5KFIjTECk+cTbmoKL2k?sqY4KPaM1t&AQDF-;}U%c_y(&}m~Cg^ z4azNbNNhAA15?Mzcx@|l`ZHU#F?gi#r(J=*o-T0ZNj82_Nk8%y&`hld$Nje_n*4j2+gz`NefY++}3 zIB3*KyjXq6NZnC#|7IzW4k%(%0-9UlX(+MUJBB3eP%jUpamS3vRDbYlF=46wt-=g| z&`OWiD(*F_sk;3W7#s@bN(YX@AfCS}g=FEnkj!@5&wA{g<~2mv)iN93>_3~5ImDQh zmr9!eYC^SD8^8C;l?$J*#Wc~G24gGYG(zR3Fwlp~iX2Tugtj(q5}YZ47-FW7l*42l z8DVg(0b_oQpVHK*f5v0xwx|<@qligNg@(Et)uPCe+laD$B5B({Kz~)F@u0gE`Av@N zM^4nXZ-7^En%msWNqwLdno_!!<sqW0^L z#ZO1nc}n9zir*5+sKDOOy+fF2NYHAHnuKBnOkWJY$m4>BP1RdmQ{5Iz%I7B}<;kk| zree%lpKz2jD0Yl;bU7^jZ!k~umf#NqUxEIhc5ETq_6T|E&!zEDahKo2_EEM zJuuyaGqWuZ4CQ&&!`2u;5VZo4vbG~Nc(<%(lT=Nz;32%5 zjS_&$jsFi5l|?DaoF^r0y0@BfCIDqZX?=3QO**eHq#k(`*2ZX7t2zt#o%55ykMk@J zcpMi_MKw4Fcsoay#vMcIlw3fcVwCTnX-KHklcOL!Nl|6*rBFAY%>QrpI<)glll|!W zauz|M_^%nPAs^~v^xIPQe}*P)L)Nwiu$pjlzF4D2Ukao%EoCyFps5jw{xNtjbq#Jz z480QG+MS_#Tfuy})gwdfgMx45%lr=YgJ6bsJAdo@P)GudbS?3>4VVj)4MeqB?a>_V zb;xI`>n8*anfP*6Xc8`07*{M;_00>+k!0R*}Vj*zGNS^jocn;7YG zA&Y9OirTBRl|wUwFtAYZMCOqPTTTdy+(_{so;&|H%Z>1Cib-OpS$KXs%zrxgm>tOS4Gd)G@t^=+f08k4Fe!Qo-2Gr;u1YDO-7<=xW7xsj-O*AZ~54!ir=dfSzx+laUk!PPIGm{mn* zY_;TxVPOgWykgAg70Y&XwlAO*v{IHMnu`ywRrW>8!M{AkmJTd7C}|fKoFt;aX$hk% z)?iktJ6jzV_?<)BR{$$~0=jdkSC{P8!epoMnT7nTd~#w`sx*7cU;27vjy$)K%R>Gj zzns|~B7@8lV=)0m0M;n-a-z1*s zZ*W>f+u|cCEne+--2AZ@n)9ee))~bo7kK3!$AJB?k(H%dH$d;mv3IfNv}GmURj~ zb@}IB0IpF>!h5XzPivwkQ3UgOgK1^urOQNQ^9Su7sbl36Gw*J&q_6NG$;pZ$(^YpC zl#^^4aM4?{SGBu@zMS&(OM(H2MBw+#s|(jsb71)hKGdY~_|VDkh?G}=x^Az# zifhKec9Rxwv|8@kxn{HT7wa{?BDErBqC%z2KEhuXJ=z>q*bjiN?;b#v%szEySHtfzi{O!Q^J5Iu2qAL5b0$}Vn@4^b;Z-2y_QvjcnTnF z8&k5+3#7J0Icyr!CfmpgXWcs>sUL9>945~G%JZTLg`#qL0J02anTPX zTQ`m=ARF8sKM_bDc@HPn_aHUPDxBqs8U)buM?Pfmsz`e>6?^-8=ATl2zo3>5XxWJS zcT00tNrgl!EyxWMunzL6|F^MPz>6UMrah~J*x#tT(%(-KlY3)(m@bV`&dkwDZ8@#` z!^B}1FHKkUMbOYG&iv4dTF;X8atF!ynbH?jXHiz!c}+7v*BReTLN;_O1I+5#$?w)7 z+p!rB@`SrDlvYofa;zu7IUNQl<;zccKTFumM_zdx;i0>e-Cu};keyZ&mm$rSTLnVJ zBWEzU^s8-I&qsVM&DI??9ZFWABW%iFix!WrSsX8eXUE;?`wBfmiuA9uT!&RF)|AsDu1gIpm)tQ`Bc#NTSF; zEv)a?(i-lQ(%Y}`eg5>F#+2B2-;*kMb1V>mESd?eozMUm2lC|i5;fj470IdEn+e-D z+>o9nEq1!x25`rxMHryIWF1vbwgJZQQre=Hl>TX}jPO%;6ahYSxkdEzQKtG z4O^mk6ZxnrI{Z#fXbch@CNA(3P#P(`culW>rD@9HzNWoMvK@oZ%_Yv`8@!Ddqf2Hm zzt1_?EwTnzknb$mbzN@G@CG*_DV2M*+=_mC&#pAm?Yf;TvqoNcobx)FJx-p?krP54 zaK!$v>}WcxPD+ue$K8|Xf=q3{@lf{(tLzgM>xe!kGlte|I$lg2CENb(iX^o0hMVq8 zfZSw7qK4Cu>SiVJ65{Kf@KJY%_wl#Y7BFHqYamLVSU&TXuFEk}M|q_z-j#`%-OQ#3 z(4wvPAa&w*%)3j#@3PiXj^}+1u2XXTAIB!8Z-)mNCxkOo#@_;E0$DSXw&4{m(S&Co 
z&*DVSqbt^hinDKmy>sT9%21hZjr^de;=Wxd$JUY=-k^nOZ!}(Y8Ok~d5uNX(X8D+Z z&d6fC>ijeOt2l%|UP|uBPhg3w<_L>y{vyR_x75idvE^=#wtWnlP7(s-XZ$T36fC}B z^wFUajjOmmfKcV=!FqaHi9r6gxZU0H=3H{#;>r-f$^t)xq>aW|4ilKw%#7W-PdDF! z$4w)45|a~ULOj+H#gVN%tCDky@hJWRKh=b@TFHEKvT|(EUOibdAO*YaHVNB27#~iK zWf~Sp(KwQ~a_g{Gf{$TIdWZ3zaZgcYyRV6X-XQIj5riN_P=)g)=?hGVuI7n)TjQSE zNfO&4ggR8s**vl!PHr8IJ}IoUXETT7VNzJGmia$6VT@^7{}JDA`^m%bHNNyx^TdWD zG}DkpfD@4octCsGTM3b962QxYUWI=FrE?DD>66C; z_UBgGkCsQe1{B7h)J=a!TN*vLr`aNGtYf*KlW@@lG9+t_isg!>#cLJUD@M;(sx2Qx zw_aD+-zM70BcxwtB{1nG6^oxik(a)qq`Jgl)<&8ha*zQScAeEkW}Id-(7A zpC2map1?b9FdaUXU7g2Tft@#k}HcKgOdKbK0eXM(M+mwuOmZoi-T;Q}%d(Cs!a#5H@H>${*6} zQ+_TtGk6+=VI8f-5gQ#*1-jlvX-^ERk_U@|iT(@+axbUqWHI$bL`8PO^YQ-6rdmwG zxh01OdnGW<4x}@-y;yNEHI}n-2ZL2iU6H>=qu|irGX<7Nk&y{D+Ki&#Qd`6_ipPRE z%yPd^RNZzwUh;aoam*{wwQY!Uh-xpvF> zqZrFYDWv)lgK=CRNxA0rV^&AFFRtqJAQs?dm}R-dC3y_T?Hm^**2YzPmS~wQC8QkObD+Matuge8`|>gr=)H? z;1~|5;OKq;ARP9oVpJP#YCD< z_TtHYbW%##p`^e{!YReXfEBj=P~ElZf2Ti}w`Rl{9#JXpG+vnR^>()XZ5c?V`i|D^ zhPN5oMSG@MP8PLUieXxPg&*VMqmCIDi!9~4IJz5Y>~1o4^1QBf^7=SqqqRY&l!{yQ1-Oz$;*()*AUwCRpPU0X#dtR& zV<+gZg?+z>@p{Fgl}ZlH{ZTC*7rWnAD+lowQAMV2Qqhd8Lu%&va<;17-Xx@cP{vLe zaVnOLW^1`{`u&U%?+%0H^#ogVllp;Y& zad-FP8r&hcTW}Ao+|N7j_w@tT$ILa8$(+}mhwj^+dFRau9iRR38z0KM$$JzDY>Y33 z){cDb(RTr$!9J09{>Pual{^GKgfeJQdM9NFyEb~tXf}(zS_+S$Fd@Ktj@!Y))Cry2Wv3n>~119^xHRNxRPj4Ex0Zs ztlzC%(4k|l&b88BdtVd3Dn8~YiKNyBh&6O-u-I#V`eLb4v^&YP%COj8yV~|r1bsi6 z;TG!kH&wT*Q}xg55%^M#PZu{0I3*1lAtaxRs@RLhZP z>Z<93B;Z^5b}_6tG!pMGP44n;Cn_sXk(7?E8lfl3;j25aRj)-n4HPFz2h>;op_78R z%ZkpYE`Cm$i3aJ{XyykM(u`*m_~zSZp7W|Q<@^5DbQ>YvIOX||=ij05mVqR4zC>0H z;bDWU(b>`1NuzdP^V0)UjqPJ)#{=bcfiCBAO`;+T0m#`}4t)!l9Q=spYGox_Y)fs) zXkcO9Ldm{<DQtb|Zo6o;OoZMx8|I8jtI%n<)or-HTxfT3!kS9Ji~bgUG^=?o z=R!fL>zFDpfxYDvi*^!}=2ItWi2}k;^**D`O0a8o`Mj`D$L*Y4l+K)O#I!xeN62T? zG99 zxU4<@Ik{)|fUs!j&#)4&6>oE}yd!Cro$rv2UR?M5VYWqG{DpDr+4h&Zm5+6A2O}V} zgK)*t(~R)$W47a!kdbt00zOF>&G<~Da+$)v=R~E@=KavX<4@!H-o6|1%Lj0!cTmQ= z{=dI{E~ytamqEB3F%^NrTdBG$2|->9T>@xqqr*@@u@F9rztb zG8u1aTw2lG<|+B8k)t@_xo~*Iu7uRr+8t8#veZ$R-!?d@y{aatib{I;Y-rDfOuSG zRV~}+8qCf*l}j8$J!Y%b(=L@{D`bZ?Jw1*G4%Yb+PYrH_q&ETnwIDAG-&diSEI78k z{BoJo__ALrETs$L`C#+Uy^ASzUi*Hau>XZx!w5kDkGpRL= zPL%H24Q1g)nv03eOm~_1gC@W%Ns6YXa7#|_@xo5k-A}4Ys=j$Zh(qfX{j~af9|KP#K-ut+(NLHGu9$YpW=_91prqnsNH>}E;{#Vk z#cZku$DNC?$=z};5q89rVtGH3t}=ourNS*M#OK_t5ta!YyQj9@p*!Ua<_p|CP{XmeQwpO3=??#x!#@M0Ee`N{e^9Ad&U8s}Tu4yTK--aR2)Mv8&j( z2E6Ofugf=Lu*yiWukb|~oltIRQ?}*Ie+dhuF5`dviKS zOt?f}W?&LX=ij0@;znSbm=1`%qunr$_10`ua_1YXE`+R#S39 zCp>%^N}m`WcRnPn75a2IEXo5C-c($F5Td(r(()}sbAG)G3rW}6lVtdtdsxBEste`F zblZ=r99|Ubs`q;EuJRZ+_FJ^`nJu6$@37>|TNa+o^nYZc^Xb`4gVPPjtQD=xU)?NL zx?lA_&>?dh`@dSWQ_;Lf#;`}mLy>pU@;P zypIgfiLuO-C(5~9tA7=vPRf3b7GC>xY>W4eDAhP4rzYX<$?Y3=6?5HgzDSJNl^tv> z5fW0nk>3S%TV~c^#%tLdrl*E!7q~Hn&Jrx49PTw({df~&l&9ZSx;s)V&yVelE}Rb( zlG?-G!Yg)U@20(rLPrd@KBOs-a;=*UAI)=luY$RT`fwzl-D^i|s|pysn^6)w|FjwZ*Xpq&bIt{k^bOdM*B<9B}2jt_ZIdvh>yzl<%p%5{` zTIj+Lus2-53Q9y66il{)*U1ohIAS_< zgjt39d#XGk^b*N8v1G>MO4RU9{MY7EDe-%NGcXHa?p&~Lr+*5(E_1ba{|Z;@((cWx zHO+i+=ZN{0B~Co^x4VPwYa(W%02SUR>g#HpNu;2Ik5A}+63iYRPN~0CKK8WO=nWceqNcxEgI@B-O zM9Q!e9NY+2^P*Bc4>r{xcjZKdxqfuFPGz{cE#DQQdGyajhvMX=qzl2H)AH4t+TipM zWZ{ioX!DPG!c3&*c#K~(gQA-o=y+GZVky!gO=!8yQ#{w;(dn>}Znd37G32+>R(YW7 zI8syzlh{^!@AM}K^?zVNQ`D}*RJKv$%jIByuPmUxQtKz;(B$lchs7}tDu94}&Xn@` z#f3^4^T@C7X$zpbq^pp+HH6zD)J(1Lht|dKYOaiQH-HFh37nXj4ZJTCcUe63)yS7? 
zKRyw3ze;GnhIRrP8lw-mnQ586A9rq1-BA40zE%0$65yF|bRD^NCAg74T<+Hc!K1@B zSso*GV7)VhWqr>I*gt<;V)*zUZnMy$(jPeaj~I!#m+1%?0lx)c5p{yCV1V1o->&A^ z2se$jI~m%^6}rkFBxUi}(x{*hGarT=AE@Ym`urotOLZc2@GxbsGHqQyt>+GHSQ*P` zrugeu&i=kIs4;SOPy}A6i0hkIBN^1maU#9H_Mgy{NFEku#F>)*M6Li=ej0!|vQ~1O zJdTcjapMlGkYG~`36MY-#)!;$<=rLW01Ei?;5I)6GhkVqTJV!!I;NIE`)Geho^^z; zZS-&w`}_2wpeTDst~wJmdP$G9fB`c!(BI-B>?)*t&j5?R-)G z&~)9+u+QH?@F!55l_N!=(3m6)>JpvD5Kyfdyk} zYQg7{k!Go{^ru+lDc-~mBC*BHX)4w~owPuet1{vT$n!;W`cjeI-rUqGNw4 zkH;(U)fo_y_VKewSOV&G_FLr+*>2I^)VnrG>)YE}H((kf#Jgluvl^3lR_1J4v(=g` zVtEK2W@4{H_Wy-pCSt@Q0nKASh`VdRaYWubvL9rGNL}knOiM0?0<3ZaZj$b5z#_2? z!jFHYalh{!ecjVwTa!1_dWxAf)1)m{Oq+D(qE^az-xoS-enyQ|nh`^*1N6;@AF}y(M1G_Pw)>6W*wh@Wv%S!6_d&^@)?M zbwyBl!n8w1XctJ<*h*`CCb1gQw(b$|?a~;I9l5c+ReRk(jIaHc*48$IG+$%epI7zQ z1phh+F;YDFyY_J^D^i*G;PJ2KVSg3IeTesuuh6dlp|)K5{|EqZgM&@dYjH;@U-k0> z+=a>;`*wE}j51+k9KhcU6Fwt7mMRUlrhf~#k2Jr$sh+}UXgeyg&H zGH|;6=p5Y*0j~tvNY(Yt0=Nc=on*7Sh)u)9Jl{B9CwZ%Jt9oO|64o&dvu(ETn(90D zF%X20S(TVic|E!fE@okigpH_QGAZmASa-?ziVDetf-*YSDvM z^v}xx4V}BgVXeW_06&IoLBUe#d3uI8w~b0 zw5tI~n+e{6{{py@B96;Y)^!h4_xpE?J%yG26mf~r={azzo<~OK@&6joch?lm9NzAS zeAeyrldrg|d`CDrKvXIlBu^pS-V`WBoN1401c_}Y09OY87$BNq`?-?{CzkE;o$x(W zYbxxNhSyd`sPMsKBKE=S`OBU{u4R0aCr1$hejnc@dpKyeQ(VN*u$pPMwiV@+cQD@h zpTr9V7u6d5#}z`XBCj32hn@(T6x^q>S`sQVzqD}~4Xe$3jr^XzkbD^k^ z!C0K7Zq&hzbth_b{w%SSS@`yEDuW}X++l157pgX8g~0P%|j zuX1!9?mn`dwa16vkiYuB_4*l8RuIs{Ex;%@2)5^A$hw4hc;KujMm{kQRZz9EmD+qX zM=M~=gD@1k20R=&XU`z_{Xf6zvtVv3&Bw`14eqU&JW-`^LZ4dkZK=!6eq_7 zmS%%wQ+>CcmWi1(@*qdYqOPpsk9h)=urZB1FO$4VgvSJmyVA&kwW@WP^Rje*iZee4-ndrLRD zdI&9?9FY!iubs?3lkxE>`28-UAenk7KQf;i+V6r+f;XqqeZa+_b8uCCOq&X*sxk3?SZNAL6 zb2I&HXWVbfy*k6(4&jwsfLc!Fs7rt3%U##dW)!J8Er!2ukir3NYr0lUOj=d22W}NA z{qcW5)cm~Caw$=_*6NltyHJ&Mk5DA*nKbE_UA3m@s+-s*^1ZzNPLS8gY+GBtq3!OO ztCJBF#BQW}@EOe{>A#r$A9Y-sR9`68lap;!&_#CM95^Fs4!7Dd&{zjxHqiqrf5!ce zr4S+_QRgZ?c??Mk;?a=FCS{M;KLdgd`#bL_! zGNZ)l!Jq=7KqR&fnb!;~-+i)TT1d4!{Sg7y`y62+$-GJ3j3i*wac~q+@J>ic zN?e0!)VaY3=n%WJ)WA4+bz;qxfnFLBG?PzNYX+|@n@YwJxU9{jdWTYDj|rUwaKXx< z-0y0y07tN3AOX3Nd)1ozHPBAkKePP7hW=!Zhh5@EE|7LRtV{P3`yHebd9K%@ON}#(pT-4E@n2ZTpuxWV(R>e%`BA#4APT_ zDAaq@gMz?JEi1SGA$6wYo)s~bN0ECGof1R4?V#RK;VUS2dP{I7Pj4J$QoA1FR}s2UL&K`QRE1Xs z+*i-SOVtD?4FFJsqmH}gCGaTNBv?Q8x-izB)vfz!;#Is(nuJLP| zcZ4Xxz+`_UK}7Cwx1f^sb`R%C0d8TbwKMO4D}sbw(%{v}83}e!JlrhGWB7yR)jyZIxbthqeO*~b(j~P}2jG;E z+qPzG4fL!yyj~B6gsTcbj;X1&X4&)i&x49tqqZ!nfRN(9JX0Ld!V{{ z<==$}IQ9EKY*dmI|Di76wK{?PWUD1({khZ(nb5Oq;x&;FpXMVCSvfh zVCt5>M_5&34HCl7NA7=`ax#*94%UW_{w`a`RHqaHJlz}7){p<)gxeNAQN7*e`WDZJ zt<2lQ7f`+UaA`|lrS##E#qt)95IgK=19Elu`}J#IRnlugjwZFxu+Alqh1}=vt2|xd zIkp%3%3I`vnO^(-Jh(2bV*vq%JiI+(nJ8pDt z{MzD_S$wtMNVhKt6_cXdL0B;|^&-y+xgSq(mvD{PIvkTc6-dX4f8g zMCjE_8M$@{^7AVoGzze=xQbZd>kt7&bp1wojm>2Qx(beRvC8;5Dhc~9NOueY0F$hF z>rgxQyjOvtQN|S=H|O#+ax8C#PevM_!Xe9p-M!D@KFFa~-0!~it}e9KT(4+kVFCWs z5aGzx&zr&e4h_-1A$gRo!crnTkIwCmS$%n8DO!||hpdKm15&+{!M*;~e|{8;a;{8@ zdD{e6`N6k_Xp|syuW)1+w5HsX)Vz%Z*q3#pyg?#zlVQlj?)z~HTZ?QmoX3n@#e?2} z&%)gBOeI$Bxim6YHPz9gaJS448<$u(VM;uSv;@s&j65g)!EZ4z{)*gJb5JSCr>oz0 zlB*6=DLq?|{C;9O=kLt@ol;0@C;;z%j+)P}C=2anaL(TsmRtZP-50VbBj{nwkPu@U z2mK{eGtmHq6mQx80DkkF!D-v%jOl4o#YFoRZ+} zMNqiZa@zfrP_>m#MiPZMMEEx+J}Jq!Tl{QF%ON8@9vZfTccNE3cckn zILF=v#Qtf3(Cirst}^F!t+MTmL?xaixm287A;!UThSP8OkBI_~UZ=kF@m4|pHgRuS z>{o|6_6)~|{SDzXcvBZ4`g`@z3G;={7|1=CO1;*zMIZ4sJ;A*^4O@rPKdu}mMZ!h< z?-lWW3pb;SFy-G%c9ud{i$=1n&s28XT-1^zF;gD-7?()NICUsUQOboSxuutu%Ynh! 
[... remainder of base85-encoded GIT binary patch data for the preceding thumbnail PNG omitted ...]

literal 0 HcmV?d00001

diff --git a/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_vibronic_thumb.png b/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_vibronic_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..59370dd40a3a2bbad477dc433467dfbbe0ac56cf GIT binary patch literal 27537

[... base85-encoded PNG thumbnail data (27537 bytes) omitted ...]

literal 0 HcmV?d00001

diff --git a/doc/tutorials_apps/index.rst b/doc/tutorials_apps/index.rst new file mode 100644 index 000000000..6f32268b9 --- /dev/null +++ b/doc/tutorials_apps/index.rst @@ -0,0 +1,160 @@ +:orphan: + + + +.. _sphx_glr_tutorials_apps: + +Tutorials +========= + +SF Tutorials + + + +.. raw:: html + +
+ +.. only:: html + + .. figure:: /tutorials_apps/images/thumb/sphx_glr_run_tutorial_points_thumb.png + + :ref:`sphx_glr_tutorials_apps_run_tutorial_points.py` + +.. raw:: html + +
+ + +.. toctree:: + :hidden: + + /tutorials_apps/run_tutorial_points + +.. raw:: html + +
+ +.. only:: html + + .. figure:: /tutorials_apps/images/thumb/sphx_glr_run_tutorial_dense_thumb.png + + :ref:`sphx_glr_tutorials_apps_run_tutorial_dense.py` + +.. raw:: html + +
+ + +.. toctree:: + :hidden: + + /tutorials_apps/run_tutorial_dense + +.. raw:: html + +
+ +.. only:: html + + .. figure:: /tutorials_apps/images/thumb/sphx_glr_run_tutorial_vibronic_thumb.png + + :ref:`sphx_glr_tutorials_apps_run_tutorial_vibronic.py` + +.. raw:: html + +
+ + +.. toctree:: + :hidden: + + /tutorials_apps/run_tutorial_vibronic + +.. raw:: html + +
+ +.. only:: html + + .. figure:: /tutorials_apps/images/thumb/sphx_glr_run_tutorial_sample_thumb.png + + :ref:`sphx_glr_tutorials_apps_run_tutorial_sample.py` + +.. raw:: html + +
+ + +.. toctree:: + :hidden: + + /tutorials_apps/run_tutorial_sample + +.. raw:: html + +
+ +.. only:: html + + .. figure:: /tutorials_apps/images/thumb/sphx_glr_run_tutorial_similarity_thumb.png + + :ref:`sphx_glr_tutorials_apps_run_tutorial_similarity.py` + +.. raw:: html + +
+ + +.. toctree:: + :hidden: + + /tutorials_apps/run_tutorial_similarity + +.. raw:: html + +
+ +.. only:: html + + .. figure:: /tutorials_apps/images/thumb/sphx_glr_run_tutorial_max_clique_thumb.png + + :ref:`sphx_glr_tutorials_apps_run_tutorial_max_clique.py` + +.. raw:: html + +
+ + +.. toctree:: + :hidden: + + /tutorials_apps/run_tutorial_max_clique +.. raw:: html + +
+ + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-gallery + + + .. container:: sphx-glr-download + + :download:`Download all examples in Python source code: tutorials_apps_python.zip ` + + + + .. container:: sphx-glr-download + + :download:`Download all examples in Jupyter notebooks: tutorials_apps_jupyter.zip ` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/run_tutorial_dense.ipynb b/doc/tutorials_apps/run_tutorial_dense.ipynb new file mode 100644 index 000000000..f1e65a218 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_dense.ipynb @@ -0,0 +1,165 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# This cell is added by sphinx-gallery\n# It can be customized to whatever you like\n%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\nDense Subgraphs\n===============\n\n*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.subgraph`\n\nGraphs can be used to model a wide variety of concepts: social networks, financial markets,\nbiological networks, and many others. A common problem of interest is to find subgraphs that\ncontain a large number of connections between their nodes. These subgraphs may correspond to\ncommunities in social networks, correlated assets in a market, or mutually influential proteins\nin a biological network.\n\nMathematically, this task is known as the `dense subgraph problem\n`__. The density of a $k$-node subgraph is equal\nto the number of its edges divided by the maximum possible number of edges.\nIdentifying the densest graph of a given size, known as the densest-$k$ subgraph problem,\nis `NP-Hard `__.\n\n\nAs shown in :cite:`arrazola2018using`, a defining feature of GBS is that when we encode a graph\ninto a GBS device, it samples dense subgraphs with high probability. This property can be\nused to find dense subgraphs by sampling from a GBS device and postprocessing the outputs.\nLet's take a look!\n\nFinding dense subgraphs\n-----------------------\nThe first step is to import all required modules. We'll need the :mod:`~.apps.data`\nmodule to load pre-generated samples, the :mod:`~.apps.sample` module to postselect samples, the\n:mod:`~.apps.subgraph` module to search for dense subgraphs, and the :mod:`~.apps.plot` module to\nvisualize the graphs. We'll also use Plotly which is required for the :mod:`~.apps.plot` module and\nNetworkX for graph operations.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from strawberryfields.apps import data, sample, subgraph, plot\nimport plotly\nimport networkx as nx" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here we'll study a 30-node graph with a planted 10-node graph, as considered in\n:cite:`arrazola2018using`. The graph is generated by joining two Erd\u0151s\u2013R\u00e9nyi random graphs. The\nfirst graph of 20 nodes is created with edge probability of 0.5. The second planted\ngraph is generated with edge probability of 0.875. The planted nodes are the last ten nodes in the\ngraph. The two graphs are joined by selecting 8 nodes at random from both graphs and adding an\nedge between them. 
This graph has the sneaky property that even though the planted subgraph is the\ndensest of its size, its nodes have a lower average degree than the nodes in the rest of the\ngraph.\n\nThe :mod:`~.apps.data` module has pre-generated GBS samples from this graph. Let's load them,\npostselect on samples with a large number of clicks, and convert them to subgraphs:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "planted = data.Planted()\npostselected = sample.postselect(planted, 16, 30)\npl_graph = nx.to_networkx_graph(planted.adj)\nsamples = sample.to_subgraphs(postselected, pl_graph)\nprint(len(samples))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Not bad! We have more than 2000 samples to play with \ud83d\ude0e. The planted subgraph is actually easy to\nidentify; it even appears clearly from the force-directed Kamada-Kawai algorithm that is used to\nplot graphs in Strawberry Fields:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sub = list(range(20, 30))\nplot_graph = plot.graph(pl_graph, sub)\nplotly.offline.plot(plot_graph, filename=\"planted.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/planted.html\n\n

Note

The command ``plotly.offline.plot()`` is used to display plots in the documentation. In\n practice, you can simply use ``plot_graph.show()`` to view the graph.

\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A more interesting challenge is to find dense subgraphs of different sizes; it is often\nuseful to identify many high-density subgraphs, not just the densest ones. This is the purpose of\nthe :func:`~.subgraph.search` function in the :mod:`~.apps.subgraph` module: to identify\ncollections of dense subgraphs for a range of sizes. The output of this function is a\ndictionary whose keys correspond to subgraph sizes within the specified range. The values in\nthe dictionary are the top subgraphs of that size and their corresponding density.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "dense = subgraph.search(samples, pl_graph, 8, 16, max_count=3) # we look at top 3 densest subgraphs\nfor k in range(8, 17):\n print(dense[k][0]) # print only the densest subgraph of each size" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "From the results of the search we learn that, depending on their size, the densest subgraphs\nbelong to different regions of the graph: dense subgraphs of less than ten nodes are contained\nwithin the planted subgraph, whereas larger dense subgraphs appear outside of the planted\nsubgraph. Smaller dense subgraphs can be cliques, characterized by having\nmaximum density of 1, while larger subgraphs are less dense. Let's see what the smallest and\nlargest subgraphs look like:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "densest_8 = plot.graph(pl_graph, dense[8][0][1])\ndensest_16 = plot.graph(pl_graph, dense[12][0][1])\n\nplotly.offline.plot(densest_8, filename=\"densest_8.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/densest_8.html\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "plotly.offline.plot(densest_16, filename=\"densest_16.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/densest_16.html\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In principle there are different methods to postprocess GBS outputs to identify dense\nsubgraphs. For example, techniques for finding maximum cliques, included in the\n:mod:`~.apps.clique` module could help provide initial subgraphs that can be resized to find\nlarger dense subgraphs. Such methods are hybrid algorithms combining the ability of GBS to\nsample dense subgraphs with clever classical techniques. Can you think of your own hybrid\nalgorithm? 
\ud83e\udd14\n\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_dense.py b/doc/tutorials_apps/run_tutorial_dense.py new file mode 100644 index 000000000..964fc4c29 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_dense.py @@ -0,0 +1,116 @@ +# pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports +""" +.. _apps-subgraph-tutorial: + +Dense Subgraphs +=============== + +*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.subgraph` + +Graphs can be used to model a wide variety of concepts: social networks, financial markets, +biological networks, and many others. A common problem of interest is to find subgraphs that +contain a large number of connections between their nodes. These subgraphs may correspond to +communities in social networks, correlated assets in a market, or mutually influential proteins +in a biological network. + +Mathematically, this task is known as the `dense subgraph problem +`__. The density of a :math:`k`-node subgraph is equal +to the number of its edges divided by the maximum possible number of edges. +Identifying the densest graph of a given size, known as the densest-:math:`k` subgraph problem, +is `NP-Hard `__. + + +As shown in :cite:`arrazola2018using`, a defining feature of GBS is that when we encode a graph +into a GBS device, it samples dense subgraphs with high probability. This property can be +used to find dense subgraphs by sampling from a GBS device and postprocessing the outputs. +Let's take a look! + +Finding dense subgraphs +----------------------- +The first step is to import all required modules. We'll need the :mod:`~.apps.data` +module to load pre-generated samples, the :mod:`~.apps.sample` module to postselect samples, the +:mod:`~.apps.subgraph` module to search for dense subgraphs, and the :mod:`~.apps.plot` module to +visualize the graphs. We'll also use Plotly which is required for the :mod:`~.apps.plot` module and +NetworkX for graph operations. +""" +from strawberryfields.apps import data, sample, subgraph, plot +import plotly +import networkx as nx + +############################################################################## +# Here we'll study a 30-node graph with a planted 10-node graph, as considered in +# :cite:`arrazola2018using`. The graph is generated by joining two Erdős–Rényi random graphs. The +# first graph of 20 nodes is created with edge probability of 0.5. The second planted +# graph is generated with edge probability of 0.875. The planted nodes are the last ten nodes in the +# graph. The two graphs are joined by selecting 8 nodes at random from both graphs and adding an +# edge between them. This graph has the sneaky property that even though the planted subgraph is the +# densest of its size, its nodes have a lower average degree than the nodes in the rest of the +# graph. +# +# The :mod:`~.apps.data` module has pre-generated GBS samples from this graph. 
Let's load them, +# postselect on samples with a large number of clicks, and convert them to subgraphs: + +planted = data.Planted() +postselected = sample.postselect(planted, 16, 30) +pl_graph = nx.to_networkx_graph(planted.adj) +samples = sample.to_subgraphs(postselected, pl_graph) +print(len(samples)) + +############################################################################## +# Not bad! We have more than 2000 samples to play with 😎. The planted subgraph is actually easy to +# identify; it even appears clearly from the force-directed Kamada-Kawai algorithm that is used to +# plot graphs in Strawberry Fields: +sub = list(range(20, 30)) +plot_graph = plot.graph(pl_graph, sub) +plotly.offline.plot(plot_graph, filename="planted.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/planted.html +# +# .. note:: +# The command ``plotly.offline.plot()`` is used to display plots in the documentation. In +# practice, you can simply use ``plot_graph.show()`` to view the graph. + +############################################################################## +# A more interesting challenge is to find dense subgraphs of different sizes; it is often +# useful to identify many high-density subgraphs, not just the densest ones. This is the purpose of +# the :func:`~.subgraph.search` function in the :mod:`~.apps.subgraph` module: to identify +# collections of dense subgraphs for a range of sizes. The output of this function is a +# dictionary whose keys correspond to subgraph sizes within the specified range. The values in +# the dictionary are the top subgraphs of that size and their corresponding density. + +dense = subgraph.search(samples, pl_graph, 8, 16, max_count=3) # we look at top 3 densest subgraphs +for k in range(8, 17): + print(dense[k][0]) # print only the densest subgraph of each size + +############################################################################## +# From the results of the search we learn that, depending on their size, the densest subgraphs +# belong to different regions of the graph: dense subgraphs of less than ten nodes are contained +# within the planted subgraph, whereas larger dense subgraphs appear outside of the planted +# subgraph. Smaller dense subgraphs can be cliques, characterized by having +# maximum density of 1, while larger subgraphs are less dense. Let's see what the smallest and +# largest subgraphs look like: + +densest_8 = plot.graph(pl_graph, dense[8][0][1]) +densest_16 = plot.graph(pl_graph, dense[12][0][1]) + +plotly.offline.plot(densest_8, filename="densest_8.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/densest_8.html + +plotly.offline.plot(densest_16, filename="densest_16.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/densest_16.html + +############################################################################## +# In principle there are different methods to postprocess GBS outputs to identify dense +# subgraphs. For example, techniques for finding maximum cliques, included in the +# :mod:`~.apps.clique` module could help provide initial subgraphs that can be resized to find +# larger dense subgraphs. Such methods are hybrid algorithms combining the ability of GBS to +# sample dense subgraphs with clever classical techniques. Can you think of your own hybrid +# algorithm? 
🤔 diff --git a/doc/tutorials_apps/run_tutorial_dense.py.md5 b/doc/tutorials_apps/run_tutorial_dense.py.md5 new file mode 100644 index 000000000..456404ca2 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_dense.py.md5 @@ -0,0 +1 @@ +a3bc0cddd4d982bccbab3a7fcc4ef7a3 \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_dense.rst b/doc/tutorials_apps/run_tutorial_dense.rst new file mode 100644 index 000000000..fdc7a30c0 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_dense.rst @@ -0,0 +1,229 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here ` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorials_apps_run_tutorial_dense.py: + + +.. _apps-subgraph-tutorial: + +Dense Subgraphs +=============== + +*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.subgraph` + +Graphs can be used to model a wide variety of concepts: social networks, financial markets, +biological networks, and many others. A common problem of interest is to find subgraphs that +contain a large number of connections between their nodes. These subgraphs may correspond to +communities in social networks, correlated assets in a market, or mutually influential proteins +in a biological network. + +Mathematically, this task is known as the `dense subgraph problem +`__. The density of a :math:`k`-node subgraph is equal +to the number of its edges divided by the maximum possible number of edges. +Identifying the densest graph of a given size, known as the densest-:math:`k` subgraph problem, +is `NP-Hard `__. + + +As shown in :cite:`arrazola2018using`, a defining feature of GBS is that when we encode a graph +into a GBS device, it samples dense subgraphs with high probability. This property can be +used to find dense subgraphs by sampling from a GBS device and postprocessing the outputs. +Let's take a look! + +Finding dense subgraphs +----------------------- +The first step is to import all required modules. We'll need the :mod:`~.apps.data` +module to load pre-generated samples, the :mod:`~.apps.sample` module to postselect samples, the +:mod:`~.apps.subgraph` module to search for dense subgraphs, and the :mod:`~.apps.plot` module to +visualize the graphs. We'll also use Plotly which is required for the :mod:`~.apps.plot` module and +NetworkX for graph operations. + + +.. code-block:: default + + from strawberryfields.apps import data, sample, subgraph, plot + import plotly + import networkx as nx + + + + + + + +Here we'll study a 30-node graph with a planted 10-node graph, as considered in +:cite:`arrazola2018using`. The graph is generated by joining two Erdős–Rényi random graphs. The +first graph of 20 nodes is created with edge probability of 0.5. The second planted +graph is generated with edge probability of 0.875. The planted nodes are the last ten nodes in the +graph. The two graphs are joined by selecting 8 nodes at random from both graphs and adding an +edge between them. This graph has the sneaky property that even though the planted subgraph is the +densest of its size, its nodes have a lower average degree than the nodes in the rest of the +graph. + +The :mod:`~.apps.data` module has pre-generated GBS samples from this graph. Let's load them, +postselect on samples with a large number of clicks, and convert them to subgraphs: + + +.. 
code-block:: default + + + planted = data.Planted() + postselected = sample.postselect(planted, 16, 30) + pl_graph = nx.to_networkx_graph(planted.adj) + samples = sample.to_subgraphs(postselected, pl_graph) + print(len(samples)) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + 2181 + + +Not bad! We have more than 2000 samples to play with 😎. The planted subgraph is actually easy to +identify; it even appears clearly from the force-directed Kamada-Kawai algorithm that is used to +plot graphs in Strawberry Fields: + + +.. code-block:: default + + sub = list(range(20, 30)) + plot_graph = plot.graph(pl_graph, sub) + plotly.offline.plot(plot_graph, filename="planted.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/planted.html + +.. note:: + The command ``plotly.offline.plot()`` is used to display plots in the documentation. In + practice, you can simply use ``plot_graph.show()`` to view the graph. + +A more interesting challenge is to find dense subgraphs of different sizes; it is often +useful to identify many high-density subgraphs, not just the densest ones. This is the purpose of +the :func:`~.subgraph.search` function in the :mod:`~.apps.subgraph` module: to identify +collections of dense subgraphs for a range of sizes. The output of this function is a +dictionary whose keys correspond to subgraph sizes within the specified range. The values in +the dictionary are the top subgraphs of that size and their corresponding density. + + +.. code-block:: default + + + dense = subgraph.search(samples, pl_graph, 8, 16, max_count=3) # we look at top 3 densest subgraphs + for k in range(8, 17): + print(dense[k][0]) # print only the densest subgraph of each size + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + (1.0, [21, 22, 24, 25, 26, 27, 28, 29]) + (0.9722222222222222, [21, 22, 23, 24, 25, 26, 27, 28, 29]) + (0.9333333333333333, [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]) + (0.7818181818181819, [17, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]) + (0.696969696969697, [0, 2, 3, 5, 6, 8, 9, 10, 14, 16, 17, 18]) + (0.6666666666666666, [2, 3, 6, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18]) + (0.6483516483516484, [0, 3, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) + (0.6285714285714286, [0, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) + (0.6083333333333333, [0, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) + + +From the results of the search we learn that, depending on their size, the densest subgraphs +belong to different regions of the graph: dense subgraphs of less than ten nodes are contained +within the planted subgraph, whereas larger dense subgraphs appear outside of the planted +subgraph. Smaller dense subgraphs can be cliques, characterized by having +maximum density of 1, while larger subgraphs are less dense. Let's see what the smallest and +largest subgraphs look like: + + +.. code-block:: default + + + densest_8 = plot.graph(pl_graph, dense[8][0][1]) + densest_16 = plot.graph(pl_graph, dense[12][0][1]) + + plotly.offline.plot(densest_8, filename="densest_8.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/densest_8.html + + +.. code-block:: default + + + plotly.offline.plot(densest_16, filename="densest_16.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/densest_16.html + +In principle there are different methods to postprocess GBS outputs to identify dense +subgraphs. 
For example, techniques for finding maximum cliques, included in the +:mod:`~.apps.clique` module could help provide initial subgraphs that can be resized to find +larger dense subgraphs. Such methods are hybrid algorithms combining the ability of GBS to +sample dense subgraphs with clever classical techniques. Can you think of your own hybrid +algorithm? 🤔 + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 1 minutes 0.299 seconds) + + +.. _sphx_glr_download_tutorials_apps_run_tutorial_dense.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: run_tutorial_dense.py ` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: run_tutorial_dense.ipynb ` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/run_tutorial_max_clique.ipynb b/doc/tutorials_apps/run_tutorial_max_clique.ipynb new file mode 100644 index 000000000..140095bf9 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_max_clique.ipynb @@ -0,0 +1,287 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# This cell is added by sphinx-gallery\n# It can be customized to whatever you like\n%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\nMaximum Clique\n==============\n\n*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.clique`\n\nHere we'll explore how to combine GBS samples with local search algorithms to find large cliques\nin graphs. Let's get started!\n\nA clique is a special type of subgraph where all possible connections between nodes are present;\nthey are densest possible subgraphs of their size. The maximum clique problem, or max clique for\nshort, asks the question: given a graph $G$, what is the largest clique in the graph?\nMax clique is `NP-Hard `_, so finding the biggest clique\nbecomes challenging for graphs with many\nnodes. This is why we need clever algorithms to identify large cliques!\n\nTo get started, we'll analyze the 24-node TACE-AS graph used in :cite:`banchi2019molecular`. This\nis the *binding interaction graph* representing the spatial compatibility of atom pairs in a\nprotein-molecule complex. 
Cliques in this graph correspond to stable docking configurations, which\nare of interest in determining how the molecule interacts with the protein.\n\nThe first step is to import the Strawberry Fields ``apps`` module and external dependencies:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from strawberryfields.apps import data, plot, sample, clique\nimport numpy as np\nimport networkx as nx\nimport plotly" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The adjacency matrix of the TACE-AS graph can be loaded from the :mod:`~.apps.data` module and the\ngraph can be visualized using the :mod:`~.apps.plot` module:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "TA = data.TaceAs()\nA = TA.adj\nTA_graph = nx.Graph(A)\nplot_graph = plot.graph(TA_graph)\nplotly.offline.plot(plot_graph, filename=\"TACE-AS.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/TACE-AS.html\n\n

Note

The command ``plotly.offline.plot()`` is used to display plots in the documentation. In\n practice, you can simply use ``plot_graph.show()`` to view your graph.

\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Can you spot any cliques in the graph? It's not so easy using only your eyes! The TACE-AS graph\nis sufficiently small that all cliques can be found by performing an exhaustive search over\nall subgraphs. For example, below we highlight a small *maximal* clique, i.e., a clique\nnot contained inside another clique:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "maximal_clique = [4, 11, 12, 18]\nmaximal_fig = plot.graph(TA_graph, maximal_clique)\nplotly.offline.plot(maximal_fig, filename=\"maximal_clique.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/maximal_clique.html\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll now use the :mod:`~.apps.clique` module to find larger cliques in the graph. We can make\nuse of the pre-generated samples from the TACE-AS graph in the :mod:`~.apps.data` module and\npost-select samples with a specific number of clicks. Here we'll look at samples with eight\nclicks, of which there are a total of 1,984:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "postselected = sample.postselect(TA, 8, 8)\nsamples = sample.to_subgraphs(postselected, TA_graph)\nprint(len(samples))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "GBS produces samples that correspond to subgraphs of high density. For fun, let's confirm this\nby comparing the average subgraph density in the GBS samples to uniformly generated samples:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "GBS_dens = []\nu_dens = []\n\nfor s in samples:\n uniform = list(np.random.choice(24, 8, replace=False)) # generates uniform sample\n GBS_dens.append(nx.density(TA_graph.subgraph(s)))\n u_dens.append(nx.density(TA_graph.subgraph(uniform)))\n\nprint(\"GBS mean density = {:.4f}\".format(np.mean(GBS_dens)))\nprint(\"Uniform mean density = {:.4f}\".format(np.mean(u_dens)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Those look like great GBS samples \ud83d\udcaa! To obtain cliques, we shrink the samples by greedily\nremoving nodes with low degree until a clique is found.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "shrunk = [clique.shrink(s, TA_graph) for s in samples]\nprint(clique.is_clique(TA_graph.subgraph(shrunk[0])))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's take a look at some of these cliques. What are the clique sizes in the first ten samples?\nWhat is the average clique size? 
How about the largest and smallest clique size?\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "clique_sizes = [len(s) for s in shrunk]\nprint(\"First ten clique sizes = \", clique_sizes[:10])\nprint(\"Average clique size = {:.3f}\".format(np.mean(clique_sizes)))\nprint(\"Maximum clique size = \", np.max(clique_sizes))\nprint(\"Minimum clique size = \", np.min(clique_sizes))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Even in the first few samples, we've already identified larger cliques than the 4-node clique\nwe studied before. Awesome! Indeed, this simple shrinking strategy gives cliques with average\nsize of roughly five. We can enlarge these cliques by searching for larger cliques in their\nvicinity. We'll do this by taking ten iterations of local search and studying the results.\nNote: this may take a few seconds.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "searched = [clique.search(s, TA_graph, 10) for s in shrunk]\nclique_sizes = [len(s) for s in searched]\nprint(\"First two cliques = \", searched[:2])\nprint(\"Average clique size = {:.3f}\".format(np.mean(clique_sizes)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Wow! Local search is very helpful, we've found cliques with the maximum size of eight for\nessentially all samples \ud83e\udd29. Let's take a look at the first clique we found\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "clique_fig = plot.graph(TA_graph, searched[0])\nplotly.offline.plot(clique_fig, filename=\"maximum_clique.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/maximum_clique.html\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The TACE-AS graph is relatively small, so finding large cliques is not particularly difficult. A\ntougher challenge is the 300-node ``p_hat300-1`` random graph from the `DIMACS\n`_ maximum clique\ndataset. 
In this section, we'll write a short program that uses GBS samples in combination with\nlocal search to identify large cliques in this graph.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "Phat = data.PHat() # Load data\nphat_graph = nx.Graph(Phat.adj) # Obtain graph\npostselected = sample.postselect(Phat, 16, 20) # Post-select samples\nsamples = sample.to_subgraphs(postselected, phat_graph) # Convert samples into subgraphs\nshrunk = [clique.shrink(s, phat_graph) for s in samples] # Shrink subgraphs to cliques\nsearched = [clique.search(s, phat_graph, 10) for s in shrunk] # Perform local search\nclique_sizes = [len(s) for s in searched]\nlargest_clique = searched[np.argmax(clique_sizes)] # Identify largest clique found\nprint(\"Largest clique found is = \", largest_clique)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's make a plot to take a closer look at the largest clique we found\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "largest_fig = plot.graph(phat_graph, largest_clique)\nplotly.offline.plot(largest_fig, filename=\"largest_clique.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/largest_clique.html\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "just_largest = plot.subgraph(phat_graph.subgraph(largest_clique))\nplotly.offline.plot(just_largest, filename=\"just_largest.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/just_largest.html\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The ``p_hat300-1`` graph has several maximum cliques of size eight,\nand we have managed to find them! What other graphs can you analyze using GBS?\n\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_max_clique.py b/doc/tutorials_apps/run_tutorial_max_clique.py new file mode 100644 index 000000000..543409e0d --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_max_clique.py @@ -0,0 +1,164 @@ +# pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports +""" +.. _apps-clique-tutorial: + +Maximum Clique +============== + +*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.clique` + +Here we'll explore how to combine GBS samples with local search algorithms to find large cliques +in graphs. Let's get started! + +A clique is a special type of subgraph where all possible connections between nodes are present; +they are densest possible subgraphs of their size. The maximum clique problem, or max clique for +short, asks the question: given a graph :math:`G`, what is the largest clique in the graph? 
+Max clique is `NP-Hard `_, so finding the biggest clique +becomes challenging for graphs with many +nodes. This is why we need clever algorithms to identify large cliques! + +To get started, we'll analyze the 24-node TACE-AS graph used in :cite:`banchi2019molecular`. This +is the *binding interaction graph* representing the spatial compatibility of atom pairs in a +protein-molecule complex. Cliques in this graph correspond to stable docking configurations, which +are of interest in determining how the molecule interacts with the protein. + +The first step is to import the Strawberry Fields ``apps`` module and external dependencies: +""" +from strawberryfields.apps import data, plot, sample, clique +import numpy as np +import networkx as nx +import plotly + +############################################################################## +# The adjacency matrix of the TACE-AS graph can be loaded from the :mod:`~.apps.data` module and the +# graph can be visualized using the :mod:`~.apps.plot` module: + +TA = data.TaceAs() +A = TA.adj +TA_graph = nx.Graph(A) +plot_graph = plot.graph(TA_graph) +plotly.offline.plot(plot_graph, filename="TACE-AS.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/TACE-AS.html +# +# .. note:: +# The command ``plotly.offline.plot()`` is used to display plots in the documentation. In +# practice, you can simply use ``plot_graph.show()`` to view your graph. + +############################################################################## +# Can you spot any cliques in the graph? It's not so easy using only your eyes! The TACE-AS graph +# is sufficiently small that all cliques can be found by performing an exhaustive search over +# all subgraphs. For example, below we highlight a small *maximal* clique, i.e., a clique +# not contained inside another clique: + +maximal_clique = [4, 11, 12, 18] +maximal_fig = plot.graph(TA_graph, maximal_clique) +plotly.offline.plot(maximal_fig, filename="maximal_clique.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/maximal_clique.html + +############################################################################## +# We'll now use the :mod:`~.apps.clique` module to find larger cliques in the graph. We can make +# use of the pre-generated samples from the TACE-AS graph in the :mod:`~.apps.data` module and +# post-select samples with a specific number of clicks. Here we'll look at samples with eight +# clicks, of which there are a total of 1,984: + +postselected = sample.postselect(TA, 8, 8) +samples = sample.to_subgraphs(postselected, TA_graph) +print(len(samples)) + +############################################################################## +# GBS produces samples that correspond to subgraphs of high density. For fun, let's confirm this +# by comparing the average subgraph density in the GBS samples to uniformly generated samples: + +GBS_dens = [] +u_dens = [] + +for s in samples: + uniform = list(np.random.choice(24, 8, replace=False)) # generates uniform sample + GBS_dens.append(nx.density(TA_graph.subgraph(s))) + u_dens.append(nx.density(TA_graph.subgraph(uniform))) + +print("GBS mean density = {:.4f}".format(np.mean(GBS_dens))) +print("Uniform mean density = {:.4f}".format(np.mean(u_dens))) + +############################################################################## +# Those look like great GBS samples 💪! 
To obtain cliques, we shrink the samples by greedily +# removing nodes with low degree until a clique is found. + +shrunk = [clique.shrink(s, TA_graph) for s in samples] +print(clique.is_clique(TA_graph.subgraph(shrunk[0]))) + +############################################################################## +# Let's take a look at some of these cliques. What are the clique sizes in the first ten samples? +# What is the average clique size? How about the largest and smallest clique size? + +clique_sizes = [len(s) for s in shrunk] +print("First ten clique sizes = ", clique_sizes[:10]) +print("Average clique size = {:.3f}".format(np.mean(clique_sizes))) +print("Maximum clique size = ", np.max(clique_sizes)) +print("Minimum clique size = ", np.min(clique_sizes)) + +############################################################################## +# Even in the first few samples, we've already identified larger cliques than the 4-node clique +# we studied before. Awesome! Indeed, this simple shrinking strategy gives cliques with average +# size of roughly five. We can enlarge these cliques by searching for larger cliques in their +# vicinity. We'll do this by taking ten iterations of local search and studying the results. +# Note: this may take a few seconds. + +searched = [clique.search(s, TA_graph, 10) for s in shrunk] +clique_sizes = [len(s) for s in searched] +print("First two cliques = ", searched[:2]) +print("Average clique size = {:.3f}".format(np.mean(clique_sizes))) + +############################################################################## +# Wow! Local search is very helpful, we've found cliques with the maximum size of eight for +# essentially all samples 🤩. Let's take a look at the first clique we found + +clique_fig = plot.graph(TA_graph, searched[0]) +plotly.offline.plot(clique_fig, filename="maximum_clique.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/maximum_clique.html + +############################################################################## +# The TACE-AS graph is relatively small, so finding large cliques is not particularly difficult. A +# tougher challenge is the 300-node ``p_hat300-1`` random graph from the `DIMACS +# `_ maximum clique +# dataset. In this section, we'll write a short program that uses GBS samples in combination with +# local search to identify large cliques in this graph. + +Phat = data.PHat() # Load data +phat_graph = nx.Graph(Phat.adj) # Obtain graph +postselected = sample.postselect(Phat, 16, 20) # Post-select samples +samples = sample.to_subgraphs(postselected, phat_graph) # Convert samples into subgraphs +shrunk = [clique.shrink(s, phat_graph) for s in samples] # Shrink subgraphs to cliques +searched = [clique.search(s, phat_graph, 10) for s in shrunk] # Perform local search +clique_sizes = [len(s) for s in searched] +largest_clique = searched[np.argmax(clique_sizes)] # Identify largest clique found +print("Largest clique found is = ", largest_clique) + +############################################################################## +# Let's make a plot to take a closer look at the largest clique we found +largest_fig = plot.graph(phat_graph, largest_clique) +plotly.offline.plot(largest_fig, filename="largest_clique.html") + +############################################################################## +# .. 
raw:: html +# :file: ../../examples_apps/largest_clique.html + +just_largest = plot.subgraph(phat_graph.subgraph(largest_clique)) +plotly.offline.plot(just_largest, filename="just_largest.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/just_largest.html + +############################################################################## +# The ``p_hat300-1`` graph has several maximum cliques of size eight, +# and we have managed to find them! What other graphs can you analyze using GBS? diff --git a/doc/tutorials_apps/run_tutorial_max_clique.py.md5 b/doc/tutorials_apps/run_tutorial_max_clique.py.md5 new file mode 100644 index 000000000..169cebacb --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_max_clique.py.md5 @@ -0,0 +1 @@ +ee889bb45b0c33edb758e978abc8518a \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_max_clique.rst b/doc/tutorials_apps/run_tutorial_max_clique.rst new file mode 100644 index 000000000..ad3d6dd74 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_max_clique.rst @@ -0,0 +1,354 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here ` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorials_apps_run_tutorial_max_clique.py: + + +.. _apps-clique-tutorial: + +Maximum Clique +============== + +*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.clique` + +Here we'll explore how to combine GBS samples with local search algorithms to find large cliques +in graphs. Let's get started! + +A clique is a special type of subgraph where all possible connections between nodes are present; +they are densest possible subgraphs of their size. The maximum clique problem, or max clique for +short, asks the question: given a graph :math:`G`, what is the largest clique in the graph? +Max clique is `NP-Hard `_, so finding the biggest clique +becomes challenging for graphs with many +nodes. This is why we need clever algorithms to identify large cliques! + +To get started, we'll analyze the 24-node TACE-AS graph used in :cite:`banchi2019molecular`. This +is the *binding interaction graph* representing the spatial compatibility of atom pairs in a +protein-molecule complex. Cliques in this graph correspond to stable docking configurations, which +are of interest in determining how the molecule interacts with the protein. + +The first step is to import the Strawberry Fields ``apps`` module and external dependencies: + + +.. code-block:: default + + from strawberryfields.apps import data, plot, sample, clique + import numpy as np + import networkx as nx + import plotly + + + + + + + +The adjacency matrix of the TACE-AS graph can be loaded from the :mod:`~.apps.data` module and the +graph can be visualized using the :mod:`~.apps.plot` module: + + +.. code-block:: default + + + TA = data.TaceAs() + A = TA.adj + TA_graph = nx.Graph(A) + plot_graph = plot.graph(TA_graph) + plotly.offline.plot(plot_graph, filename="TACE-AS.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/TACE-AS.html + +.. note:: + The command ``plotly.offline.plot()`` is used to display plots in the documentation. In + practice, you can simply use ``plot_graph.show()`` to view your graph. + +Can you spot any cliques in the graph? It's not so easy using only your eyes! The TACE-AS graph +is sufficiently small that all cliques can be found by performing an exhaustive search over +all subgraphs. 
For example, below we highlight a small *maximal* clique, i.e., a clique +not contained inside another clique: + + +.. code-block:: default + + + maximal_clique = [4, 11, 12, 18] + maximal_fig = plot.graph(TA_graph, maximal_clique) + plotly.offline.plot(maximal_fig, filename="maximal_clique.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/maximal_clique.html + +We'll now use the :mod:`~.apps.clique` module to find larger cliques in the graph. We can make +use of the pre-generated samples from the TACE-AS graph in the :mod:`~.apps.data` module and +post-select samples with a specific number of clicks. Here we'll look at samples with eight +clicks, of which there are a total of 1,984: + + +.. code-block:: default + + + postselected = sample.postselect(TA, 8, 8) + samples = sample.to_subgraphs(postselected, TA_graph) + print(len(samples)) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + 1984 + + +GBS produces samples that correspond to subgraphs of high density. For fun, let's confirm this +by comparing the average subgraph density in the GBS samples to uniformly generated samples: + + +.. code-block:: default + + + GBS_dens = [] + u_dens = [] + + for s in samples: + uniform = list(np.random.choice(24, 8, replace=False)) # generates uniform sample + GBS_dens.append(nx.density(TA_graph.subgraph(s))) + u_dens.append(nx.density(TA_graph.subgraph(uniform))) + + print("GBS mean density = {:.4f}".format(np.mean(GBS_dens))) + print("Uniform mean density = {:.4f}".format(np.mean(u_dens))) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + GBS mean density = 0.7005 + Uniform mean density = 0.5874 + + +Those look like great GBS samples 💪! To obtain cliques, we shrink the samples by greedily +removing nodes with low degree until a clique is found. + + +.. code-block:: default + + + shrunk = [clique.shrink(s, TA_graph) for s in samples] + print(clique.is_clique(TA_graph.subgraph(shrunk[0]))) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + True + + +Let's take a look at some of these cliques. What are the clique sizes in the first ten samples? +What is the average clique size? How about the largest and smallest clique size? + + +.. code-block:: default + + + clique_sizes = [len(s) for s in shrunk] + print("First ten clique sizes = ", clique_sizes[:10]) + print("Average clique size = {:.3f}".format(np.mean(clique_sizes))) + print("Maximum clique size = ", np.max(clique_sizes)) + print("Minimum clique size = ", np.min(clique_sizes)) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + First ten clique sizes = [4, 5, 6, 7, 4, 4, 4, 6, 5, 5] + Average clique size = 5.009 + Maximum clique size = 8 + Minimum clique size = 3 + + +Even in the first few samples, we've already identified larger cliques than the 4-node clique +we studied before. Awesome! Indeed, this simple shrinking strategy gives cliques with average +size of roughly five. We can enlarge these cliques by searching for larger cliques in their +vicinity. We'll do this by taking ten iterations of local search and studying the results. +Note: this may take a few seconds. + + +.. code-block:: default + + + searched = [clique.search(s, TA_graph, 10) for s in shrunk] + clique_sizes = [len(s) for s in searched] + print("First two cliques = ", searched[:2]) + print("Average clique size = {:.3f}".format(np.mean(clique_sizes))) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. 
code-block:: none + + First two cliques = [[5, 11, 13, 14, 16, 20, 21, 22], [1, 2, 4, 7, 8, 10, 17, 23]] + Average clique size = 8.000 + + +Wow! Local search is very helpful, we've found cliques with the maximum size of eight for +essentially all samples 🤩. Let's take a look at the first clique we found + + +.. code-block:: default + + + clique_fig = plot.graph(TA_graph, searched[0]) + plotly.offline.plot(clique_fig, filename="maximum_clique.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/maximum_clique.html + +The TACE-AS graph is relatively small, so finding large cliques is not particularly difficult. A +tougher challenge is the 300-node ``p_hat300-1`` random graph from the `DIMACS +`_ maximum clique +dataset. In this section, we'll write a short program that uses GBS samples in combination with +local search to identify large cliques in this graph. + + +.. code-block:: default + + + Phat = data.PHat() # Load data + phat_graph = nx.Graph(Phat.adj) # Obtain graph + postselected = sample.postselect(Phat, 16, 20) # Post-select samples + samples = sample.to_subgraphs(postselected, phat_graph) # Convert samples into subgraphs + shrunk = [clique.shrink(s, phat_graph) for s in samples] # Shrink subgraphs to cliques + searched = [clique.search(s, phat_graph, 10) for s in shrunk] # Perform local search + clique_sizes = [len(s) for s in searched] + largest_clique = searched[np.argmax(clique_sizes)] # Identify largest clique found + print("Largest clique found is = ", largest_clique) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + Largest clique found is = [114, 121, 132, 138, 173, 189, 199, 249] + + +Let's make a plot to take a closer look at the largest clique we found + + +.. code-block:: default + + largest_fig = plot.graph(phat_graph, largest_clique) + plotly.offline.plot(largest_fig, filename="largest_clique.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/largest_clique.html + + +.. code-block:: default + + + just_largest = plot.subgraph(phat_graph.subgraph(largest_clique)) + plotly.offline.plot(just_largest, filename="just_largest.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/just_largest.html + +The ``p_hat300-1`` graph has several maximum cliques of size eight, +and we have managed to find them! What other graphs can you analyze using GBS? + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 1 minutes 8.666 seconds) + + +.. _sphx_glr_download_tutorials_apps_run_tutorial_max_clique.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: run_tutorial_max_clique.py ` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: run_tutorial_max_clique.ipynb ` + + +.. only:: html + + .. 
rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/run_tutorial_points.ipynb b/doc/tutorials_apps/run_tutorial_points.ipynb new file mode 100644 index 000000000..b681d67b5 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_points.ipynb @@ -0,0 +1,208 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# This cell is added by sphinx-gallery\n# It can be customized to whatever you like\n%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\nPoint processes\n===============\n\n*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.points`\n\nThis section shows how to generate GBS point process samples and use them to detect outlier\npoints in a data set. Point processes are models for generating random point patterns and can be\nuseful in machine learning, providing a source of randomness with\npreference towards both diversity :cite:`kulesza2012determinantal` and similarity in data. GBS\ndevices can be programmed to operate as special types of point processes that generate clustered\nrandom point patterns :cite:`jahangiri2019point`.\n\nThe probability of generating a specific pattern of points in GBS point processes depends on\nmatrix functions of a kernel matrix $K$ that describes the similarity between the points.\nMatrix functions that appear in GBS point processes are typically\n`permanents `__ and\n`hafnians `__. Here we use\nthe permanental point process, in which the probability of observing a pattern of points $S$\ndepends on the permanent of their corresponding kernel submatrix $K_S$ as\n:cite:`jahangiri2019point`:\n\n\\begin{align}\\mathcal{P}(S) = \\frac{1}{\\alpha(S)}\\text{per}(K_S),\\end{align}\n\nwhere $\\alpha$ is a normalization function that depends on $S$ and the average number\nof points. Let's look at a simple example to better understand the permanental point process.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We first import the modules we need. Note that the :mod:`~.apps.points` module has most of\nthe core functionalities exploring point processes.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nimport plotly\nfrom sklearn.datasets import make_blobs\nfrom strawberryfields.apps import points, plot" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We define a space where the GBS point process patterns are generated. This\nspace is referred to as the state space and is defined by a set of points. The\npoint process selects a subset of these points in each sample. Here we create\na 20 $\\times$ 20 square grid of points.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "R = np.array([(i, j) for i in range(20) for j in range(20)])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The rows of R are the coordinates of the points.\n\nNext step is to create the kernel matrix for the points of this discrete space. 
We call\nthe :func:`~.rbf_kernel` function which uses the *radial basis function* (RBF) kernel defined as:\n\n\\begin{align}K_{i,j} = e^{-\\|\\bf{r}_i-\\bf{r}_j\\|^2/2\\sigma^2},\\end{align}\n\nwhere $\\bf{r}_i$ are the coordinates of point $i$ and $\\sigma$ is a kernel\nparameter that determines the scale of the kernel.\n\nIn the RBF kernel, points that are much further than a distance $\\sigma$ from each other\nlead to small entries of the kernel matrix, whereas points much closer than $\\sigma$\ngenerate large entries. Now consider a specific point pattern in which all points\nare close to each other, which simply means that their matrix elements have larger entries. The\npermanent of a matrix is a sum over the product of some matrix entries. Therefore,\nthe submatrix that corresponds to those points has a large permanent and the probability of\nobserving them in a sample is larger.\n\nFor kernel matrices that are positive-semidefinite, such as the RBF kernel, there exist efficient\nquantum-inspired classical algorithms for permanental point process sampling\n:cite:`jahangiri2019point`. In this tutorial we use positive-semidefinite kernels and the\nquantum-inspired classical algorithm.\n\nLet's construct the RBF kernel with the parameter $\\sigma$ set to 2.5.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "K = points.rbf_kernel(R, 2.5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We generate 10 samples with an average number of 50 points per sample by calling\nthe :func:`~.points.sample` function of the :mod:`~.apps.points` module.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "samples = points.sample(K, 50.0, 10)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We visualize the first sample by using the :func:`~.points` function of\nthe :mod:`~.apps.plot` module. The point patterns generated by the permanental point process\nusually have a higher degree of clustering compared to a uniformly random pattern.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "plot_1 = plot.points(R, samples[0], point_size=10)\n\nplotly.offline.plot(plot_1, filename=\"Points.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/Points.html\n\n

<div class=\"alert alert-info\"><h4>Note</h4><p>The command ``plotly.offline.plot()`` is used to display plots in the documentation. In\n practice, you can simply use ``plot_1.show()`` to view your graph.</p></div>
\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Outlier Detection\n-----------------\n\nWhen the distribution of points in a given space is inhomogeneous, GBS point processes\nsample points from the dense regions with higher probability. This feature of the GBS point\nprocesses can be used to detect outlier points in a data set. In this example, we create two\ndense clusters and place them in a two-dimensional space containing some randomly distributed\npoints in the background. We consider the random background points as outliers to the clustered\npoints and show that the permanental point process selects points from the dense clusters with\na higher probability.\n\nWe first create the data points. The clusters have 50 points each and the points have a\nstandard deviation of 0.3. The clusters are centered at $[x = 2, y = 2]$ and $[x = 4,\ny = 4]$, respectively. We also add 25 randomly generated points to the data set.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "clusters = make_blobs(n_samples=100, centers=[[2, 2], [4, 4]], cluster_std=0.3)[0]\n\nnoise = np.random.rand(25, 2) * 6.0\n\nR = np.concatenate((clusters, noise))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then construct the kernel matrix and generate 10000 samples.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "K = points.rbf_kernel(R, 1.0)\n\nsamples = points.sample(K, 10.0, 10000)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We obtain the indices of 100 points that appear most frequently in the permanental point\nprocess samples and visualize them. The majority of the commonly appearing points belong\nto the clusters and the points that do not appear frequently are the outlier points. Note that\nsome of the background points might overlap with the clusters.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "gbs_frequent_points = np.argsort(np.sum(samples, axis=0))[-100:]\n\nplot_2 = plot.points(\n R, [1 if i in gbs_frequent_points else 0 for i in range(len(samples[0]))], point_size=10\n)\n\nplotly.offline.plot(plot_2, filename=\"Outliers.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/Outliers.html\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The two-dimensional examples considered here can be easily extended to higher dimensions. The\nGBS point processes retain their clustering property in higher dimensions but visual inspection\nof this clustering feature might not be very straightforward.\n\nGBS point processes can potentially be used in other applications such as clustering data points\nand finding correlations in time series data. 
Can you design your own example for using GBS point\nprocesses in a new application?\n\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_points.py b/doc/tutorials_apps/run_tutorial_points.py new file mode 100644 index 000000000..88677fb6a --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_points.py @@ -0,0 +1,156 @@ +# pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports,invalid-name +r""" +.. _apps-points-tutorial: + +Point processes +=============== + +*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.points` + +This section shows how to generate GBS point process samples and use them to detect outlier +points in a data set. Point processes are models for generating random point patterns and can be +useful in machine learning, providing a source of randomness with +preference towards both diversity :cite:`kulesza2012determinantal` and similarity in data. GBS +devices can be programmed to operate as special types of point processes that generate clustered +random point patterns :cite:`jahangiri2019point`. + +The probability of generating a specific pattern of points in GBS point processes depends on +matrix functions of a kernel matrix :math:`K` that describes the similarity between the points. +Matrix functions that appear in GBS point processes are typically +`permanents `__ and +`hafnians `__. Here we use +the permanental point process, in which the probability of observing a pattern of points :math:`S` +depends on the permanent of their corresponding kernel submatrix :math:`K_S` as +:cite:`jahangiri2019point`: + +.. math:: + \mathcal{P}(S) = \frac{1}{\alpha(S)}\text{per}(K_S), + +where :math:`\alpha` is a normalization function that depends on :math:`S` and the average number +of points. Let's look at a simple example to better understand the permanental point process. +""" + +############################################################################## +# We first import the modules we need. Note that the :mod:`~.apps.points` module has most of +# the core functionalities exploring point processes. + +import numpy as np +import plotly +from sklearn.datasets import make_blobs +from strawberryfields.apps import points, plot + +############################################################################## +# We define a space where the GBS point process patterns are generated. This +# space is referred to as the state space and is defined by a set of points. The +# point process selects a subset of these points in each sample. Here we create +# a 20 :math:`\times` 20 square grid of points. + +R = np.array([(i, j) for i in range(20) for j in range(20)]) + +############################################################################## +# The rows of R are the coordinates of the points. +# +# Next step is to create the kernel matrix for the points of this discrete space. We call +# the :func:`~.rbf_kernel` function which uses the *radial basis function* (RBF) kernel defined as: +# +# .. 
math:: +# K_{i,j} = e^{-\|\bf{r}_i-\bf{r}_j\|^2/2\sigma^2}, +# +# where :math:`\bf{r}_i` are the coordinates of point :math:`i` and :math:`\sigma` is a kernel +# parameter that determines the scale of the kernel. +# +# In the RBF kernel, points that are much further than a distance :math:`\sigma` from each other +# lead to small entries of the kernel matrix, whereas points much closer than :math:`\sigma` +# generate large entries. Now consider a specific point pattern in which all points +# are close to each other, which simply means that their matrix elements have larger entries. The +# permanent of a matrix is a sum over the product of some matrix entries. Therefore, +# the submatrix that corresponds to those points has a large permanent and the probability of +# observing them in a sample is larger. +# +# For kernel matrices that are positive-semidefinite, such as the RBF kernel, there exist efficient +# quantum-inspired classical algorithms for permanental point process sampling +# :cite:`jahangiri2019point`. In this tutorial we use positive-semidefinite kernels and the +# quantum-inspired classical algorithm. +# +# Let's construct the RBF kernel with the parameter :math:`\sigma` set to 2.5. + +K = points.rbf_kernel(R, 2.5) + +############################################################################## +# We generate 10 samples with an average number of 50 points per sample by calling +# the :func:`~.points.sample` function of the :mod:`~.apps.points` module. + +samples = points.sample(K, 50.0, 10) + +############################################################################## +# We visualize the first sample by using the :func:`~.points` function of +# the :mod:`~.apps.plot` module. The point patterns generated by the permanental point process +# usually have a higher degree of clustering compared to a uniformly random pattern. + +plot_1 = plot.points(R, samples[0], point_size=10) + +plotly.offline.plot(plot_1, filename="Points.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/Points.html +# +# .. note:: +# The command ``plotly.offline.plot()`` is used to display plots in the documentation. In +# practice, you can simply use ``plot_1.show()`` to view your graph. + +############################################################################## +# Outlier Detection +# ----------------- +# +# When the distribution of points in a given space is inhomogeneous, GBS point processes +# sample points from the dense regions with higher probability. This feature of the GBS point +# processes can be used to detect outlier points in a data set. In this example, we create two +# dense clusters and place them in a two-dimensional space containing some randomly distributed +# points in the background. We consider the random background points as outliers to the clustered +# points and show that the permanental point process selects points from the dense clusters with +# a higher probability. +# +# We first create the data points. The clusters have 50 points each and the points have a +# standard deviation of 0.3. The clusters are centered at :math:`[x = 2, y = 2]` and :math:`[x = 4, +# y = 4]`, respectively. We also add 25 randomly generated points to the data set. 
+ +clusters = make_blobs(n_samples=100, centers=[[2, 2], [4, 4]], cluster_std=0.3)[0] + +noise = np.random.rand(25, 2) * 6.0 + +R = np.concatenate((clusters, noise)) + +############################################################################## +# Then construct the kernel matrix and generate 10000 samples. + +K = points.rbf_kernel(R, 1.0) + +samples = points.sample(K, 10.0, 10000) + +############################################################################## +# We obtain the indices of 100 points that appear most frequently in the permanental point +# process samples and visualize them. The majority of the commonly appearing points belong +# to the clusters and the points that do not appear frequently are the outlier points. Note that +# some of the background points might overlap with the clusters. + +gbs_frequent_points = np.argsort(np.sum(samples, axis=0))[-100:] + +plot_2 = plot.points( + R, [1 if i in gbs_frequent_points else 0 for i in range(len(samples[0]))], point_size=10 +) + +plotly.offline.plot(plot_2, filename="Outliers.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/Outliers.html + +############################################################################## +# The two-dimensional examples considered here can be easily extended to higher dimensions. The +# GBS point processes retain their clustering property in higher dimensions but visual inspection +# of this clustering feature might not be very straightforward. +# +# GBS point processes can potentially be used in other applications such as clustering data points +# and finding correlations in time series data. Can you design your own example for using GBS point +# processes in a new application? diff --git a/doc/tutorials_apps/run_tutorial_points.py.md5 b/doc/tutorials_apps/run_tutorial_points.py.md5 new file mode 100644 index 000000000..413dcf197 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_points.py.md5 @@ -0,0 +1 @@ +af38ca5ddec02ac26a4470b10b9a5c9e \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_points.rst b/doc/tutorials_apps/run_tutorial_points.rst new file mode 100644 index 000000000..2fb4ffb5b --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_points.rst @@ -0,0 +1,264 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here ` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorials_apps_run_tutorial_points.py: + + +.. _apps-points-tutorial: + +Point processes +=============== + +*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.points` + +This section shows how to generate GBS point process samples and use them to detect outlier +points in a data set. Point processes are models for generating random point patterns and can be +useful in machine learning, providing a source of randomness with +preference towards both diversity :cite:`kulesza2012determinantal` and similarity in data. GBS +devices can be programmed to operate as special types of point processes that generate clustered +random point patterns :cite:`jahangiri2019point`. + +The probability of generating a specific pattern of points in GBS point processes depends on +matrix functions of a kernel matrix :math:`K` that describes the similarity between the points. +Matrix functions that appear in GBS point processes are typically +`permanents `__ and +`hafnians `__. 
Here we use +the permanental point process, in which the probability of observing a pattern of points :math:`S` +depends on the permanent of their corresponding kernel submatrix :math:`K_S` as +:cite:`jahangiri2019point`: + +.. math:: + \mathcal{P}(S) = \frac{1}{\alpha(S)}\text{per}(K_S), + +where :math:`\alpha` is a normalization function that depends on :math:`S` and the average number +of points. Let's look at a simple example to better understand the permanental point process. + +We first import the modules we need. Note that the :mod:`~.apps.points` module has most of +the core functionalities exploring point processes. + + +.. code-block:: default + + + import numpy as np + import plotly + from sklearn.datasets import make_blobs + from strawberryfields.apps import points, plot + + + + + + + +We define a space where the GBS point process patterns are generated. This +space is referred to as the state space and is defined by a set of points. The +point process selects a subset of these points in each sample. Here we create +a 20 :math:`\times` 20 square grid of points. + + +.. code-block:: default + + + R = np.array([(i, j) for i in range(20) for j in range(20)]) + + + + + + + +The rows of R are the coordinates of the points. + +Next step is to create the kernel matrix for the points of this discrete space. We call +the :func:`~.rbf_kernel` function which uses the *radial basis function* (RBF) kernel defined as: + +.. math:: + K_{i,j} = e^{-\|\bf{r}_i-\bf{r}_j\|^2/2\sigma^2}, + +where :math:`\bf{r}_i` are the coordinates of point :math:`i` and :math:`\sigma` is a kernel +parameter that determines the scale of the kernel. + +In the RBF kernel, points that are much further than a distance :math:`\sigma` from each other +lead to small entries of the kernel matrix, whereas points much closer than :math:`\sigma` +generate large entries. Now consider a specific point pattern in which all points +are close to each other, which simply means that their matrix elements have larger entries. The +permanent of a matrix is a sum over the product of some matrix entries. Therefore, +the submatrix that corresponds to those points has a large permanent and the probability of +observing them in a sample is larger. + +For kernel matrices that are positive-semidefinite, such as the RBF kernel, there exist efficient +quantum-inspired classical algorithms for permanental point process sampling +:cite:`jahangiri2019point`. In this tutorial we use positive-semidefinite kernels and the +quantum-inspired classical algorithm. + +Let's construct the RBF kernel with the parameter :math:`\sigma` set to 2.5. + + +.. code-block:: default + + + K = points.rbf_kernel(R, 2.5) + + + + + + + +We generate 10 samples with an average number of 50 points per sample by calling +the :func:`~.points.sample` function of the :mod:`~.apps.points` module. + + +.. code-block:: default + + + samples = points.sample(K, 50.0, 10) + + + + + + + +We visualize the first sample by using the :func:`~.points` function of +the :mod:`~.apps.plot` module. The point patterns generated by the permanental point process +usually have a higher degree of clustering compared to a uniformly random pattern. + + +.. code-block:: default + + + plot_1 = plot.points(R, samples[0], point_size=10) + + plotly.offline.plot(plot_1, filename="Points.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/Points.html + +.. note:: + The command ``plotly.offline.plot()`` is used to display plots in the documentation. 
In + practice, you can simply use ``plot_1.show()`` to view your graph. + +Outlier Detection +----------------- + +When the distribution of points in a given space is inhomogeneous, GBS point processes +sample points from the dense regions with higher probability. This feature of the GBS point +processes can be used to detect outlier points in a data set. In this example, we create two +dense clusters and place them in a two-dimensional space containing some randomly distributed +points in the background. We consider the random background points as outliers to the clustered +points and show that the permanental point process selects points from the dense clusters with +a higher probability. + +We first create the data points. The clusters have 50 points each and the points have a +standard deviation of 0.3. The clusters are centered at :math:`[x = 2, y = 2]` and :math:`[x = 4, +y = 4]`, respectively. We also add 25 randomly generated points to the data set. + + +.. code-block:: default + + + clusters = make_blobs(n_samples=100, centers=[[2, 2], [4, 4]], cluster_std=0.3)[0] + + noise = np.random.rand(25, 2) * 6.0 + + R = np.concatenate((clusters, noise)) + + + + + + + +Then construct the kernel matrix and generate 10000 samples. + + +.. code-block:: default + + + K = points.rbf_kernel(R, 1.0) + + samples = points.sample(K, 10.0, 10000) + + + + + + + +We obtain the indices of 100 points that appear most frequently in the permanental point +process samples and visualize them. The majority of the commonly appearing points belong +to the clusters and the points that do not appear frequently are the outlier points. Note that +some of the background points might overlap with the clusters. + + +.. code-block:: default + + + gbs_frequent_points = np.argsort(np.sum(samples, axis=0))[-100:] + + plot_2 = plot.points( + R, [1 if i in gbs_frequent_points else 0 for i in range(len(samples[0]))], point_size=10 + ) + + plotly.offline.plot(plot_2, filename="Outliers.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/Outliers.html + +The two-dimensional examples considered here can be easily extended to higher dimensions. The +GBS point processes retain their clustering property in higher dimensions but visual inspection +of this clustering feature might not be very straightforward. + +GBS point processes can potentially be used in other applications such as clustering data points +and finding correlations in time series data. Can you design your own example for using GBS point +processes in a new application? + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 13.880 seconds) + + +.. _sphx_glr_download_tutorials_apps_run_tutorial_points.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: run_tutorial_points.py ` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: run_tutorial_points.ipynb ` + + +.. only:: html + + .. 
rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/run_tutorial_sample.ipynb b/doc/tutorials_apps/run_tutorial_sample.ipynb new file mode 100644 index 000000000..e15bc8370 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_sample.ipynb @@ -0,0 +1,151 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# This cell is added by sphinx-gallery\n# It can be customized to whatever you like\n%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\nSampling from GBS\n=================\n\n*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.sample`\n\nA GBS device can be programmed to sample from any symmetric matrix $A$. To sample,\nwe must specify the mean number of photons being generated in the device and optionally the form of\ndetection used at the output: threshold detection or photon-number resolving (PNR) detection.\nThreshold detectors are restricted to measuring whether photons have arrived at the detector,\nwhereas PNR detectors are able to count the number of photons. Photon loss can also be specified\nwith the ``loss`` argument.\n\nSampling functionality is provided in the :mod:`~.apps.sample` module.\n\nLet's take a look at both types of sampling methods. We can generate samples from a random\n5-dimensional symmetric matrix:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from strawberryfields.apps import sample\nimport numpy as np\n\nmodes = 5\nn_mean = 6\nsamples = 5\n\nA = np.random.normal(0, 1, (modes, modes))\nA = A + A.T\n\ns_thresh = sample.sample(A, n_mean, samples, threshold=True)\ns_pnr = sample.sample(A, n_mean, samples, threshold=False)\n\nprint(s_thresh)\nprint(s_pnr)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In each case, a sample is a sequence of integers of length five, i.e., ``len(modes) = 5``.\nThreshold samples are ``0``'s and ``1``'s, corresponding to whether or not photons were\ndetected in a mode. A ``1`` here is conventionally called a \"click\". PNR samples are\nnon-negative integers counting the number of photons detected in each mode. For example,\nsuppose a PNR sample is ``[2, 1, 1, 0, 0]``, meaning that 2 photons were detected in mode 0,\n1 photons were detected in modes 1 and 2, and 0 photons were detected in modes 3 and 4. If\nthreshold detectors were used instead, the sample would be: ``[1, 1, 1, 0, 0]``.\n\nA more general :func:`~.apps.sample.gaussian` function allows for sampling from arbitrary pure\nGaussian states.\n\nSampling subgraphs\n------------------\n\nSo when would threshold detection or PNR detection be preferred in GBS? Since threshold samples\ncan be post-processed from PNR samples, we might expect that PNR detection is always the\npreferred choice. However, in practice *simulating* PNR-based GBS is significantly slower,\nand it turns out that threshold samples can provide enough useful information for a range of\napplications.\n\nStrawberry Fields provides tools for solving graph-based problems. In this setting,\nwe typically want to use GBS to sample subgraphs, which are likely to be dense due to the\nprobability distribution of GBS :cite:`arrazola2018using`. In this case, threshold sampling\nis enough, since it lets us select nodes of the subgraph. 
Let's take a look at this by using a\nsmall fixed graph as an example:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from strawberryfields.apps import plot\nimport networkx as nx\nimport plotly\n\nadj = np.array(\n [\n [0, 1, 0, 0, 1, 1],\n [1, 0, 1, 0, 1, 1],\n [0, 1, 0, 1, 1, 0],\n [0, 0, 1, 0, 1, 0],\n [1, 1, 1, 1, 0, 1],\n [1, 1, 0, 0, 1, 0],\n ]\n)\n\ngraph = nx.Graph(adj)\nplot_graph = plot.graph(graph)\n\nplotly.offline.plot(plot_graph, filename=\"random_graph.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/random_graph.html\n\n

<div class=\"alert alert-info\"><h4>Note</h4><p>The command ``plotly.offline.plot()`` is used to display plots in the documentation. In\n practice, you can simply use ``plot_graph.show()`` to view your graph.</p></div>
\n\nThis is a 6-node graph with the nodes ``[0, 1, 4, 5]`` fully connected to each other. We expect\nto be able to sample dense subgraphs with high probability.\n\nSamples can be generated from this graph through GBS using the :func:`~.apps.sample.sample`\nfunction:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "n_mean = 4\nsamples = 20\n\ns = sample.sample(adj, n_mean, samples)\n\nprint(s[:5])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Each sample in ``s`` is a list of modes with ``1``'s for nodes that have clicked and ``0``'s\nfor nodes that haven't. We want to convert a sample to another representation where the result\nis a list of modes that have clicked. This list of modes can be used to select a subgraph.\nFor example, if ``[0, 1, 0, 1, 1, 0]`` is a sample from GBS then ``[1, 3, 4]`` are\nthe selected nodes of the corresponding subgraph.\n\nHowever, the number of clicks in GBS is a random variable and we are not always guaranteed to\nhave enough clicks in a sample for the resultant subgraph to be of interest. We can filter out\nthe uninteresting samples using the :func:`~.apps.sample.postselect` function:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "min_clicks = 3\nmax_clicks = 4\n\ns = sample.postselect(s, min_clicks, max_clicks)\n\nprint(len(s))\ns.append([0, 1, 0, 1, 1, 0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As expected, we have fewer samples than before. The number of samples that survive this\npostselection is determined by the mean photon number in GBS. We have also added in our example\nsample ``[0, 1, 0, 1, 1, 0]`` to ensure that there is at least one for the following.\n\nLet's convert our postselected samples to subgraphs:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "subgraphs = sample.to_subgraphs(s, graph)\n\nprint(subgraphs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can take a look at one of the sampled subgraphs:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "plotly.offline.plot(plot.graph(graph, subgraphs[0]), filename=\"subgraph.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/subgraph.html\n\nThese sampled subgraphs act as the starting point for some of the applications made available\nin Strawberry Fields, including the maximum clique and dense subgraph identification problems.\n\n

<div class=\"alert alert-info\"><h4>Note</h4><p>Simulating GBS can be computationally intensive when using both threshold and PNR\n detectors. After all, we are using a classical algorithm to simulate a quantum process!\n To help users get to grips with the applications of Strawberry Fields as quickly as\n possible, we have provided datasets of pre-calculated GBS samples. These datasets are\n available in the :mod:`~.apps.data` module.</p></div>
\n\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_sample.py b/doc/tutorials_apps/run_tutorial_sample.py new file mode 100644 index 000000000..c7f1bff3c --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_sample.py @@ -0,0 +1,154 @@ +# pylint: disable=invalid-name,no-member,wrong-import-position,wrong-import-order,ungrouped-imports +""" +.. _apps-sample-tutorial: + +Sampling from GBS +================= + +*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.sample` + +A GBS device can be programmed to sample from any symmetric matrix :math:`A`. To sample, +we must specify the mean number of photons being generated in the device and optionally the form of +detection used at the output: threshold detection or photon-number resolving (PNR) detection. +Threshold detectors are restricted to measuring whether photons have arrived at the detector, +whereas PNR detectors are able to count the number of photons. Photon loss can also be specified +with the ``loss`` argument. + +Sampling functionality is provided in the :mod:`~.apps.sample` module. + +Let's take a look at both types of sampling methods. We can generate samples from a random +5-dimensional symmetric matrix: +""" + +from strawberryfields.apps import sample +import numpy as np + +modes = 5 +n_mean = 6 +samples = 5 + +A = np.random.normal(0, 1, (modes, modes)) +A = A + A.T + +s_thresh = sample.sample(A, n_mean, samples, threshold=True) +s_pnr = sample.sample(A, n_mean, samples, threshold=False) + +print(s_thresh) +print(s_pnr) + +############################################################################## +# In each case, a sample is a sequence of integers of length five, i.e., ``len(modes) = 5``. +# Threshold samples are ``0``'s and ``1``'s, corresponding to whether or not photons were +# detected in a mode. A ``1`` here is conventionally called a "click". PNR samples are +# non-negative integers counting the number of photons detected in each mode. For example, +# suppose a PNR sample is ``[2, 1, 1, 0, 0]``, meaning that 2 photons were detected in mode 0, +# 1 photons were detected in modes 1 and 2, and 0 photons were detected in modes 3 and 4. If +# threshold detectors were used instead, the sample would be: ``[1, 1, 1, 0, 0]``. +# +# A more general :func:`~.apps.sample.gaussian` function allows for sampling from arbitrary pure +# Gaussian states. +# +# Sampling subgraphs +# ------------------ +# +# So when would threshold detection or PNR detection be preferred in GBS? Since threshold samples +# can be post-processed from PNR samples, we might expect that PNR detection is always the +# preferred choice. However, in practice *simulating* PNR-based GBS is significantly slower, +# and it turns out that threshold samples can provide enough useful information for a range of +# applications. +# +# Strawberry Fields provides tools for solving graph-based problems. In this setting, +# we typically want to use GBS to sample subgraphs, which are likely to be dense due to the +# probability distribution of GBS :cite:`arrazola2018using`. 
In this case, threshold sampling +# is enough, since it lets us select nodes of the subgraph. Let's take a look at this by using a +# small fixed graph as an example: + +from strawberryfields.apps import plot +import networkx as nx +import plotly + +adj = np.array( + [ + [0, 1, 0, 0, 1, 1], + [1, 0, 1, 0, 1, 1], + [0, 1, 0, 1, 1, 0], + [0, 0, 1, 0, 1, 0], + [1, 1, 1, 1, 0, 1], + [1, 1, 0, 0, 1, 0], + ] +) + +graph = nx.Graph(adj) +plot_graph = plot.graph(graph) + +plotly.offline.plot(plot_graph, filename="random_graph.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/random_graph.html +# +# .. note:: +# The command ``plotly.offline.plot()`` is used to display plots in the documentation. In +# practice, you can simply use ``plot_graph.show()`` to view your graph. +# +# This is a 6-node graph with the nodes ``[0, 1, 4, 5]`` fully connected to each other. We expect +# to be able to sample dense subgraphs with high probability. +# +# Samples can be generated from this graph through GBS using the :func:`~.apps.sample.sample` +# function: + +n_mean = 4 +samples = 20 + +s = sample.sample(adj, n_mean, samples) + +print(s[:5]) + +############################################################################## +# Each sample in ``s`` is a list of modes with ``1``'s for nodes that have clicked and ``0``'s +# for nodes that haven't. We want to convert a sample to another representation where the result +# is a list of modes that have clicked. This list of modes can be used to select a subgraph. +# For example, if ``[0, 1, 0, 1, 1, 0]`` is a sample from GBS then ``[1, 3, 4]`` are +# the selected nodes of the corresponding subgraph. +# +# However, the number of clicks in GBS is a random variable and we are not always guaranteed to +# have enough clicks in a sample for the resultant subgraph to be of interest. We can filter out +# the uninteresting samples using the :func:`~.apps.sample.postselect` function: + +min_clicks = 3 +max_clicks = 4 + +s = sample.postselect(s, min_clicks, max_clicks) + +print(len(s)) +s.append([0, 1, 0, 1, 1, 0]) + +############################################################################## +# As expected, we have fewer samples than before. The number of samples that survive this +# postselection is determined by the mean photon number in GBS. We have also added in our example +# sample ``[0, 1, 0, 1, 1, 0]`` to ensure that there is at least one for the following. +# +# Let's convert our postselected samples to subgraphs: + +subgraphs = sample.to_subgraphs(s, graph) + +print(subgraphs) + +############################################################################## +# We can take a look at one of the sampled subgraphs: + +plotly.offline.plot(plot.graph(graph, subgraphs[0]), filename="subgraph.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/subgraph.html +# +# These sampled subgraphs act as the starting point for some of the applications made available +# in Strawberry Fields, including the maximum clique and dense subgraph identification problems. +# +# .. note:: +# Simulating GBS can be computationally intensive when using both threshold and PNR +# detectors. After all, we are using a classical algorithm to simulate a quantum process! +# To help users get to grips with the applications of Strawberry Fields as quickly as +# possible, we have provided datasets of pre-calculated GBS samples. 
These datasets are +# available in the :mod:`~.apps.data` module. diff --git a/doc/tutorials_apps/run_tutorial_sample.py.md5 b/doc/tutorials_apps/run_tutorial_sample.py.md5 new file mode 100644 index 000000000..87a137b7b --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_sample.py.md5 @@ -0,0 +1 @@ +53d531d0740680498035b24e273028d0 \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_sample.rst b/doc/tutorials_apps/run_tutorial_sample.rst new file mode 100644 index 000000000..b1afb773d --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_sample.rst @@ -0,0 +1,276 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here ` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorials_apps_run_tutorial_sample.py: + + +.. _apps-sample-tutorial: + +Sampling from GBS +================= + +*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.sample` + +A GBS device can be programmed to sample from any symmetric matrix :math:`A`. To sample, +we must specify the mean number of photons being generated in the device and optionally the form of +detection used at the output: threshold detection or photon-number resolving (PNR) detection. +Threshold detectors are restricted to measuring whether photons have arrived at the detector, +whereas PNR detectors are able to count the number of photons. Photon loss can also be specified +with the ``loss`` argument. + +Sampling functionality is provided in the :mod:`~.apps.sample` module. + +Let's take a look at both types of sampling methods. We can generate samples from a random +5-dimensional symmetric matrix: + + +.. code-block:: default + + + from strawberryfields.apps import sample + import numpy as np + + modes = 5 + n_mean = 6 + samples = 5 + + A = np.random.normal(0, 1, (modes, modes)) + A = A + A.T + + s_thresh = sample.sample(A, n_mean, samples, threshold=True) + s_pnr = sample.sample(A, n_mean, samples, threshold=False) + + print(s_thresh) + print(s_pnr) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + [[1, 0, 1, 1, 0], [0, 0, 0, 1, 1], [0, 0, 0, 1, 1], [1, 0, 0, 1, 0], [1, 1, 0, 1, 1]] + [[0, 0, 0, 0, 0], [4, 1, 0, 4, 1], [0, 0, 0, 0, 0], [0, 0, 0, 1, 1], [1, 0, 0, 3, 0]] + + +In each case, a sample is a sequence of integers of length five, i.e., ``len(modes) = 5``. +Threshold samples are ``0``'s and ``1``'s, corresponding to whether or not photons were +detected in a mode. A ``1`` here is conventionally called a "click". PNR samples are +non-negative integers counting the number of photons detected in each mode. For example, +suppose a PNR sample is ``[2, 1, 1, 0, 0]``, meaning that 2 photons were detected in mode 0, +1 photons were detected in modes 1 and 2, and 0 photons were detected in modes 3 and 4. If +threshold detectors were used instead, the sample would be: ``[1, 1, 1, 0, 0]``. + +A more general :func:`~.apps.sample.gaussian` function allows for sampling from arbitrary pure +Gaussian states. + +Sampling subgraphs +------------------ + +So when would threshold detection or PNR detection be preferred in GBS? Since threshold samples +can be post-processed from PNR samples, we might expect that PNR detection is always the +preferred choice. However, in practice *simulating* PNR-based GBS is significantly slower, +and it turns out that threshold samples can provide enough useful information for a range of +applications. + +Strawberry Fields provides tools for solving graph-based problems. 
In this setting, +we typically want to use GBS to sample subgraphs, which are likely to be dense due to the +probability distribution of GBS :cite:`arrazola2018using`. In this case, threshold sampling +is enough, since it lets us select nodes of the subgraph. Let's take a look at this by using a +small fixed graph as an example: + + +.. code-block:: default + + + from strawberryfields.apps import plot + import networkx as nx + import plotly + + adj = np.array( + [ + [0, 1, 0, 0, 1, 1], + [1, 0, 1, 0, 1, 1], + [0, 1, 0, 1, 1, 0], + [0, 0, 1, 0, 1, 0], + [1, 1, 1, 1, 0, 1], + [1, 1, 0, 0, 1, 0], + ] + ) + + graph = nx.Graph(adj) + plot_graph = plot.graph(graph) + + plotly.offline.plot(plot_graph, filename="random_graph.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/random_graph.html + +.. note:: + The command ``plotly.offline.plot()`` is used to display plots in the documentation. In + practice, you can simply use ``plot_graph.show()`` to view your graph. + +This is a 6-node graph with the nodes ``[0, 1, 4, 5]`` fully connected to each other. We expect +to be able to sample dense subgraphs with high probability. + +Samples can be generated from this graph through GBS using the :func:`~.apps.sample.sample` +function: + + +.. code-block:: default + + + n_mean = 4 + samples = 20 + + s = sample.sample(adj, n_mean, samples) + + print(s[:5]) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]] + + +Each sample in ``s`` is a list of modes with ``1``'s for nodes that have clicked and ``0``'s +for nodes that haven't. We want to convert a sample to another representation where the result +is a list of modes that have clicked. This list of modes can be used to select a subgraph. +For example, if ``[0, 1, 0, 1, 1, 0]`` is a sample from GBS then ``[1, 3, 4]`` are +the selected nodes of the corresponding subgraph. + +However, the number of clicks in GBS is a random variable and we are not always guaranteed to +have enough clicks in a sample for the resultant subgraph to be of interest. We can filter out +the uninteresting samples using the :func:`~.apps.sample.postselect` function: + + +.. code-block:: default + + + min_clicks = 3 + max_clicks = 4 + + s = sample.postselect(s, min_clicks, max_clicks) + + print(len(s)) + s.append([0, 1, 0, 1, 1, 0]) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + 4 + + +As expected, we have fewer samples than before. The number of samples that survive this +postselection is determined by the mean photon number in GBS. We have also added in our example +sample ``[0, 1, 0, 1, 1, 0]`` to ensure that there is at least one for the following. + +Let's convert our postselected samples to subgraphs: + + +.. code-block:: default + + + subgraphs = sample.to_subgraphs(s, graph) + + print(subgraphs) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + [[0, 1, 4, 5], [0, 3, 4], [1, 2, 4], [1, 2, 3, 4], [1, 3, 4]] + + +We can take a look at one of the sampled subgraphs: + + +.. code-block:: default + + + plotly.offline.plot(plot.graph(graph, subgraphs[0]), filename="subgraph.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/subgraph.html + +These sampled subgraphs act as the starting point for some of the applications made available +in Strawberry Fields, including the maximum clique and dense subgraph identification problems. + +.. 
note:: + Simulating GBS can be computationally intensive when using both threshold and PNR + detectors. After all, we are using a classical algorithm to simulate a quantum process! + To help users get to grips with the applications of Strawberry Fields as quickly as + possible, we have provided datasets of pre-calculated GBS samples. These datasets are + available in the :mod:`~.apps.data` module. + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 8.272 seconds) + + +.. _sphx_glr_download_tutorials_apps_run_tutorial_sample.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: run_tutorial_sample.py ` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: run_tutorial_sample.ipynb ` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/run_tutorial_similarity.ipynb b/doc/tutorials_apps/run_tutorial_similarity.ipynb new file mode 100644 index 000000000..84615da86 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_similarity.ipynb @@ -0,0 +1,392 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# This cell is added by sphinx-gallery\n# It can be customized to whatever you like\n%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\nGraph similarity\n================\n\n*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.similarity`\n\nThis page looks at how to use GBS to construct a similarity measure between graphs,\nknown as a graph kernel :cite:`schuld2019quantum`. Kernels can be applied to graph-based\ndata for machine learning tasks such as classification using a support vector machine.\n\nGraph data\n----------\n\nWe begin by fixing a dataset of graphs to consider and loading GBS samples from these graphs,\nwhich will be needed in the following.\n\nLet's use the MUTAG dataset of graphs :cite:`debnath1991structure,kriege2012subgraph`. This is a\ndataset of 188 different graphs that each correspond to the structure of a chemical compound. Our\ngoal is to use GBS samples from these graphs to measure their similarity.\n\nThe :mod:`~.apps.data` module provides pre-calculated GBS samples for selected graphs in the MUTAG\ndataset. Each set of samples is generated by encoding the graph into a GBS device, and collecting\nphoton click events. We'll start by loading four sets of samples and visualizing the\ncorresponding graphs.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from strawberryfields.apps import data, plot, similarity\n\nm0 = data.Mutag0()\nm1 = data.Mutag1()\nm2 = data.Mutag2()\nm3 = data.Mutag3()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "These datasets contain both the adjacency matrix of the graph and the samples generated through\nGBS. 
We can access the adjacency matrix through:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "m0_a = m0.adj\nm1_a = m1.adj\nm2_a = m2.adj\nm3_a = m3.adj" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Samples from these graphs can be accessed by indexing:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(m0[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now plot the four graphs using the :mod:`~.apps.plot` module. To use this module,\nwe need to convert the adjacency matrices into NetworkX Graphs:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import networkx as nx\nimport plotly\n\nplot_mutag_0 = plot.graph(nx.Graph(m0_a))\nplot_mutag_1 = plot.graph(nx.Graph(m1_a))\nplot_mutag_2 = plot.graph(nx.Graph(m2_a))\nplot_mutag_3 = plot.graph(nx.Graph(m3_a))\n\nplotly.offline.plot(plot_mutag_0, filename=\"MUTAG_0.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/MUTAG_0.html\n\n

<div class=\"alert alert-info\"><h4>Note</h4><p>The command ``plotly.offline.plot()`` is used to display plots in the documentation. In\n    practice, you can simply use ``plot_mutag_0.show()`` to view your graph.</p></div>

\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "plotly.offline.plot(plot_mutag_1, filename=\"MUTAG_1.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/MUTAG_1.html\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "plotly.offline.plot(plot_mutag_2, filename=\"MUTAG_2.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/MUTAG_2.html\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "plotly.offline.plot(plot_mutag_3, filename=\"MUTAG_3.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/MUTAG_3.html\n\nThe graphs of ``m1_a`` and ``m2_a`` look very similar. In fact,\nit turns out that they are *isomorphic* to each other, which means that the graphs can be made\nidentical by permuting their node labels.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Creating a feature vector\n-------------------------\n\nFollowing :cite:`schuld2019quantum`, we can create a *feature vector* to describe each graph.\nThese feature vectors contain information about the graphs and can be viewed as a mapping to a\nhigh-dimensional feature space, a technique often used in machine learning that allows us to\nemploy properties of the feature space to separate and classify the vectors.\n\nThe feature vector of a graph can be composed in a variety of ways. One approach is to\nassociate features with the relative frequencies of certain types of measurements being\nrecorded from a GBS device configured to sample from the graph, as we now discuss.\n\nWe begin by defining the concept of an *orbit*, which is the set of all GBS samples that are\nequivalent under permutation of the modes. A sample can be converted to its corresponding orbit\nusing the :func:`~.sample_to_orbit` function. For example, the first sample of ``m0`` is ``[0,\n0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]`` and has orbit:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(similarity.sample_to_orbit(m0[0]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, ``[1, 1]`` means that two photons were detected, each in a separate mode. Other samples\ncan be randomly generated from the ``[1, 1]`` orbit using:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(similarity.orbit_to_sample([1, 1], modes=m0.modes))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Orbits provide a useful way to coarse-grain the samples from GBS into outcomes that are\nstatistically more likely to be observed. However, we are interested in coarse-graining further\ninto *events*, which correspond to a combination of orbits with the same photon number such\nthat the number of photons counted in each mode does not exceed a fixed value\n``max_count_per_mode``. 
To understand this, let's look at all of the orbits with a photon\nnumber of 5:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(list(similarity.orbits(5)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "All 5-photon samples belong to one of the orbits above. A 5-photon event with\n``max_count_per_mode = 3`` means that we include the orbits: ``[[1, 1, 1, 1, 1], [2, 1, 1, 1],\n[3, 1, 1], [2, 2, 1], [3, 2]]`` and ignore the orbits ``[[4, 1], [5]]``. For example,\nthe sample ``[0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0]`` is a 5-photon event:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(similarity.sample_to_event([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0], 3))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Samples with more than ``max_count_per_mode`` in any mode are not counted as part of the event:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(similarity.sample_to_event([0, 4, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we have mastered orbits and events, how can we make a feature vector? It was shown in\n:cite:`schuld2019quantum` that one way of making a feature vector of a graph is through the\nfrequencies of events. Specifically, for a $k$ photon event $E_{k, n_{\\max}}$\nwith maximum count per mode $n_{\\max}$ and corresponding probability $p_{k,\nn_{\\max}}:=p_{E_{k, n_{\\max}}}(G)$ with respect to a graph $G$, a feature vector can be\nwritten as\n\n\\begin{align}f_{\\mathbf{k}, n_{\\max}} = (p_{k_{1}, n_{\\max}}, p_{k_{2}, n_{\\max}}, \\ldots , p_{k_{K},\n n_{\\max}}),\\end{align}\n\nwhere $\\mathbf{k} := (k_{1}, k_{2}, \\ldots , k_{K})$ is a list of different total photon\nnumbers.\n\nFor example, if $\\mathbf{k} := (2, 4, 6)$ and $n_{\\max} = 2$, we have\n\n\\begin{align}f_{(2, 4, 6), 2} = (p_{2, 2}, p_{4, 2}, p_{6, 2}).\\end{align}\n\nIn this case, we are interested in the probabilities of events $E_{2, 2}$, $E_{4,\n2}$, and $E_{6, 2}$. Suppose we are sampling from a four-mode device and have the samples\n``[0, 3, 0, 1]`` and ``[1, 2, 0, 1]``. These samples are part of the orbits ``[3, 1]`` and\n``[2, 1, 1]``, respectively. However, ``[3, 1]`` is not part of the $E_{4, 2}$ event while\n``[2, 1, 1]`` is.\n\nCalculating a feature vector\n----------------------------\n\nWe provide two methods for calculating a feature vector of GBS event probabilities in\nStrawberry Fields:\n\n1. Through sampling.\n2. Using a Monte Carlo estimate of the probability.\n\nIn the first method, all one needs to do is generate some GBS samples from the graph of\ninterest and fix the composition of the feature vector. For example, for a feature vector\n$f_{\\mathbf{k} = (2, 4, 6), n_{\\max}=2}$ we use:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(similarity.feature_vector_sampling(m0, event_photon_numbers=[2, 4, 6], max_count_per_mode=2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For the second method, suppose we want to calculate the event probabilities exactly rather than\nthrough sampling. 
To do this, we consider the event probability $p_{k, n_{\\max}}$ as the\nsum over all sample probabilities in the event. In GBS, each sample probability is determined by\nthe hafnian of a relevant sub-adjacency matrix. While this is tough to calculate, what makes\ncalculating $p_{k, n_{\\max}}$ really challenging is the number of samples the corresponding\nevent contains! For example, the 6-photon event over 17 modes $E_{k=6, n_{\\max}=2}$\ncontains the following number of samples :\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(similarity.event_cardinality(6, 2, 17))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To avoid calculating a large number of sample probabilities, an alternative is to perform a\nMonte Carlo approximation. Here, samples within an event are selected uniformly at random and\ntheir resultant probabilities are calculated. If $N$ samples $\\{S_{1}, S_{2},\n\\ldots , S_{N}\\}$ are generated, then the event probability can be approximated as\n\n\\begin{align}p(E_{k, n_{\\max}}) \\approx \\frac{1}{N}\\sum_{i=1}^N p(S_i) |E_{k, n_{\\max}}|,\\end{align}\n\nwith $|E_{k, n_{\\max}}|$ denoting the cardinality of the event.\n\nThis method can be accessed using the :func:`~.prob_event_mc` function. The 4-photon event is\napproximated as:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(similarity.prob_event_mc(nx.Graph(m0_a), 4, max_count_per_mode=2, n_mean=6))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The feature vector can then be calculated through Monte Carlo sampling using\n:func:`~.feature_vector_mc`.\n\n

<div class=\"alert alert-info\"><h4>Note</h4><p>The results of :func:`~.prob_event_mc` and :func:`~.feature_vector_mc` are probabilistic and\n    may vary between runs. Increasing the optional ``samples`` parameter will increase accuracy\n    but slow down calculation.</p></div>

\n\nThe second method of Monte Carlo approximation is intended for use in scenarios where it is\ncomputationally intensive to pre-calculate a statistically significant dataset of samples from\nGBS.\n\nMachine learning with GBS graph kernels\n---------------------------------------\n\nThe power of feature vectors that embed graphs in a vector space of real numbers is that we can\nnow measure similarities between graphs. This is very useful in machine learning, where similar\nlabels are assigned to graphs that are close to each other. GBS feature vectors therefore give\nrise to a similarity measure between graphs!\n\nLet's build this up a bit more. The MUTAG dataset we are considering contains not only graphs\ncorresponding to the structure of chemical compounds, but also a *label* of each\ncompound based upon its mutagenic effect. The four graphs we consider here have labels:\n\n- MUTAG0: Class 1\n- MUTAG1: Class 0\n- MUTAG2: Class 0\n- MUTAG3: Class 1\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "classes = [1, 0, 0, 1]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can use GBS feature vectors in a `support vector machine\n`__ (SVM) that finds a separating\nhyperplane between classes in the feature space. We start by defining two-dimensional feature\nvectors:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "events = [8, 10]\nmax_count = 2\n\nf1 = similarity.feature_vector_sampling(m0, events, max_count)\nf2 = similarity.feature_vector_sampling(m1, events, max_count)\nf3 = similarity.feature_vector_sampling(m2, events, max_count)\nf4 = similarity.feature_vector_sampling(m3, events, max_count)\n\nimport numpy as np\n\nR = np.array([f1, f2, f3, f4])\n\nprint(R)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There is freedom in the choice of ``events`` composing the feature vectors and we encourage the\nreader to explore different combinations. Note, however, that odd photon-numbered events have\nzero probability because ideal GBS only generates and outputs pairs of photons.\n\nGiven our points in the feature space and their target labels, we can use\nscikit-learn's Support Vector Machine `LinearSVC `__ as our model to train:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import StandardScaler\n\nR_scaled = StandardScaler().fit_transform(R) # Transform data to zero mean and unit variance\n\nclassifier = LinearSVC()\nclassifier.fit(R_scaled, classes)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, the term \"linear\" refers to the *kernel* function used to calculate inner products\nbetween vectors in the space. We can use a linear SVM because we have already embedded the\ngraphs in a feature space based on GBS. 
We have also rescaled the feature vectors so that they\nzero mean and unit variance using scikit-learn's ``StandardScaler``, a technique\n`often used `__ in machine learning.\n\nWe can then visualize the trained SVM by plotting the decision boundary with respect to the\npoints:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "w = classifier.coef_[0]\ni = classifier.intercept_[0]\n\nm = -w[0] / w[1] # finding the values for y = mx + b\nb = -i / w[1]\n\nxx = [-1, 1]\nyy = [m * x + b for x in xx]\n\nfig = plot.points(R_scaled, classes)\nfig.add_trace(plotly.graph_objects.Scatter(x=xx, y=yy, mode=\"lines\"))\n\nplotly.offline.plot(fig, filename=\"SVM.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/SVM.html\n\nThis plot shows the two classes (grey points for class 0 and red points for class 1)\nsuccessfully separated by the linear hyperplane using the GBS feature space. Moreover,\nrecall that the two MUTAG1 and MUTAG2 graphs of class 0 are actually isomorphic. Reassuringly,\ntheir corresponding feature vectors are very similar. In fact, the feature vectors of\nisomorphic graphs should always be identical :cite:`bradler2018graph` - the small discrepancy\nin this plot is due to the statistical approximation from sampling.\n\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_similarity.py b/doc/tutorials_apps/run_tutorial_similarity.py new file mode 100644 index 000000000..e4b985a9b --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_similarity.py @@ -0,0 +1,314 @@ +# pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports,invalid-name +""" +.. _apps-sim-tutorial: + +Graph similarity +================ + +*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.similarity` + +This page looks at how to use GBS to construct a similarity measure between graphs, +known as a graph kernel :cite:`schuld2019quantum`. Kernels can be applied to graph-based +data for machine learning tasks such as classification using a support vector machine. + +Graph data +---------- + +We begin by fixing a dataset of graphs to consider and loading GBS samples from these graphs, +which will be needed in the following. + +Let's use the MUTAG dataset of graphs :cite:`debnath1991structure,kriege2012subgraph`. This is a +dataset of 188 different graphs that each correspond to the structure of a chemical compound. Our +goal is to use GBS samples from these graphs to measure their similarity. + +The :mod:`~.apps.data` module provides pre-calculated GBS samples for selected graphs in the MUTAG +dataset. Each set of samples is generated by encoding the graph into a GBS device, and collecting +photon click events. We'll start by loading four sets of samples and visualizing the +corresponding graphs. 
+""" + +from strawberryfields.apps import data, plot, similarity + +m0 = data.Mutag0() +m1 = data.Mutag1() +m2 = data.Mutag2() +m3 = data.Mutag3() + +############################################################################## +# These datasets contain both the adjacency matrix of the graph and the samples generated through +# GBS. We can access the adjacency matrix through: + +m0_a = m0.adj +m1_a = m1.adj +m2_a = m2.adj +m3_a = m3.adj + +############################################################################## +# Samples from these graphs can be accessed by indexing: + +print(m0[0]) + +############################################################################## +# We can now plot the four graphs using the :mod:`~.apps.plot` module. To use this module, +# we need to convert the adjacency matrices into NetworkX Graphs: + +import networkx as nx +import plotly + +plot_mutag_0 = plot.graph(nx.Graph(m0_a)) +plot_mutag_1 = plot.graph(nx.Graph(m1_a)) +plot_mutag_2 = plot.graph(nx.Graph(m2_a)) +plot_mutag_3 = plot.graph(nx.Graph(m3_a)) + +plotly.offline.plot(plot_mutag_0, filename="MUTAG_0.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/MUTAG_0.html +# +# .. note:: +# The command ``plotly.offline.plot()`` is used to display plots in the documentation. In +# practice, you can simply use ``plot_mutag_0.show()`` to view your graph. + +plotly.offline.plot(plot_mutag_1, filename="MUTAG_1.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/MUTAG_1.html + +plotly.offline.plot(plot_mutag_2, filename="MUTAG_2.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/MUTAG_2.html + +plotly.offline.plot(plot_mutag_3, filename="MUTAG_3.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/MUTAG_3.html +# +# The graphs of ``m1_a`` and ``m2_a`` look very similar. In fact, +# it turns out that they are *isomorphic* to each other, which means that the graphs can be made +# identical by permuting their node labels. + +############################################################################## +# Creating a feature vector +# ------------------------- +# +# Following :cite:`schuld2019quantum`, we can create a *feature vector* to describe each graph. +# These feature vectors contain information about the graphs and can be viewed as a mapping to a +# high-dimensional feature space, a technique often used in machine learning that allows us to +# employ properties of the feature space to separate and classify the vectors. +# +# The feature vector of a graph can be composed in a variety of ways. One approach is to +# associate features with the relative frequencies of certain types of measurements being +# recorded from a GBS device configured to sample from the graph, as we now discuss. +# +# We begin by defining the concept of an *orbit*, which is the set of all GBS samples that are +# equivalent under permutation of the modes. A sample can be converted to its corresponding orbit +# using the :func:`~.sample_to_orbit` function. 
For example, the first sample of ``m0`` is ``[0, +# 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]`` and has orbit: + +print(similarity.sample_to_orbit(m0[0])) + +############################################################################## +# Here, ``[1, 1]`` means that two photons were detected, each in a separate mode. Other samples +# can be randomly generated from the ``[1, 1]`` orbit using: + +print(similarity.orbit_to_sample([1, 1], modes=m0.modes)) + +############################################################################## +# Orbits provide a useful way to coarse-grain the samples from GBS into outcomes that are +# statistically more likely to be observed. However, we are interested in coarse-graining further +# into *events*, which correspond to a combination of orbits with the same photon number such +# that the number of photons counted in each mode does not exceed a fixed value +# ``max_count_per_mode``. To understand this, let's look at all of the orbits with a photon +# number of 5: + +print(list(similarity.orbits(5))) + +############################################################################## +# All 5-photon samples belong to one of the orbits above. A 5-photon event with +# ``max_count_per_mode = 3`` means that we include the orbits: ``[[1, 1, 1, 1, 1], [2, 1, 1, 1], +# [3, 1, 1], [2, 2, 1], [3, 2]]`` and ignore the orbits ``[[4, 1], [5]]``. For example, +# the sample ``[0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0]`` is a 5-photon event: + +print(similarity.sample_to_event([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0], 3)) + +############################################################################## +# Samples with more than ``max_count_per_mode`` in any mode are not counted as part of the event: + +print(similarity.sample_to_event([0, 4, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)) + +############################################################################## +# Now that we have mastered orbits and events, how can we make a feature vector? It was shown in +# :cite:`schuld2019quantum` that one way of making a feature vector of a graph is through the +# frequencies of events. Specifically, for a :math:`k` photon event :math:`E_{k, n_{\max}}` +# with maximum count per mode :math:`n_{\max}` and corresponding probability :math:`p_{k, +# n_{\max}}:=p_{E_{k, n_{\max}}}(G)` with respect to a graph :math:`G`, a feature vector can be +# written as +# +# .. math:: +# f_{\mathbf{k}, n_{\max}} = (p_{k_{1}, n_{\max}}, p_{k_{2}, n_{\max}}, \ldots , p_{k_{K}, +# n_{\max}}), +# +# where :math:`\mathbf{k} := (k_{1}, k_{2}, \ldots , k_{K})` is a list of different total photon +# numbers. +# +# For example, if :math:`\mathbf{k} := (2, 4, 6)` and :math:`n_{\max} = 2`, we have +# +# .. math:: +# f_{(2, 4, 6), 2} = (p_{2, 2}, p_{4, 2}, p_{6, 2}). +# +# In this case, we are interested in the probabilities of events :math:`E_{2, 2}`, :math:`E_{4, +# 2}`, and :math:`E_{6, 2}`. Suppose we are sampling from a four-mode device and have the samples +# ``[0, 3, 0, 1]`` and ``[1, 2, 0, 1]``. These samples are part of the orbits ``[3, 1]`` and +# ``[2, 1, 1]``, respectively. However, ``[3, 1]`` is not part of the :math:`E_{4, 2}` event while +# ``[2, 1, 1]`` is. +# +# Calculating a feature vector +# ---------------------------- +# +# We provide two methods for calculating a feature vector of GBS event probabilities in +# Strawberry Fields: +# +# 1. Through sampling. +# 2. Using a Monte Carlo estimate of the probability. 
+# +# In the first method, all one needs to do is generate some GBS samples from the graph of +# interest and fix the composition of the feature vector. For example, for a feature vector +# :math:`f_{\mathbf{k} = (2, 4, 6), n_{\max}=2}` we use: + +print(similarity.feature_vector_sampling(m0, event_photon_numbers=[2, 4, 6], max_count_per_mode=2)) + +############################################################################## +# For the second method, suppose we want to calculate the event probabilities exactly rather than +# through sampling. To do this, we consider the event probability :math:`p_{k, n_{\max}}` as the +# sum over all sample probabilities in the event. In GBS, each sample probability is determined by +# the hafnian of a relevant sub-adjacency matrix. While this is tough to calculate, what makes +# calculating :math:`p_{k, n_{\max}}` really challenging is the number of samples the corresponding +# event contains! For example, the 6-photon event over 17 modes :math:`E_{k=6, n_{\max}=2}` +# contains the following number of samples : + +print(similarity.event_cardinality(6, 2, 17)) + +############################################################################## +# To avoid calculating a large number of sample probabilities, an alternative is to perform a +# Monte Carlo approximation. Here, samples within an event are selected uniformly at random and +# their resultant probabilities are calculated. If :math:`N` samples :math:`\{S_{1}, S_{2}, +# \ldots , S_{N}\}` are generated, then the event probability can be approximated as +# +# .. math:: +# p(E_{k, n_{\max}}) \approx \frac{1}{N}\sum_{i=1}^N p(S_i) |E_{k, n_{\max}}|, +# +# with :math:`|E_{k, n_{\max}}|` denoting the cardinality of the event. +# +# This method can be accessed using the :func:`~.prob_event_mc` function. The 4-photon event is +# approximated as: + +print(similarity.prob_event_mc(nx.Graph(m0_a), 4, max_count_per_mode=2, n_mean=6)) + +############################################################################## +# The feature vector can then be calculated through Monte Carlo sampling using +# :func:`~.feature_vector_mc`. +# +# .. note:: +# The results of :func:`~.prob_event_mc` and :func:`~.feature_vector_mc` are probabilistic and +# may vary between runs. Increasing the optional ``samples`` parameter will increase accuracy +# but slow down calculation. +# +# The second method of Monte Carlo approximation is intended for use in scenarios where it is +# computationally intensive to pre-calculate a statistically significant dataset of samples from +# GBS. +# +# Machine learning with GBS graph kernels +# --------------------------------------- +# +# The power of feature vectors that embed graphs in a vector space of real numbers is that we can +# now measure similarities between graphs. This is very useful in machine learning, where similar +# labels are assigned to graphs that are close to each other. GBS feature vectors therefore give +# rise to a similarity measure between graphs! +# +# Let's build this up a bit more. The MUTAG dataset we are considering contains not only graphs +# corresponding to the structure of chemical compounds, but also a *label* of each +# compound based upon its mutagenic effect. 
The four graphs we consider here have labels: +# +# - MUTAG0: Class 1 +# - MUTAG1: Class 0 +# - MUTAG2: Class 0 +# - MUTAG3: Class 1 + +classes = [1, 0, 0, 1] + +############################################################################## +# We can use GBS feature vectors in a `support vector machine +# `__ (SVM) that finds a separating +# hyperplane between classes in the feature space. We start by defining two-dimensional feature +# vectors: + +events = [8, 10] +max_count = 2 + +f1 = similarity.feature_vector_sampling(m0, events, max_count) +f2 = similarity.feature_vector_sampling(m1, events, max_count) +f3 = similarity.feature_vector_sampling(m2, events, max_count) +f4 = similarity.feature_vector_sampling(m3, events, max_count) + +import numpy as np + +R = np.array([f1, f2, f3, f4]) + +print(R) + +############################################################################## +# There is freedom in the choice of ``events`` composing the feature vectors and we encourage the +# reader to explore different combinations. Note, however, that odd photon-numbered events have +# zero probability because ideal GBS only generates and outputs pairs of photons. +# +# Given our points in the feature space and their target labels, we can use +# scikit-learn's Support Vector Machine `LinearSVC `__ as our model to train: + +from sklearn.svm import LinearSVC +from sklearn.preprocessing import StandardScaler + +R_scaled = StandardScaler().fit_transform(R) # Transform data to zero mean and unit variance + +classifier = LinearSVC() +classifier.fit(R_scaled, classes) + +############################################################################## +# Here, the term "linear" refers to the *kernel* function used to calculate inner products +# between vectors in the space. We can use a linear SVM because we have already embedded the +# graphs in a feature space based on GBS. We have also rescaled the feature vectors so that they +# zero mean and unit variance using scikit-learn's ``StandardScaler``, a technique +# `often used `__ in machine learning. +# +# We can then visualize the trained SVM by plotting the decision boundary with respect to the +# points: + +w = classifier.coef_[0] +i = classifier.intercept_[0] + +m = -w[0] / w[1] # finding the values for y = mx + b +b = -i / w[1] + +xx = [-1, 1] +yy = [m * x + b for x in xx] + +fig = plot.points(R_scaled, classes) +fig.add_trace(plotly.graph_objects.Scatter(x=xx, y=yy, mode="lines")) + +plotly.offline.plot(fig, filename="SVM.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/SVM.html +# +# This plot shows the two classes (grey points for class 0 and red points for class 1) +# successfully separated by the linear hyperplane using the GBS feature space. Moreover, +# recall that the two MUTAG1 and MUTAG2 graphs of class 0 are actually isomorphic. Reassuringly, +# their corresponding feature vectors are very similar. In fact, the feature vectors of +# isomorphic graphs should always be identical :cite:`bradler2018graph` - the small discrepancy +# in this plot is due to the statistical approximation from sampling. 
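+
+##############################################################################
+# As a quick sanity check (a hypothetical extra step, not part of the original tutorial), we can
+# ask the trained classifier to label the four training points again and compare the result with
+# the known ``classes``. With only four well-separated points, the predictions should reproduce
+# the labels exactly.
+
+predictions = classifier.predict(R_scaled)  # predicted class for each scaled feature vector
+print(predictions)  # expected to agree with ``classes``
+print(classifier.score(R_scaled, classes))  # fraction of correctly classified graphs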
diff --git a/doc/tutorials_apps/run_tutorial_similarity.py.md5 b/doc/tutorials_apps/run_tutorial_similarity.py.md5 new file mode 100644 index 000000000..a62980ce8 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_similarity.py.md5 @@ -0,0 +1 @@ +d84b65b306f8ed47beabf3b7ac1d4425 \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_similarity.rst b/doc/tutorials_apps/run_tutorial_similarity.rst new file mode 100644 index 000000000..e0d3341bc --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_similarity.rst @@ -0,0 +1,596 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here ` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorials_apps_run_tutorial_similarity.py: + + +.. _apps-sim-tutorial: + +Graph similarity +================ + +*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.similarity` + +This page looks at how to use GBS to construct a similarity measure between graphs, +known as a graph kernel :cite:`schuld2019quantum`. Kernels can be applied to graph-based +data for machine learning tasks such as classification using a support vector machine. + +Graph data +---------- + +We begin by fixing a dataset of graphs to consider and loading GBS samples from these graphs, +which will be needed in the following. + +Let's use the MUTAG dataset of graphs :cite:`debnath1991structure,kriege2012subgraph`. This is a +dataset of 188 different graphs that each correspond to the structure of a chemical compound. Our +goal is to use GBS samples from these graphs to measure their similarity. + +The :mod:`~.apps.data` module provides pre-calculated GBS samples for selected graphs in the MUTAG +dataset. Each set of samples is generated by encoding the graph into a GBS device, and collecting +photon click events. We'll start by loading four sets of samples and visualizing the +corresponding graphs. + + +.. code-block:: default + + + from strawberryfields.apps import data, plot, similarity + + m0 = data.Mutag0() + m1 = data.Mutag1() + m2 = data.Mutag2() + m3 = data.Mutag3() + + + + + + + +These datasets contain both the adjacency matrix of the graph and the samples generated through +GBS. We can access the adjacency matrix through: + + +.. code-block:: default + + + m0_a = m0.adj + m1_a = m1.adj + m2_a = m2.adj + m3_a = m3.adj + + + + + + + +Samples from these graphs can be accessed by indexing: + + +.. code-block:: default + + + print(m0[0]) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0] + + +We can now plot the four graphs using the :mod:`~.apps.plot` module. To use this module, +we need to convert the adjacency matrices into NetworkX Graphs: + + +.. code-block:: default + + + import networkx as nx + import plotly + + plot_mutag_0 = plot.graph(nx.Graph(m0_a)) + plot_mutag_1 = plot.graph(nx.Graph(m1_a)) + plot_mutag_2 = plot.graph(nx.Graph(m2_a)) + plot_mutag_3 = plot.graph(nx.Graph(m3_a)) + + plotly.offline.plot(plot_mutag_0, filename="MUTAG_0.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/MUTAG_0.html + +.. note:: + The command ``plotly.offline.plot()`` is used to display plots in the documentation. In + practice, you can simply use ``plot_mutag_0.show()`` to view your graph. + + +.. code-block:: default + + + plotly.offline.plot(plot_mutag_1, filename="MUTAG_1.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/MUTAG_1.html + + +.. 
code-block:: default + + + plotly.offline.plot(plot_mutag_2, filename="MUTAG_2.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/MUTAG_2.html + + +.. code-block:: default + + + plotly.offline.plot(plot_mutag_3, filename="MUTAG_3.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/MUTAG_3.html + +The graphs of ``m1_a`` and ``m2_a`` look very similar. In fact, +it turns out that they are *isomorphic* to each other, which means that the graphs can be made +identical by permuting their node labels. + +Creating a feature vector +------------------------- + +Following :cite:`schuld2019quantum`, we can create a *feature vector* to describe each graph. +These feature vectors contain information about the graphs and can be viewed as a mapping to a +high-dimensional feature space, a technique often used in machine learning that allows us to +employ properties of the feature space to separate and classify the vectors. + +The feature vector of a graph can be composed in a variety of ways. One approach is to +associate features with the relative frequencies of certain types of measurements being +recorded from a GBS device configured to sample from the graph, as we now discuss. + +We begin by defining the concept of an *orbit*, which is the set of all GBS samples that are +equivalent under permutation of the modes. A sample can be converted to its corresponding orbit +using the :func:`~.sample_to_orbit` function. For example, the first sample of ``m0`` is ``[0, +0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]`` and has orbit: + + +.. code-block:: default + + + print(similarity.sample_to_orbit(m0[0])) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + [1, 1] + + +Here, ``[1, 1]`` means that two photons were detected, each in a separate mode. Other samples +can be randomly generated from the ``[1, 1]`` orbit using: + + +.. code-block:: default + + + print(similarity.orbit_to_sample([1, 1], modes=m0.modes)) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0] + + +Orbits provide a useful way to coarse-grain the samples from GBS into outcomes that are +statistically more likely to be observed. However, we are interested in coarse-graining further +into *events*, which correspond to a combination of orbits with the same photon number such +that the number of photons counted in each mode does not exceed a fixed value +``max_count_per_mode``. To understand this, let's look at all of the orbits with a photon +number of 5: + + +.. code-block:: default + + + print(list(similarity.orbits(5))) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + [[1, 1, 1, 1, 1], [2, 1, 1, 1], [3, 1, 1], [2, 2, 1], [4, 1], [3, 2], [5]] + + +All 5-photon samples belong to one of the orbits above. A 5-photon event with +``max_count_per_mode = 3`` means that we include the orbits: ``[[1, 1, 1, 1, 1], [2, 1, 1, 1], +[3, 1, 1], [2, 2, 1], [3, 2]]`` and ignore the orbits ``[[4, 1], [5]]``. For example, +the sample ``[0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0]`` is a 5-photon event: + + +.. code-block:: default + + + print(similarity.sample_to_event([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0], 3)) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + 5 + + +Samples with more than ``max_count_per_mode`` in any mode are not counted as part of the event: + + +.. 
code-block:: default + + + print(similarity.sample_to_event([0, 4, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + None + + +Now that we have mastered orbits and events, how can we make a feature vector? It was shown in +:cite:`schuld2019quantum` that one way of making a feature vector of a graph is through the +frequencies of events. Specifically, for a :math:`k` photon event :math:`E_{k, n_{\max}}` +with maximum count per mode :math:`n_{\max}` and corresponding probability :math:`p_{k, +n_{\max}}:=p_{E_{k, n_{\max}}}(G)` with respect to a graph :math:`G`, a feature vector can be +written as + +.. math:: + f_{\mathbf{k}, n_{\max}} = (p_{k_{1}, n_{\max}}, p_{k_{2}, n_{\max}}, \ldots , p_{k_{K}, + n_{\max}}), + +where :math:`\mathbf{k} := (k_{1}, k_{2}, \ldots , k_{K})` is a list of different total photon +numbers. + +For example, if :math:`\mathbf{k} := (2, 4, 6)` and :math:`n_{\max} = 2`, we have + +.. math:: + f_{(2, 4, 6), 2} = (p_{2, 2}, p_{4, 2}, p_{6, 2}). + +In this case, we are interested in the probabilities of events :math:`E_{2, 2}`, :math:`E_{4, +2}`, and :math:`E_{6, 2}`. Suppose we are sampling from a four-mode device and have the samples +``[0, 3, 0, 1]`` and ``[1, 2, 0, 1]``. These samples are part of the orbits ``[3, 1]`` and +``[2, 1, 1]``, respectively. However, ``[3, 1]`` is not part of the :math:`E_{4, 2}` event while +``[2, 1, 1]`` is. + +Calculating a feature vector +---------------------------- + +We provide two methods for calculating a feature vector of GBS event probabilities in +Strawberry Fields: + +1. Through sampling. +2. Using a Monte Carlo estimate of the probability. + +In the first method, all one needs to do is generate some GBS samples from the graph of +interest and fix the composition of the feature vector. For example, for a feature vector +:math:`f_{\mathbf{k} = (2, 4, 6), n_{\max}=2}` we use: + + +.. code-block:: default + + + print(similarity.feature_vector_sampling(m0, event_photon_numbers=[2, 4, 6], max_count_per_mode=2)) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + [0.19035, 0.2047, 0.1539] + + +For the second method, suppose we want to calculate the event probabilities exactly rather than +through sampling. To do this, we consider the event probability :math:`p_{k, n_{\max}}` as the +sum over all sample probabilities in the event. In GBS, each sample probability is determined by +the hafnian of a relevant sub-adjacency matrix. While this is tough to calculate, what makes +calculating :math:`p_{k, n_{\max}}` really challenging is the number of samples the corresponding +event contains! For example, the 6-photon event over 17 modes :math:`E_{k=6, n_{\max}=2}` +contains the following number of samples : + + +.. code-block:: default + + + print(similarity.event_cardinality(6, 2, 17)) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + 58276 + + +To avoid calculating a large number of sample probabilities, an alternative is to perform a +Monte Carlo approximation. Here, samples within an event are selected uniformly at random and +their resultant probabilities are calculated. If :math:`N` samples :math:`\{S_{1}, S_{2}, +\ldots , S_{N}\}` are generated, then the event probability can be approximated as + +.. math:: + p(E_{k, n_{\max}}) \approx \frac{1}{N}\sum_{i=1}^N p(S_i) |E_{k, n_{\max}}|, + +with :math:`|E_{k, n_{\max}}|` denoting the cardinality of the event. 
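+
+As a rough illustration (a sketch, not the library's internal implementation), the estimator
+above is just an average of sample probabilities rescaled by the event cardinality. Assuming we
+already had an array ``p_samples`` holding the GBS probabilities of a few samples drawn uniformly
+from the 6-photon event, the estimate could be computed as:
+
+
+.. code-block:: default
+
+
+    import numpy as np
+
+    p_samples = np.array([1.2e-5, 3.0e-5, 2.1e-5])  # hypothetical sample probabilities
+    cardinality = similarity.event_cardinality(6, 2, 17)  # number of samples in the event
+    print(np.mean(p_samples) * cardinality)  # Monte Carlo estimate of the event probability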
+ +This method can be accessed using the :func:`~.prob_event_mc` function. The 4-photon event is +approximated as: + + +.. code-block:: default + + + print(similarity.prob_event_mc(nx.Graph(m0_a), 4, max_count_per_mode=2, n_mean=6)) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + 0.20519880474018276 + + +The feature vector can then be calculated through Monte Carlo sampling using +:func:`~.feature_vector_mc`. + +.. note:: + The results of :func:`~.prob_event_mc` and :func:`~.feature_vector_mc` are probabilistic and + may vary between runs. Increasing the optional ``samples`` parameter will increase accuracy + but slow down calculation. + +The second method of Monte Carlo approximation is intended for use in scenarios where it is +computationally intensive to pre-calculate a statistically significant dataset of samples from +GBS. + +Machine learning with GBS graph kernels +--------------------------------------- + +The power of feature vectors that embed graphs in a vector space of real numbers is that we can +now measure similarities between graphs. This is very useful in machine learning, where similar +labels are assigned to graphs that are close to each other. GBS feature vectors therefore give +rise to a similarity measure between graphs! + +Let's build this up a bit more. The MUTAG dataset we are considering contains not only graphs +corresponding to the structure of chemical compounds, but also a *label* of each +compound based upon its mutagenic effect. The four graphs we consider here have labels: + +- MUTAG0: Class 1 +- MUTAG1: Class 0 +- MUTAG2: Class 0 +- MUTAG3: Class 1 + + +.. code-block:: default + + + classes = [1, 0, 0, 1] + + + + + + + +We can use GBS feature vectors in a `support vector machine +`__ (SVM) that finds a separating +hyperplane between classes in the feature space. We start by defining two-dimensional feature +vectors: + + +.. code-block:: default + + + events = [8, 10] + max_count = 2 + + f1 = similarity.feature_vector_sampling(m0, events, max_count) + f2 = similarity.feature_vector_sampling(m1, events, max_count) + f3 = similarity.feature_vector_sampling(m2, events, max_count) + f4 = similarity.feature_vector_sampling(m3, events, max_count) + + import numpy as np + + R = np.array([f1, f2, f3, f4]) + + print(R) + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + [[0.0884 0.042 ] + [0.0704 0.02855] + [0.06995 0.02935] + [0.0962 0.04585]] + + +There is freedom in the choice of ``events`` composing the feature vectors and we encourage the +reader to explore different combinations. Note, however, that odd photon-numbered events have +zero probability because ideal GBS only generates and outputs pairs of photons. + +Given our points in the feature space and their target labels, we can use +scikit-learn's Support Vector Machine `LinearSVC `__ as our model to train: + + +.. code-block:: default + + + from sklearn.svm import LinearSVC + from sklearn.preprocessing import StandardScaler + + R_scaled = StandardScaler().fit_transform(R) # Transform data to zero mean and unit variance + + classifier = LinearSVC() + classifier.fit(R_scaled, classes) + + + + + + + +Here, the term "linear" refers to the *kernel* function used to calculate inner products +between vectors in the space. We can use a linear SVM because we have already embedded the +graphs in a feature space based on GBS. 
We have also rescaled the feature vectors so that they +zero mean and unit variance using scikit-learn's ``StandardScaler``, a technique +`often used `__ in machine learning. + +We can then visualize the trained SVM by plotting the decision boundary with respect to the +points: + + +.. code-block:: default + + + w = classifier.coef_[0] + i = classifier.intercept_[0] + + m = -w[0] / w[1] # finding the values for y = mx + b + b = -i / w[1] + + xx = [-1, 1] + yy = [m * x + b for x in xx] + + fig = plot.points(R_scaled, classes) + fig.add_trace(plotly.graph_objects.Scatter(x=xx, y=yy, mode="lines")) + + plotly.offline.plot(fig, filename="SVM.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/SVM.html + +This plot shows the two classes (grey points for class 0 and red points for class 1) +successfully separated by the linear hyperplane using the GBS feature space. Moreover, +recall that the two MUTAG1 and MUTAG2 graphs of class 0 are actually isomorphic. Reassuringly, +their corresponding feature vectors are very similar. In fact, the feature vectors of +isomorphic graphs should always be identical :cite:`bradler2018graph` - the small discrepancy +in this plot is due to the statistical approximation from sampling. + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 27.124 seconds) + + +.. _sphx_glr_download_tutorials_apps_run_tutorial_similarity.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: run_tutorial_similarity.py ` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: run_tutorial_similarity.ipynb ` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/run_tutorial_vibronic.ipynb b/doc/tutorials_apps/run_tutorial_vibronic.ipynb new file mode 100644 index 000000000..e187dae29 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_vibronic.ipynb @@ -0,0 +1,161 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# This cell is added by sphinx-gallery\n# It can be customized to whatever you like\n%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\nVibronic spectra\n================\n\n*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.vibronic`\n\nHere we study how GBS can be used to compute vibronic spectra. So let's start from\nthe beginning: what is a vibronic spectrum? Molecules absorb light at frequencies that depend on\nthe allowed transitions between different electronic states. These electronic transitions\ncan be accompanied by changes in the vibrational energy of the molecules. In this case, the\nabsorption lines that represent the frequencies at which light is more strongly absorbed are\nreferred to as the *vibronic* spectrum. The term *vibronic* refers to the simultaneous vibrational\nand electronic transitions of a molecule upon absorption of light.\n\nIt is possible to determine vibronic spectra by running clever and careful spectroscopy experiments.\nHowever, this can be slow and expensive, in which case it is valuable to predict vibronic spectra\nusing theoretical calculations. To model molecular vibronic transitions with GBS, we need only a few\nrelevant molecular parameters:\n\n#. 
$\\Omega$: diagonal matrix whose entries are the square-roots of the frequencies of the\n normal modes of the electronic *initial* state.\n#. $\\Omega'$: diagonal matrix whose entries are the square-roots of the frequencies of the\n normal modes of the electronic *final* state.\n#. $U_\\text{D}$: Duschinsky matrix.\n#. $\\delta$: displacement vector.\n#. $T$: temperature.\n\nThe Duschinsky matrix and displacement vector encode information regarding how\nvibrational modes are transformed when the molecule changes from the initial to final electronic\nstate. At zero temperature, all initial modes are in the vibrational ground state. At finite\ntemperature, other vibrational states are also populated.\n\nIn the GBS algorithm for computing vibronic spectra :cite:`huh2015boson`, these chemical parameters\nare sufficient to determine the configuration of a GBS device. As opposed to other applications\nthat involve only single-mode squeezing and linear interferometry, in vibronic spectra we\nprepare a Gaussian state using two-mode squeezing, linear interferometry, single-mode squeezing,\nand displacements.\n\nThe function :func:`~.gbs_params` of the :mod:`~.apps.vibronic` module can be\nused to obtain the squeezing, interferometer, and displacement parameters from the input\nchemical parameters listed above. In this page, we study the vibronic spectrum of\n`formic acid `_ \ud83d\udc1c. Its chemical parameters, obtained\nfrom :cite:`huh2015boson`, can be found in the :mod:`~.apps.data` module:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from strawberryfields.apps import vibronic, data\nimport numpy as np\nformic = data.Formic()\nw = formic.w # ground state frequencies\nwp = formic.wp # excited state frequencies\nUd = formic.Ud # Duschinsky matrix\ndelta = formic.delta # displacement vector\nT = 0 # temperature" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now map this chemical information to GBS parameters using the function\n:func:`~.gbs_params`:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "t, U1, r, U2, alpha = vibronic.gbs_params(w, wp, Ud, delta, T)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that since two-mode squeezing operators are involved, if we have $N$ vibrational\nmodes, the Gaussian state prepared is a $2N$-mode Gaussian state and the samples\nare vectors of length $2N$. The first $N$ modes are those of the final electronic\nstate; the remaining $N$ modes are those of the ground state. From above, $t$ is a\nvector of two-mode squeezing parameters, $U_1$ and $U_2$ are the interferometer\nunitaries (we need two interferometers), $r$ is a vector of single-mode squeezing\nparameters, and `alpha` is a vector of displacements.\n\nPhotons detected at the output of the GBS device correspond to a specific transition energy.\nThe GBS algorithm for vibronic spectra works because the programmed device provides samples\nin such a way that the energies that are sampled with high probability are the peaks of the\nvibronic spectrum. The function :func:`~.energies` can be used to compute the energies for\na set of samples. 
In this case we show the energy of the first five samples:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "e = vibronic.energies(formic, w, wp)\nprint(np.around(e[:5], 4)) # 4 decimal precision" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once the GBS parameters have been obtained, it is straightforward to run the GBS algorithm: we\ngenerate many samples, compute their energies, and make a histogram of the observed energies.\nThe :mod:`~.apps.sample` module contains the function :func:`~.vibronic`, which is tailored for\nuse in vibronic spectra applications. Similarly, the :mod:`~.apps.plot` module includes a\n:func:`~.spectrum` function that generates the vibronic spectrum from the GBS samples. Let's see\nhow this is done for just a few samples:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from strawberryfields.apps import sample, plot\nimport plotly\nnr_samples = 10\ns = sample.vibronic(t, U1, r, U2, alpha, nr_samples)\ne = vibronic.energies(s, w, wp)\nspectrum = plot.spectrum(e, xmin=-1000, xmax=8000)\nplotly.offline.plot(spectrum, filename=\"spectrum.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/spectrum.html\n\n

<div class=\"alert alert-info\"><h4>Note</h4><p>The command ``plotly.offline.plot()`` is used to display plots in the documentation. In\n    practice, you can simply use ``spectrum.show()`` to generate the figure.</p></div>

\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The bars in the plot are the histogram of energies. The curve surrounding them is a Lorentzian\nbroadening of the spectrum, which better represents the observations from an actual experiment.\nOf course, 10 samples are not enough to accurately reconstruct the vibronic spectrum. Let's\ninstead use the 20,000 pre-generated samples from the :mod:`~.apps.data` module.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "e = vibronic.energies(formic, w, wp)\nfull_spectrum = plot.spectrum(e, xmin=-1000, xmax=8000)\nplotly.offline.plot(full_spectrum, filename=\"full_spectrum.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ".. raw:: html\n :file: ../../examples_apps/full_spectrum.html\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can compare this prediction with an actual experimental spectrum, obtained from Fig. 3 in\nRef. :cite:`huh2015boson`, shown below:\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![](../_static/formic_spec.png)\n\n :width: 740px\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The agreement is remarkable! Formic acid is a small molecule, which means that its vibronic\nspectrum can be computed using classical computers. However, for larger molecules, this task\nquickly becomes intractable, for much the same reason that simulating GBS cannot be done\nefficiently with classical devices. Photonic quantum computing therefore holds the potential to\nenable new computational capabilities in this area of quantum chemistry \u269b\ufe0f.\n\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_vibronic.py b/doc/tutorials_apps/run_tutorial_vibronic.py new file mode 100644 index 000000000..bee8bd0bf --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_vibronic.py @@ -0,0 +1,134 @@ +# pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports +r""" +.. _apps-vibronic-tutorial: + +Vibronic spectra +================ + +*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.vibronic` + +Here we study how GBS can be used to compute vibronic spectra. So let's start from +the beginning: what is a vibronic spectrum? Molecules absorb light at frequencies that depend on +the allowed transitions between different electronic states. These electronic transitions +can be accompanied by changes in the vibrational energy of the molecules. In this case, the +absorption lines that represent the frequencies at which light is more strongly absorbed are +referred to as the *vibronic* spectrum. The term *vibronic* refers to the simultaneous vibrational +and electronic transitions of a molecule upon absorption of light. + +It is possible to determine vibronic spectra by running clever and careful spectroscopy experiments. 
+However, this can be slow and expensive, in which case it is valuable to predict vibronic spectra +using theoretical calculations. To model molecular vibronic transitions with GBS, we need only a few +relevant molecular parameters: + +#. :math:`\Omega`: diagonal matrix whose entries are the square-roots of the frequencies of the + normal modes of the electronic *initial* state. +#. :math:`\Omega'`: diagonal matrix whose entries are the square-roots of the frequencies of the + normal modes of the electronic *final* state. +#. :math:`U_\text{D}`: Duschinsky matrix. +#. :math:`\delta`: displacement vector. +#. :math:`T`: temperature. + +The Duschinsky matrix and displacement vector encode information regarding how +vibrational modes are transformed when the molecule changes from the initial to final electronic +state. At zero temperature, all initial modes are in the vibrational ground state. At finite +temperature, other vibrational states are also populated. + +In the GBS algorithm for computing vibronic spectra :cite:`huh2015boson`, these chemical parameters +are sufficient to determine the configuration of a GBS device. As opposed to other applications +that involve only single-mode squeezing and linear interferometry, in vibronic spectra we +prepare a Gaussian state using two-mode squeezing, linear interferometry, single-mode squeezing, +and displacements. + +The function :func:`~.gbs_params` of the :mod:`~.apps.vibronic` module can be +used to obtain the squeezing, interferometer, and displacement parameters from the input +chemical parameters listed above. In this page, we study the vibronic spectrum of +`formic acid `_ 🐜. Its chemical parameters, obtained +from :cite:`huh2015boson`, can be found in the :mod:`~.apps.data` module: +""" +from strawberryfields.apps import vibronic, data +import numpy as np +formic = data.Formic() +w = formic.w # ground state frequencies +wp = formic.wp # excited state frequencies +Ud = formic.Ud # Duschinsky matrix +delta = formic.delta # displacement vector +T = 0 # temperature + +############################################################################## +# We can now map this chemical information to GBS parameters using the function +# :func:`~.gbs_params`: + +t, U1, r, U2, alpha = vibronic.gbs_params(w, wp, Ud, delta, T) + +############################################################################## +# Note that since two-mode squeezing operators are involved, if we have :math:`N` vibrational +# modes, the Gaussian state prepared is a :math:`2N`-mode Gaussian state and the samples +# are vectors of length :math:`2N`. The first :math:`N` modes are those of the final electronic +# state; the remaining :math:`N` modes are those of the ground state. From above, :math:`t` is a +# vector of two-mode squeezing parameters, :math:`U_1` and :math:`U_2` are the interferometer +# unitaries (we need two interferometers), :math:`r` is a vector of single-mode squeezing +# parameters, and `alpha` is a vector of displacements. +# +# Photons detected at the output of the GBS device correspond to a specific transition energy. +# The GBS algorithm for vibronic spectra works because the programmed device provides samples +# in such a way that the energies that are sampled with high probability are the peaks of the +# vibronic spectrum. The function :func:`~.energies` can be used to compute the energies for +# a set of samples. 
In this case we show the energy of the first five samples: + +e = vibronic.energies(formic, w, wp) +print(np.around(e[:5], 4)) # 4 decimal precision + +############################################################################## +# Once the GBS parameters have been obtained, it is straightforward to run the GBS algorithm: we +# generate many samples, compute their energies, and make a histogram of the observed energies. +# The :mod:`~.apps.sample` module contains the function :func:`~.vibronic`, which is tailored for +# use in vibronic spectra applications. Similarly, the :mod:`~.apps.plot` module includes a +# :func:`~.spectrum` function that generates the vibronic spectrum from the GBS samples. Let's see +# how this is done for just a few samples: + +from strawberryfields.apps import sample, plot +import plotly +nr_samples = 10 +s = sample.vibronic(t, U1, r, U2, alpha, nr_samples) +e = vibronic.energies(s, w, wp) +spectrum = plot.spectrum(e, xmin=-1000, xmax=8000) +plotly.offline.plot(spectrum, filename="spectrum.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/spectrum.html +# +# .. note:: +# The command ``plotly.offline.plot()`` is used to display plots in the documentation. In +# practice, you can simply use ``spectrum.show()`` to generate the figure. + +############################################################################## +# The bars in the plot are the histogram of energies. The curve surrounding them is a Lorentzian +# broadening of the spectrum, which better represents the observations from an actual experiment. +# Of course, 10 samples are not enough to accurately reconstruct the vibronic spectrum. Let's +# instead use the 20,000 pre-generated samples from the :mod:`~.apps.data` module. + +e = vibronic.energies(formic, w, wp) +full_spectrum = plot.spectrum(e, xmin=-1000, xmax=8000) +plotly.offline.plot(full_spectrum, filename="full_spectrum.html") + +############################################################################## +# .. raw:: html +# :file: ../../examples_apps/full_spectrum.html + +############################################################################## +# +# We can compare this prediction with an actual experimental spectrum, obtained from Fig. 3 in +# Ref. :cite:`huh2015boson`, shown below: + +############################################################################## +# .. image:: ../_static/formic_spec.png +# :width: 740px + +############################################################################## +# +# The agreement is remarkable! Formic acid is a small molecule, which means that its vibronic +# spectrum can be computed using classical computers. However, for larger molecules, this task +# quickly becomes intractable, for much the same reason that simulating GBS cannot be done +# efficiently with classical devices. Photonic quantum computing therefore holds the potential to +# enable new computational capabilities in this area of quantum chemistry ⚛️. 
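##############################################################################
# To make the Lorentzian broadening mentioned above a little more concrete,
# the short sketch below rebuilds a broadened curve directly from the sampled
# energies ``e`` computed in the previous cell, using NumPy only. It is a
# minimal illustration of the idea rather than the implementation behind
# :func:`~.spectrum`; the linewidth ``gamma`` and the energy grid are
# arbitrary values assumed here purely for the example.

import numpy as np  # already imported above; repeated so the snippet stands alone

gamma = 50.0  # assumed half-width at half-maximum of each peak, in wavenumbers
grid = np.linspace(-1000, 8000, 2000)  # energy grid matching the plot range used above

# Each sampled transition energy contributes one Lorentzian peak;
# the broadened spectrum is the sum of these peaks evaluated on the grid.
broadened = sum((gamma / np.pi) / ((grid - energy) ** 2 + gamma ** 2) for energy in np.ravel(e))

print(grid[np.argmax(broadened)])  # energy at which the broadened curve peaks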
diff --git a/doc/tutorials_apps/run_tutorial_vibronic.py.md5 b/doc/tutorials_apps/run_tutorial_vibronic.py.md5 new file mode 100644 index 000000000..d92eaf695 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_vibronic.py.md5 @@ -0,0 +1 @@ +5512f615815ad19b173f76f0a0f19618 \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_vibronic.rst b/doc/tutorials_apps/run_tutorial_vibronic.rst new file mode 100644 index 000000000..90590ac54 --- /dev/null +++ b/doc/tutorials_apps/run_tutorial_vibronic.rst @@ -0,0 +1,219 @@ +.. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here ` to download the full example code +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_tutorials_apps_run_tutorial_vibronic.py: + + +.. _apps-vibronic-tutorial: + +Vibronic spectra +================ + +*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.vibronic` + +Here we study how GBS can be used to compute vibronic spectra. So let's start from +the beginning: what is a vibronic spectrum? Molecules absorb light at frequencies that depend on +the allowed transitions between different electronic states. These electronic transitions +can be accompanied by changes in the vibrational energy of the molecules. In this case, the +absorption lines that represent the frequencies at which light is more strongly absorbed are +referred to as the *vibronic* spectrum. The term *vibronic* refers to the simultaneous vibrational +and electronic transitions of a molecule upon absorption of light. + +It is possible to determine vibronic spectra by running clever and careful spectroscopy experiments. +However, this can be slow and expensive, in which case it is valuable to predict vibronic spectra +using theoretical calculations. To model molecular vibronic transitions with GBS, we need only a few +relevant molecular parameters: + +#. :math:`\Omega`: diagonal matrix whose entries are the square-roots of the frequencies of the + normal modes of the electronic *initial* state. +#. :math:`\Omega'`: diagonal matrix whose entries are the square-roots of the frequencies of the + normal modes of the electronic *final* state. +#. :math:`U_\text{D}`: Duschinsky matrix. +#. :math:`\delta`: displacement vector. +#. :math:`T`: temperature. + +The Duschinsky matrix and displacement vector encode information regarding how +vibrational modes are transformed when the molecule changes from the initial to final electronic +state. At zero temperature, all initial modes are in the vibrational ground state. At finite +temperature, other vibrational states are also populated. + +In the GBS algorithm for computing vibronic spectra :cite:`huh2015boson`, these chemical parameters +are sufficient to determine the configuration of a GBS device. As opposed to other applications +that involve only single-mode squeezing and linear interferometry, in vibronic spectra we +prepare a Gaussian state using two-mode squeezing, linear interferometry, single-mode squeezing, +and displacements. + +The function :func:`~.gbs_params` of the :mod:`~.apps.vibronic` module can be +used to obtain the squeezing, interferometer, and displacement parameters from the input +chemical parameters listed above. In this page, we study the vibronic spectrum of +`formic acid `_ 🐜. Its chemical parameters, obtained +from :cite:`huh2015boson`, can be found in the :mod:`~.apps.data` module: + + +.. 
code-block:: default + + from strawberryfields.apps import vibronic, data + import numpy as np + formic = data.Formic() + w = formic.w # ground state frequencies + wp = formic.wp # excited state frequencies + Ud = formic.Ud # Duschinsky matrix + delta = formic.delta # displacement vector + T = 0 # temperature + + + + + + + +We can now map this chemical information to GBS parameters using the function +:func:`~.gbs_params`: + + +.. code-block:: default + + + t, U1, r, U2, alpha = vibronic.gbs_params(w, wp, Ud, delta, T) + + + + + + + +Note that since two-mode squeezing operators are involved, if we have :math:`N` vibrational +modes, the Gaussian state prepared is a :math:`2N`-mode Gaussian state and the samples +are vectors of length :math:`2N`. The first :math:`N` modes are those of the final electronic +state; the remaining :math:`N` modes are those of the ground state. From above, :math:`t` is a +vector of two-mode squeezing parameters, :math:`U_1` and :math:`U_2` are the interferometer +unitaries (we need two interferometers), :math:`r` is a vector of single-mode squeezing +parameters, and `alpha` is a vector of displacements. + +Photons detected at the output of the GBS device correspond to a specific transition energy. +The GBS algorithm for vibronic spectra works because the programmed device provides samples +in such a way that the energies that are sampled with high probability are the peaks of the +vibronic spectrum. The function :func:`~.energies` can be used to compute the energies for +a set of samples. In this case we show the energy of the first five samples: + + +.. code-block:: default + + + e = vibronic.energies(formic, w, wp) + print(np.around(e[:5], 4)) # 4 decimal precision + + + + + +.. rst-class:: sphx-glr-script-out + + Out: + + .. code-block:: none + + [1566.4602 4699.3806 1566.4602 4699.3806 4699.3806] + + +Once the GBS parameters have been obtained, it is straightforward to run the GBS algorithm: we +generate many samples, compute their energies, and make a histogram of the observed energies. +The :mod:`~.apps.sample` module contains the function :func:`~.vibronic`, which is tailored for +use in vibronic spectra applications. Similarly, the :mod:`~.apps.plot` module includes a +:func:`~.spectrum` function that generates the vibronic spectrum from the GBS samples. Let's see +how this is done for just a few samples: + + +.. code-block:: default + + + from strawberryfields.apps import sample, plot + import plotly + nr_samples = 10 + s = sample.vibronic(t, U1, r, U2, alpha, nr_samples) + e = vibronic.energies(s, w, wp) + spectrum = plot.spectrum(e, xmin=-1000, xmax=8000) + plotly.offline.plot(spectrum, filename="spectrum.html") + + + + + + + +.. raw:: html + :file: ../../examples_apps/spectrum.html + +.. note:: + The command ``plotly.offline.plot()`` is used to display plots in the documentation. In + practice, you can simply use ``spectrum.show()`` to generate the figure. + +The bars in the plot are the histogram of energies. The curve surrounding them is a Lorentzian +broadening of the spectrum, which better represents the observations from an actual experiment. +Of course, 10 samples are not enough to accurately reconstruct the vibronic spectrum. Let's +instead use the 20,000 pre-generated samples from the :mod:`~.apps.data` module. + + +.. code-block:: default + + + e = vibronic.energies(formic, w, wp) + full_spectrum = plot.spectrum(e, xmin=-1000, xmax=8000) + plotly.offline.plot(full_spectrum, filename="full_spectrum.html") + + + + + + + +.. 
raw:: html + :file: ../../examples_apps/full_spectrum.html + +We can compare this prediction with an actual experimental spectrum, obtained from Fig. 3 in +Ref. :cite:`huh2015boson`, shown below: + +.. image:: ../_static/formic_spec.png + :width: 740px + +The agreement is remarkable! Formic acid is a small molecule, which means that its vibronic +spectrum can be computed using classical computers. However, for larger molecules, this task +quickly becomes intractable, for much the same reason that simulating GBS cannot be done +efficiently with classical devices. Photonic quantum computing therefore holds the potential to +enable new computational capabilities in this area of quantum chemistry ⚛️. + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 15.221 seconds) + + +.. _sphx_glr_download_tutorials_apps_run_tutorial_vibronic.py: + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-example + + + + .. container:: sphx-glr-download + + :download:`Download Python source code: run_tutorial_vibronic.py ` + + + + .. container:: sphx-glr-download + + :download:`Download Jupyter notebook: run_tutorial_vibronic.ipynb ` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/sg_execution_times.rst b/doc/tutorials_apps/sg_execution_times.rst new file mode 100644 index 000000000..c61b1b9a3 --- /dev/null +++ b/doc/tutorials_apps/sg_execution_times.rst @@ -0,0 +1,15 @@ + +:orphan: + +.. _sphx_glr_tutorials_apps_sg_execution_times: + +Computation times +================= +**00:08.272** total execution time for **tutorials_apps** files: + +- **00:08.272**: :ref:`sphx_glr_tutorials_apps_run_tutorial_sample.py` (``run_tutorial_sample.py``) +- **00:00.000**: :ref:`sphx_glr_tutorials_apps_run_tutorial_dense.py` (``run_tutorial_dense.py``) +- **00:00.000**: :ref:`sphx_glr_tutorials_apps_run_tutorial_max_clique.py` (``run_tutorial_max_clique.py``) +- **00:00.000**: :ref:`sphx_glr_tutorials_apps_run_tutorial_points.py` (``run_tutorial_points.py``) +- **00:00.000**: :ref:`sphx_glr_tutorials_apps_run_tutorial_similarity.py` (``run_tutorial_similarity.py``) +- **00:00.000**: :ref:`sphx_glr_tutorials_apps_run_tutorial_vibronic.py` (``run_tutorial_vibronic.py``) diff --git a/doc/tutorials_apps/tutorials_apps_jupyter.zip b/doc/tutorials_apps/tutorials_apps_jupyter.zip new file mode 100644 index 0000000000000000000000000000000000000000..b28507ea598c8f25e909f1a757358d369c98c121 GIT binary patch literal 68075 zcmeI5%X3^;e&6XhNx8i6$_u;V*jF5+rpW+J03pcAUm;e2*U(=tp-}&i--^%X()!!%AuIcY@@Y!@e>dxk~@ib2d z-F`MIvUWZ>A00gV)g-yz%Laqu`cCqgUy?*0>5qNfot;lIzPjEU_p|Hk@vH1C>&<8R zc+~BU=c5@9jOK$u`SmcHrTui4@_8Ih4p&zAPBWbZgH5-mL8-?ZP)`pLn0QcRBX(b>jPIv8Zr^Umm7$w#xKmyVKy zEa}aQ*?5?L$&<5ja(bN3vZvWJIUmoHL4J~TM*n4)&L)HLY>*!$`Dl=jvg=`mPwd~Y z&bXr{e1_@tq(43#RWPapq1xiA-E>Bs(fc}+M~ufd zrbRos&#=Scc$7@0V>UR{MEPizO|xRAgJ(Sq-cRDX`Gj-rj93it8L`YkIz7t3&LMjZ z%N%7r0i^&bW~W(ZFw3XOh>bDj-f_l3R09sva~3gW#>tqOW@C*$9L`7iEYAvod}+V- z_yGLsCuvczUp0tyB3V1vX%+ zKsPvFXTyR+TATP8PZy(QQlVa!At!&gj%}?@4*3Z-SczSe8 zzuhua>_!N@v)}C+RKU4Wk^5n%$=b=ho{x@fRNEA%Z*-y)O_QViDW_OK+UrdS zgl9KcY_-mSO5h+X-v9LB#`n`{KlwQ@;jv>L8)Zdd0MSWwM)!-PI2Ob?u$^8$%XaqD z=`{UvJV>`Ux9-gg7QMgD3i}yosvR6=>1+z%5|PmPC6( z1Ynukqy6k@-eZILEGg3AWFTM~&xi+AK$5fL5QO7=zGd(7qe^<2tFT9#?x5>_9s&iE}w!3luL}f4Hh`PN6S3Di zqsFZ9cu?1z>Qy0)P!mmM+orwdF&RzjHUq^fRKGG20A-peU^Zq#`cYD)$v!9u< zg_MP-NX~w;RsRxvLybZh!j(lxIfwc}PUd332L+ufs)1d6F?I%r%ul{I?RVyzTX(mL 
z&Ky2?`!jyq%-$NE=gAbr0`d`I_m0uKoWcWs9VXkGE{cV*y(#>XW!V-a5$c>xOmwq- z*H=|!>;h``O?E~rR{4@a?tLc=6Mgkk&unpIglwsiIzPIBb=8I95o}5< zYO6(#=LiGsAnq&Tu}dEIVEl*ONwGpoyAah+zSZOOsq^qt)F*zPrkp`PJDO(VJSf&0 zybcwZjE`1>kT}KOB1a_+r5P zB^_{(_oSDK)q*wbiqmBrG4%+bV&_Wpw|L&WiQwFR=w#8lQHK-=x*I^%uHe@SudXLs zZ?3~h^va;?D7On&YtP2r$mjgy_(nVJf5Foc_~qEVP(rm;O_cS;OnnGVGDX>G4YE-y z3~=Md1u@W1$Ft-h?Z1KYkQlxX$8c1`?(NOZ%@Ug=@&{N0hE<(;|K4rbTtC~)oCQRX z$GwWg(q7Q>v$Qxj+W|gn>g?}IkuxgA@IoDc*A4gwaUHN7&1a0=yU|C%>4^t^oDMOn zH$F~J(;V&R2wQ%349<$&nJVaWIxY4ScoCfHQEBKUADE?gwGkWWg0+LZn6==dM_Fro z)3DUgbUZ5&RZwUf6hKe^)TDTLZgAcnA0EoaZ|g~(iSNo>1q;(*w%fTL&a{0z8xA_x zFA5>sZ6w6$&Q4;}j^y#!(V}*ecKa6p&(7Qc=*nil)mS5(_HAq|kO|9^4W#%%q$~5^ z4Q$PE=lVNuAK!Tgl+WJ2#Sd>!-VtY(MJsN&zb`;60_Vp5ehtI0;R>SzJ!+;cytC#1wB~jxLE@tt7Y-XJCRs1Xjn(&=eZr?HjuoM& zgQGTsd}iABY&mdtW;}`Mi^e9Y7NnJl}-JMczC&NA3rq2bFgUwVnn%HN#*tA zo@)WPY24Afd;3O`d`q4RS@j9v5)f}Y9)O~8NPJjA1$X0PylConDJiaILdc5@ny(hl0_2$c8+1jq& zadmQ0A&4VE)HP+P{Pa?*OAD(+JiG!gxm=8Vm97@IC*y6!*=)UeB@QPp^VLr0>!^Dl zDaMo&rNXl=HUcLVH?0^m;k@H#I_wvva8UB+A+c2*jYdBM~f; zC(dZjRXO3MUqoKmi(Hcx4X`ognA55je9Qh=RMlYp^Fcp3&IS{CgA{1tHNsTHmMRWv zN={(TNHyceEL@l|w24x@0YEeLWaET^WRYe*NJ&XfR4{S8-lkD@SwOV)}Ms z-&WtnxBTc(jxlGV=y6EG_gYCg7heIw0_B0{ zZ->Bj6?Yad=w@f=3gMCK?a6uh^7Ua3Ktia0#$&h6HWtP&CtmveAo$Q2UXht>x(X7L z^CN|$i|&Bj0ed2Bpa#im{nu}|-)!Ft;9@VfkL#lYluAOMjDJVz0p$nXVLlpb(9K_c z{;0kcd>BhBd`um?3^B_#EHA?#?8RAkULwpic!uRZO7IPs)*8lpc z5P`R9vRZ@%6d%YffU`pE-kDxzJTCe@jiC+3%F+mCLdl*?dRV~KUDAQgE6!VnMBKN1 zlwnNGQmCnKu~QNu3>M~8B+Uhbm+?ihVu+EuyHanVQG5X&QHMB}5+17P22S?^viw~$ zk_jD2pJk_n z0Dg#iCu<+BN&k1ZrXGwsd<21sLI>EpN{A?@!4>k)3+Uq-bP*^atSJxjBhnSf9SRtD zKzM10u#PSuJtqq{_*w1DmATRx8Eh-p%;_;M4MHS}NCzp`M2AviXrP20`W`W*3J-vZ z1^H6M-FXQ7Zr|}h-`@TAzPEAzkuS$x!Jt$KJk3C8n37+WOi}{&Qde;6fDOuM*$DHR zpAr4)44H0l4cQlmk7xXy$@?nh3<`!O31NwVBP3f7DKDc4Z8w#nqTW zk%^v(xrTHl^bt?x6n(mTG6fPd)J(qGT7T=_ovTf*f}#Tv)v8P%sy=P)-M5hBJ%}&@ zs(kDvSw@kTMZ3SSEF-x=F*N#fQl_hvB1wn^t0zR(%-XhOZW-j3$o&BZCxP>IVt85?vHHfHNZ^W6-41=s>g*E8&(Ckh%Qix6H_US|OE)y69JX%$am z|8V?y{TB~Y8{??-#gcJ*uxBN z7-nN*&vx2(4xe|fw{`64ti>MnL@O@QMsRxRT>m`mc)IT!(F za7o%r!L=6IaP8f9lkcPFqzA+eZP@7Fl&hf3UZ_96B9P_fkufgRtVO(B7~~u2D$-a8 zk1WNvjx&Y;CCYTJe_$0qf@lo}dbM*sI+T30$2(h~O~kd%_4}Ah%1^Jo=6vqZ zO7S1SF{vF1-e~308CF)hwWk_E1i|I(nJ%WSM$XNTz6uvvDqNr9HB+PnJAAob^Q46|BRis472Vjg z04L;$!Sg@G^-c1mhEoY?D26l8TdB$wFQD?!mg(uGWU!>Tx@5q}-dsiLbpl$+5^SWb zzswXUvx#`({2;F5@y__c5Fq(P>i{cLd9s69kKmZI|+3(wK`49=wxUYYCfYhBU{1iAcbDy z=P5DCWeV>1#k9A8wkv%+^I=eNF29dwVW}7ALx8BQ*zIR66~>Ma(W#;(7S7bOG(Z z|IsJ+-+L4)gYorCKXw91yGShkc^&DZ{&`z_msjOB(h4&solV5!0JMs55BRYVb&nau=hWc0WfuU*_Ed5htkqxt=so8X3Z4;f$F;jZB#TKSdKF3HLF@vQu_lp z2n`y%@bi38dKdXdn&mb!j`trf`v70fO;xSY#(OVRCPq_~z;&%WQRx&fw9M)wNdaou zW?Jhbw{)Xx$nsKxd|}`qeI=Rw!?Hqv+JF*}tA)Ek!owO3Rmt3#WPH5D4yc5FFo*(C zv5vT;$7tX*^jK2DpJ>{T@NKA5#A3ck5MI%h@dI@PDN{RKw28I2KGB@|qWl+bk;+pI z-7E4d4|kBuy#WO=D6z5N0&W%-84ZosLO@5rFG=V+#>IymEoJ(x5YzFYML8Wm4>*09 zYx-6#_0{C`kUO#sLZy6U9s!#2Wh^)D{9LbklnS3Bmf z;~0=SwxABV9;9mgLJ8SKmZFgnq=PJAvp3AxpzvNkEBKGsO_Q2-gh>eRvgjN}%8;sR zB!)M1a!mtEKO}U=Og0a*<9Eh&wiIwA&-a^*ZAd^+fly8dlRSm`z1dJ|9Gf5_;!xO&xL9T2ZpGU?MZN0E9;FOqh3ox)mYSnJg zqE8BVm{(rHU~z`Y)0q-QscU3b&kiaJ#F=48xhpLSyvUT7QNOLON8>QDHAXb&D6fx) z^7;e|kQ4{2K<}x_`IN?G_%zjLh8lX+h!6x0xxpuDoAsr-m2JnYS-J$JgMC{OE?YVa zVvbtSSi5Ztaw;WMGcxDov{no@>d<+=8ja!DRhnl#QQZRC4VW{4rAU>hLah^xzUV_l ztj)GW61w451XEb`7b$hGM;PTwYPLuA+rlzQG}dxZt3IkhzLtwY-m>5>*xBo5ASp(= zxIWL{W$BC_(n4eDy6q>WQe9<39F^sau6?}bd!mDU?TI6WNY zDR^17#pAJ!Q=$a(qOC{)zmk^?Cmz#8@D?5%EUH`KEmOF>ZD7c75kYwwez{0giw3O8 zPNM^!bt+@*#Zw#~U;R5Ryax_2j{N5rsl!vMhORU41- zM(?E98I>p=i>0F>2|1`02TcdPorstn$O&DLxj;8C%1>ZQ4OlK78koDJue~fV`E|5a 
zLO$-54yoi?m}lI>^t`~Lohazy(9ze83^J$lGJ{My?hua+=;4@277jCn))>Itx_f%L zkJxPkS?Rs;iaWnt=GaTJ@!iZKFUA_=omhK=cJNrjCB5yP92MPz0aO_uESn6~y2-|; z9rDO7tiFp1t*DF%8^qkG6Ebu{Wt6aDiI+x>;@t(JFzUw=8OmLF9|w`iR5i(iP{1CI z1&x4k{)DK7r+i*Z?={_^k}(<(HrT)6d~otl;vI3Un*#ztRZ%8En;Df7A55^|+OT!sBcSQlFYu@R{joq8!#Qx1OIeU`6(@tlLoe*A2s)A>WbK72NP-p%>NfBJ$?f4qHb zyVEIhe7HZ}e*R)97~Vy$#oU`FB%O-Zau~;6+8MRJ;@^_Sw9w&Sukct1W0*>LzQd1C>%F3Y!R_Oq-LwKbxmRe84kU)_tbx@`mipj82W6EOtzDL8K4Ef3f~Xnnk%+}&)`!jeR( zi_?wE>Qf^#H>(0GgL(q?{ab7{mzyvEBtV_X<29_zijv1AH9Wl$aW25FM z1QxlIykLGqLg+xON(k^Pq*{Y6!B-^P%@sbxa_5@~ zfL2%b>q6^^#@Q*otbC1u1-ZIog+KyRPz~t_Pw!^5VD(#qy}6SB1xIBk;##K7I*YPA zwg6i>zXM2Ely-HH_D+t-e;(nIKs=WUy}k=9u6iVH2wgISEP~$4ivv#zv$TAs@;=34Xswnp6`gAh6>G7j8g z_wi$Zx{Xv)&Gnrp{19I67PI~?2Xh0NC2)+!IrMJ+9mi%XwQFtP<@FoM&E(DYrXC9} zAh5rOH$@(m7WEVsFM`3?AQx6Zv?q?(v{mW`ANPMiSP>Uc?P^`*+gBO`UTC3gwO!!| zsK9Xsg7y!j=WYc(7hqhVsvFAuM1*Q_BwHN5z>!2o>6W=i+AJV&sB3cI5Rh|Vz)MWA z6faTzvIB2u)kEhy!}JUF>9-rwv<6gFt}|n3@2@L45IBW514gTMjuBBin$qzxTP}4f zN`wwQdTR@g&a%Mz;~`XW5g27v=T?&}armLNkdX6*tNJRdJFezxWAD*H(T%|B2CxUe z@=-yqwMt;|JyngcDFiEaH*ef{yaB@O7`HO*d0Y5h)5E<;7j69UR+7_;N{F*+GSUIE zWYaPNYL4sLoflRHxv0r^MlaXp;j^_)uR*ITE_K>lbCGcO3$lG-+{vP17H?B16)W^a z^SXs@E%;!m{zVl;>Zs=v1R9OesNPvCenLB?{;Bx2XTzX4Se-G%uMo5gEF=4PAse`| zG+u&|%`FHrfx#u$JfxqSn*_bIHcq@z&M8-HfrJd`mnJj>1 zV0lCU*eUOuvdqG~6%2fa78IsbkgEf;2)9_UP}WWKO+#!Tt0M=@=+#BY)ayBWH7+Wj>&K_Y$@DZ-+}lQ6_;3BfqO>cYBeR4(<& z@p$I<(+VKbj)*vuuQKwGh+R`rJmbxNW{s|>5s)?s@@?hMf6>`0@ijuKr zkH_>1ubxw=$fn&0>)?^`z*4SS4?q3vM)fFiWN%@BF&#gpdh(9;uG^}RlI*IOTRR#; znJh2PKsF{}1YVdD2kXa@Q(ww>DiBHh;=o|uFm0T`8iF#k-8yZS^%B&`#k74;Fhe?| zWSd#)2(=LYj5lN0>-+n9j6y()g6zIzladuO*7AWc2XslD^;Iv=qLE@0FejuX*+?r} zO;{?XdiR6P<0lyOJ0HQZgLm@Lf6P^H+eI<9K#phg%X`VxE(y%uBBkideTz%E{+BJNA)M_ zSb0DBugU%Po=o_n>r}+2zLK{8)1pO{&sh5A)&C$6-0t3Vo-tU_os6ch_^S69(%fFj z7FWP<@dF0EDAW3*ke1Y=lvO`fheaGNFFf-B~0dtA#^sMhN|J z+q^hV99E5>awSNWw{tLze0IIvZPF~y{4K0*mh?}UCy#+Paf`l4<(Da_5z*?|1J?-FP>7#HAm~VvX?gjS(;j1}O+J+lU5c=s{dz2Sgzi z!~8g;uc~&zd`8WQMW=()luOnS%wiyM5#Ha7G{f(|s9b@8z3w`Qe1Me(;g_?IcZ- zH<#|5mr3gE1%d9!M=bCzx(d0o@-(7r>24C32i&kqGBpdxWtYW6;G5C#3yjv;XvuCp zP$4gBy>Q4>1{2?y0zr00o2qQx&(a-JVV@>*#B*0HcX4Z^X~^dj>`m}(Cix@}>0hPQ@YcyRP>YCXxph$?^?3%X!AY5?GIY@lA=o8pR# zIW?qh$xA}NA`{MJA2$}dv?4_j*--PTfXk*xGhqy0m3D z|4g1zZ)()8vp|!T3T44gS68`ejR$ACPDhu@+35vsu^O{H4rz-O$fdW%#RYwgrO006 zNc><0T72dz89-I_J=0M)JR-v(QpnHY5mM3}Sd}4iwb=yM6sRE&oy*|~x2<)4^*gKs z=20<=YWwO3SiQ>6x=YxGsc{YO3CagW>~UNvL$0CFlBIhILQ8`sNiR2bdpFAq-Flvx*iapxC>r-&hOs|6EH zhZv1H6O0^BT-nt=lNZ?x;o@UE56s-SS*21m5+mX_mpU<`xkz{_+yys-Q&{aN6Ygk> z^+KdR(hw2RWGOA~=Bg$6)IB6{UtE9b)0DOjI6KfmM$0#v$>&3K-y-8#)GC3lA%G^4(CLqD@B|mH^z0xL?;qgWs zevh;e-MoudhNn~a_Q7ze!JT+d4rA|)qku{uE%~Z&pZvib8H;hGP6TfI!))y?$4VRp zKbdzor2NU;>$=y3m8&zROez^Nfis(s0*#b&VNbK=ICzN+5+^$DMo3Fc1>X7QnJ*HiUwIG*J! z3-RoK{ccUw3aXAx6BQP!JBjI}R+i9kvR4V$*mva)LYe_(68K*;WPvFH1rE@Yk?shW zLpr#4LXLLus29Csu1i-3p}^9)6!l|k=2Lo$j9VHm2Kc zs~kB=6NN-VE{3F(>~w;9DAG?cGB8lXAf{>i`Dpxllu2iTx@T=?WGdir_mKiam&WJY z)=~v0N{bP22tZv_sb_1_z%6}PJrbb=md7hjb4tK*x9PfCH!vg-Rv7~de(1YTKHt0l zVH7O2bp#;yvjbA~kGI}>Ym3wn3VX3X)=x+PJj&E_Dylo-K3X$J9K4|w#@4-i*5Vmk z1>-&LQPnEuF0336+PKzJ&Z4y-&#A{TaV`bIOK2X<>Ed-XR%WG@#>6mAv|=nWSci^= z_FNX*vR!{1>BkOj?`2Z63O~JAKD*9l@fe^Ub{WL60l0)oJ8_?vHwuYMW+Rnj6a1k2 z^te>tY&dy2FdGdGJfF#9x;^YXun-KvKJ~mAGtLu&f9kXsE=DjFQB*_M5#6!Yq{ZTswY~OQ2a*)GKzx{T&<-cx)U$_0&?eObu|Mj+h zeFcj7I-Ry2ncpC!n{jfbn213XKO#6441r8JmfuJ)OD0@ZL+PJ;`Aa&X_EqgW}4 zA-r+b!2WCgWr7$lsD8U7(ZP_7ympu*TQ;WC*L*DmMLOZeM|u32WqJ z#DGn*ndf+Pr$T)tnMzdat=r)jLRvr?$KuKAZ5t`xj#utAxr$%`^osdC! 
zQ?_@N7!=bg6(*I55b9dOLPDrOAr8hTM8PdlSotWaD&VGTiJPR@?@C^vV;d#cuiVJ> z>nPUmQPpCrz0Nn>&ij{0zl}iktR2rQy3&;Q)&mP zfvp`wOwGqF58kZ2dh%^`WoZDp&O3npX-vxHhB(d)+PECZ(I79YQK}NBvX&?+ z9-vt1c$8ChkJE;@js&emx0l^(I`WXtO$w_rKr%%0m?QRb+FCos#w^04B2)7?H73`p zEzrYdK~e<#6dV_X+40rjpfgXB?Kp1Myh&k^simxBUgNKn>UQ!#jb|q;N%=|UWg`2t zD8^hE#`IzIT3jgJfre+!ODDoMdW75Q7Na)bUMm#bP~KbdAOps_l^7R`ymg+^UUTut z6UZev;F-BF-Az+^|D1jSzw+`S3GOkbohZrkphVN%=xqv}dNgJ)n^opxH)ZL_jTZ35Ymg+OYx&xjuzf2n6QjRj$ zbTHX^)IBUvdsQUh!*q$`+cQ;KPCIF2OKI*#OElI4gCi+SM`?*voss9n*8dyQR4hxk z{<(g(6e^HT4JU2#*TGt@99tnZumo+(4e5)rq`!}IcwGmhBM_+T;(-f8S5~d8I}e1o z@W)5SAe4i8!GJQ?r>wg0^fau1%F`M>#r3)Ecs-o_njNz3OlRzb)N*i#3b|d}Sw^rI z)`RDP7*AP{$tfZ5Xoybo+fGp;hX5&kK~MM(+DBy{0bUxAoWXcw%U3)Xkj9qc9Nthks~PvD|< zt0N4^fM1gs?1pAt?ywJ~WePeeVKFF0%LQjvkLGJLl}hJ@4b0}1UzwftlvIgoV$~dQ z=Q(N=j8FM`L{4U1w5YD$p+-*?&5cU}ax$VQETKHFyj}q2A2@k^@!a2FtEOnKOa{IC zI#7yIbXzr^=$mfcy|`=b?*s9>8v!XIx_et8;Z!X%EF1{xoG#P8U%h2WY3b6dj~-9e z?Iwc-^*vk2!+}1sRqz>q@In?VS4pahMJrx;j1^=Yo3=Tas{Egi6Mee z$zjc~c{LQXNAK#nrt}V3%AOK#vf9ccZKhHxGi&*?|G=c-6U~g6PZwbFI$&rGIUPI1 z%Z_O&_=tut&aG58ut(f2@;($W zT^}4dlkNWBz}Z4E&HsZCSu-Q=Sg8UPs>q>qqejUn^GY!9DRTu~|mqKXkErx%TLeHAlE%)aqDA$XE5 zKWW%&zRY&=C^Xua27#-`N>RMNA zL*=#d$u+lN8-aC<&9aPiZK9FY`5+Fxv&%<|^F42Ucw^0LW6)Z>Qdp{oTe@o>()Dr) z7H)*GP*NjhsVhm5|21wzI`nC0$K2s?^1Qx7#79eO>pt6Rd`_&~KIKA)8a&pyNomlR zzdnBd@pE?NIgv^2!Ixa7MAfh_BiUi@FfspVrfPIP`1m=nFvcHiZFIA^r=85k+=#ZI zx44zuFdQkfp(lNFMx+|F_abrn5v;J2?5v19i}sOrNc8>dv*<-m1n1f6RCC zUP!`S#Z6w~>cjCDJTDx|QyiFTRfjQaU15xi#O&Tx1B*?`ta0YjJXZ0+rCehZds~93 zGqiYeY+<{dd~ObuH2H*Nk1TmFoeprsVikejWr0C8$+_*@P%b)8eBI!S>;RaoLelcs zVR4Agq806JkXL1=1wN#9M$xb`+Ig^Y+DEL)*Bw*HzM>7ortx6v%%bBV1 z)qwB-usl>EqkSg+9 zxFUWd$h*6uJM7h?H+!&8 z)6}`_tPRI0G^mINjCXlkCupNx$q&N^jQMyW(Sq6nLYM~n&&eZm+O|A~a`*;xJ*}bq zZh2}qM4|z%(18?Hw^WDt7HDapuF77XGM!W8POg~(^<|JU_VL8z3G50tWOdb& z)hifsz22M)hUqCJQ&^p%3u?7dP4IX`c#8iu%k@MhMO{}CLDpei(Tg-{1h%wp>v#=k zl;$hPTEX=WG-wzTitzgJ8Ba>MzNkDCC8AOHfJ>nnV5xNttCx{PuB{lGCy0w>e6nyM zx_d8VJ-E}tD|}c`Gl}Abzd0Mi3RbT&0i=nlQd*D)8C@&tWf?^Ly>iFzbm28=JVBKJ zZs9dlP}rKf1D=-kBc%^k(j%gTtsSH=hREBNGZVsDzVpynp zqa!sG0Pa4lZ@~l=txnPL>IpS=o%@PSGdzcmR6^vEVW0~-c#oqiiy6utH8(*yY-i-{ z0uJUm9cC>xc|PG?!gUr+Gb?_fZYVep(9x`NNfck|Ycj>{$^_q!kd1bE#RRQi&#^Aa z%0}JKQq>O%yXeI4Y)Nh!z;5ci6$o?mRdJB6KLvJk#!eJmwDj6+1U+;2B1i5^uV-oN zjH4M~hhnY`KM<2R8@|)co#Z`boFrS|rhl{*Kcdl2_-K3iqucQ#UcADoik5Bz%J0TT zX6Dj)h0M#ZlXvwaBSd3iVik)tE~PNpUtN?(YVz@HHYs*)anF7GG(X8F85iZY$J3)* z`t6pvzoxT|U_S?1eP_ShOK%|jW7x9S z;NwG9f5-|B8RhWK6R#?;z9pYsRGJP72oeSHM$jbdkL{W?VYzo46*J!7cL1UL1m~Xk zdyNnRPMaGL&Z{ync}R(HWPakkLBO4n6Ubzh3%{05fuQ}A+b%4a#C%g3TnPCw zS9!UsD|A3d5cqN|EY%r(nN6k3RAH8btVh3Cw$zvO7iV!X720+~??@Eyxvr>V?oE(= z4tErPhIh$?Q$qfTsDrU z^fu~33_<{d|dUCZ+BRf+Q zg#6YVg4j*U#j1?7`bIOg;!1Qs8wyHRToga}KJ37p9i;!_%IRDm7;bj1qfHZdGe3`r z|E4STH_Kb;h47}}Mq@{o$%u^t_&z1(sQ*!*MurjSsuHB~H;8uCfKL>WAgd9>PSCKH z&}vI8g>q`?|916{*i2BXO>1aK=%78KJHiW0T!&F;*oDIC=e>%Z@x8GizQd?{$-%N@ zVO5=Cq4oX!CJHIYRm-U+NL9o(EK;(+V+4iD6(iXl29g8iD${KLv%L%A#EzS(W zUV&Bmhx%DB*Zr8Z?m_*QnQReosrN9hpxdi+8n);aS7lD2!y4)J#@S()a#O^2^9zf0 zDZ|>oMdRW7jZ+>?ZY8IWx1NX{#0!>$O_r&LRK-ZwZI#0_E-5&G1oT=S-q8u2>7q++ zGE-=ji!Jr+<01De+Vl4MnJoOXGronXSZlGO-$7Y zq&q(Nf+H;2@H{dFrmeHxvok8{cF)O(kbu~Af-gGPUtHtJB2ZU|OpHi(jQvh%nKrxiz1f*kqS z9~gL9&9bQcsCCOH@WD^ekrX_^Elm-DVo{L|*-EW0$!`F#AGf1N0adMBELs+^Gry|4 zs@rKNpW&+V+h0h5iEi|!&oFgA*|71wm$_;n5aEbXSh0x>G*&xC{;m49*PU2i>Jh^Z zUSAO%mu=C~f|>hGr>uWFowELdPFWlO`+xl3Cu`TP>Fm?O2 zt>;0S4C!YRVh0$+o{E5M!NZQyE;Ghbb}H|8Wu#jJB%=mnubH{T)qbS*MQsF3nKEj~ zrn&ou8L0N`k-l-nB8_`HAS@t^kuKBxsJx7K824k6#yr*WDh>2}+oRpDW;Q}_>X%); 
zT_)A?KLLX`(Q>E&M9WmhF+Cm~DX?rXV~NN)3EXh&i~Q=Y9o&o{zZtM@K6Hn(*+;c+ zoAwdX(qhxun2JaZ5jM*XIDyrmledL3&9HxWc-QvJx3&qBUS!lt1cn}z(#gwQlSL%K z$xjl^d?Z{#MUZ#J)EUCSR-0p5{d8RP#(1jKWO=F^R8bNeeZrY79IVB)Y!^HW;@G`W zWdGXTDmp5>^je%$;CMp-b_>@$>-JPCobpDY=1pOK_+e-Op4GV7OxC)RwAQ^>D8KAGN z=i9%C1!2rAF6Z-Zr!(Wm-DmGVXGQPNDR|}@+!K=e4M^pp@iWaFh-&ZA%Y;oKI2WGT ziAuOMI@%qx3hN#{qq%n>+Q!fsTXfk# z^46qQXI7G2WK!ll(l_B)L|+Dw8s(z)+fgO=XYTf?ZO9WWK z-J@o1Bi|w6?($M&2`SijK6=WPm!!Aqf_8aY2AQWWLw#BHrM4nw6s#;W)M4V356-3- zwLpAnqo+Vujufq*=6j9n#ey?O*HP=(MB+XPm2~ctTAX3!k}$5cs5<7lb9qlJBHUkD zRYB`oa)WHKANfe5;cwhRZ2B8mu_~%1zbPX-i1%67S6pAJTE8b*(-v(n|YI)e9sJ=)cy zII)w?ynpX@pa1r@dgukP+ZF3u4`9$;qDH~Nwp_sIAjSw7b3%b48_-sVzX<8q!B38u zJmrRu>G@%v4YVL*OR@Aii#*S$=M-|;P$!(a!EWO z%B0N1E0KfHVyV<0#mI${P_?nr6fniWqb&KK&Y}iUEp^^m&AJtc>eal+T@7greY)I5V5?5P!!3cFOK(U7v z9I2$4ilvzsbHy|Wn{yeG6$$Xl=iMz*aoHw6ZSzymyql5@If%|goDDUr1+=}wYtv9r=1|fSRtj)51lrP|L=f|G!MSg^8S0R$x9beYMf}5XAFE$&Akz}vh(q$A za|Cpy385#&)G3K_@=-#eV$CoQHZr>19wRNQb&+18UBz{$Q(9if*qNp$4u@yeu1F7e zr}K#lnyAb4WNdS-dY5c(*dT=0t7f1JxJCk|_SqY#(8@SoyyC4qrAd~u2*)f}$mLWk zm4bkhOWIJJj)VhuB}6U96B^z2iVsOrfcGTvSp9I=O4D~>?iqPz%FUGC2Jb1O%a0=z zt~NW(>dZ63sO8cW{wWrUrVuPtHTbuK_xiR(E4N61Qsi`=mA%B zU$qit4X?=Oh){E~4;9VG?U~U}q~F+ZqHMfJnqxUG7YQ z`SZMzX6Pd&G*@tZl|@rGn{X+5KB%gkh476PDQHrzC-jIw#MCqkHwWj#Dw*HOKElpd zAJL3zE^Vr1X*MN@sJr^Q@7*q zEyVO0m!Iu!Y;A6m4SGgW-tIkq6w|OZHm`?`?==F#@xx0U6>csP|IP#R1P*BQ6pd>kPN@?#7-?j+t_e^;Au&9knoCS- zChUZaM5(|(fC-LfUy?~h5(O>%C}`3HL2Hfhh>Vj=BNR70f(WpJ_nc=r%Qo#h_&ZQ9 zJWgKod&9>c5K1Ryu7DV}HX~oM9VqQRTx=;-&1ybox=398{%{EDG7W=&B-WBYhReI9 zW8p+e$!CX`i(PJSu0vlEwHxsSF^fZaT**`$OuX)(V7W`N?3Wp{m#dBOYDTrFt|+mh z8_)RbvaH5n4ZhX*=2A1S6)Sx;Ech32X-H{ua|{9ndllbN(wLb9s|X8yGM-`OsdKgA zgZ!wS+=gF}gP9$+U*L-N_A%<6b^5AG!q?gF8;_r~+U;9idEE2fEk{m6$M$4&gkZD) z&(3MypB?Wc-?_6nIeSH#{5q=>vrWlb@+b>OAtQZyqQILKX_pRHW22r<6XP9`UDjGN zs=y}lF0>CN{7tEg4~AG!l*PQneLZuIcwN+>rv&W2PfXboxTW3=lpC$w;}D~kM#@5T zAya1*@6^LewQhe{>tw81o-=gG+e4b?8IOb=>W1c&kOaS*8;0;I$H5|`Zn8XaW)h2} zSfc;Lpah)R@UdE{+rF&R&2-Gscz2+0E{GjMqevVgNjOYx2@_mE^e$$v*58&uG^P0? z)K-5j_;gswBt6Kf=$5%`&Sk-!rAt(mGsyvnusFxBz5V7}2c7w0wt3iIfi>S`CB6Px zNx$^(e*Kq!oeZRF*VcaLrw@KByZcvvpIp19zrUdm!=Io1L_ajLldKaW%@{ zk=^u9fBKidce-)yn*Qn@!1(89Klzn67DlnMn}PZ~FAwz(Z~gg$KkohQpZr?u+BN(hmyJUMcCn7{e%PanMh*Z=t2>&&gc&0&7_bYYla@8py7(LsGHdO@uC6lJIsI@X-0$5%n(D4WPmg3dR_gx zyN2~k^@GjUh(^3Iyoi-JKa3zhAO!HnhBk>2#2c@IEVA>?*?42XI|I>wpnSgPoO|DU zRYi(&*f!HPHQiP3-Fxo2=l41H+SSAX!~pZ(UKeD^PZ`O+o*`z1b` zEyo9oG^OlUhEe~gL!#4tnQr6CgYQx!Du>}Eq11p`Cu`a zjJI2#PG(2dYL?zbU*5WM<;vx)Znronr_=e)e0g{>E2pPBY1aMATbH-K zrkRWT@f|PSY5(Y%YY(da>3GmDhs9B~CHOe9W%`vKWku#c5UCz4vZ$H0du# z)p$_~&i!k}J|FD&uJC! 
zR>g8&9Tkg7F=B*aQ5I)|qpEmZ&IZ-uyqFvp{mHmrO&9b1Vm|4E%VJzD&L*=*^X=kz zFfPaT*{Ga7suuI@%Ug$o$#8OFYpEYB$4AAe95Wuvn9aMzU4|WvCgWl{o3Ox2fS)m74$Ij|Rg9M-)*4`rtG--DZx@qU zFr$3l<*A`6J=u(Rgyk zR6;{l^o*MGQW6d?Z~c?g#bP?&zka`rDU*Y($RBgKP+fq&XNI54b0 zb7RQfHoGkL0pWDN_o%lcyw#HcMD<_l@No(0kj!c~n8NTE}r!E|%~w(7*H5`$o)gQ``3Pa4=-+w2v&5RZpuKJL#-_c`Na@ z@zUx@Y?4i4AkQYF77T@0mVH|=W*>y4{h2Hm)8%5$&V935d`SfQNJ|+`CXc=-Y`qQF z>e-daE^qB@{<*vbU&8A%_I$pmrosF!j|(`Vm_c@f8AOGET7r>o@$KqMd_Jxuw|IG< zFZO%C;ezg{T$Bhoe^z2-I4N1ktlBxL#?{PtH&81UTY1~R>BUrfT_~7WLnKSu5qs_O zR%_O@J43?gfs8Iq{QNn9;_}u9F3o@401d8#h)lg`n@z_Zi##@JmLhgac;5~b@!wE) z+l3YdiJN-0KZfV|&(IG~B-h4IL_M#))E~wxzJW@C=|nK|#q#JJTD^J0MVl|y_^AZ6 z67_Rb>^8q-qfnD*6oi^6a4^=8&lCbKO?_B)q9!;v?X!u?22^M9_1V$?{N4Fq{Ih@k z(f{~^@%f;bu@T@mZSaHBO4B$`#Eu*ndpBH>X^;CeBn;~|jGt3)bj)4+>OqrT>BH9|3u(LacaA^$-?1bv`tC$oOFbA(3iON6{vj>@BQ z=e_c*9H4GbaF-UR?6LTS$-;mz;z5!pZiLbAXFsEO+Z+u@0WjI+;b6Y#z{e+5XYYpX zs%`6JkvCRKp=-O$cKWBLGvc}7d3SPrjGKnX#*;jg-j&P2u9c(e&XvGT_jEBDUb&#{ z!?(km+TSlUiIAY5eZjC_bi3F2|LTd`Y32f6Z^FdmdV?XxnE(5r$6pth%h3>D^?F*{ z>UyvCdJWvmQ#IPqqvmnIg>5&mTfA#BQP53i5QrI(=aZ!wf^)Pl6d+^4=W~pAWz}{x z7sU5?P@S2ua_Mm~%y1V`2K6UDPm;fXiiuZ2DkgKo>02?EBz%ts$H%DPF;*D%(A?-8 z@|+x_e1upQcf1^GC5aW>-;|-eqw(b5BrZazu_eW4ODQkmQYN;MA=#%~9AJ>FXEMh*b`U3(9 zM?SYt`MAWj6(uI5H0Ds;LaSd)rg>QH6CWZ`1a~ri@yu%|lFJ}pM#SgqN2-S*8CXu@ zx)$c!#T%|X@HG$mljV4E=jPR-cukI$oRWfB1uIsAcD3+qkM~d zCdQHBlZ`@AM1*ZB{0(*n4HM+Gs^7NdBxBspJ-oB#ITfzf3W#1dd0Kl6qruZobc_2V ziI(*tBJ9;44!#Q&Z%YJ8WL7iqVs`Wqdg81p&*M?MI1<9WsPh5&}2fi>D!3du#pft7UmY)S+qe> zDGC0>W|<{{X}DAz(Zz&@Y=v9=!K6!(F3aIjaas+ha`Y5|CAB4M!Mmih5?*j}#J8-S8hJm$!*3Y?tCmyhu}$!XjukLPjO$M9;}o z^u2Q*UD>S5TZZAWAa(EsA;JsTO zw|sE-t*`Igz3N$Xx3ej?}A+1dAnjo>NK-@3$Agn)grUvm>XRxF;WBsfAT=Th(`q(y`D^UT3Nr|4g^@PV z>H|#kG9K{vvr-Rwb}hz%1Fdu_K64}( z9D8ah@m6qCcw`vO64c?z>6`*Ng3uF9wuTYn(sr29{NOI8uLU9>0O{Si;*k32!Cl1+ z$v552aVP?e?#f{6Xj*_fVP6G+}VhG0_ZjNJ+<`evok|z=gOuEs0!_6il+3J*J}Pn$?`;<~@!7Wf13cn`JcI8V<23!b^s&GD zO7Y(Y&@RTWi~KnIt(dQ*b4y}6YLd*9eOueAX)|-2**Q+>3sL2WY`-W?rJ7*1dmzkmB?Ph`wLjF z#|q});E9ag`7Oc60$t8myW3xRaDLJLsKu1feI_mV3M9_*smMYeS3adAf$6&Q4^67~)Zxim5Ff?0jrl91&BZ#W2 z!dhfSZeORwXp&9ldnlgf?tROxf*Ps7lDI=Y)*1A`s_h0VuzEg87X-?(`G?1Jje6@> z6wZx7dB@UI<7wA3&E5X#gltJ?4>?7s3;TD7)o|x+>MN?N7V=F%kPLIc0&5;7PQa-8 z1mmxRNfCI0C{m&&P}!MZRi4qnF`!@QrI^)TO>}ZayEv-g$e4_EeZJqlb^P>7SGX$| z9ag9ZItf5~->K}%$FYzX8^IAA^S^w(`@pgiIq->|rMRI*6F>gpzxqQo>ZF()TJfI? 
zFhaMev-v4x_Q-@>;yg+Vlz=}PsMO>&wsb^H2vT%+IQ;fAFlT9cG6sWT)^$?RrgzQA zVDKe_LuWoEDKpL1Vd}KGSIskQ5*#GyXpVOV^H5})2*M^%a^a#9&}&s%N{GyHZ$o|~ z$OE0!Rhc1zAe74RPa`7~jOxCULi>ShJo4B4HSngl600}fezW)nny5S^VrY*f2VW+C z4v06Oms0e<4-}@G3;&j?Z{pezmr;G?Z7U!XWQ~#Z=9T13`B)G4cR_aAp(}TzWTWwt zbJEQXoYaJ=$xZJ^7^^)96N?4QC!O|tdG7b|Syddl@wLHt?KM#SQh&5{UsokeYjcmu z1IOlC3_|rSLpYMksR&*u8EAW0+X|lXVf!OO;Y-p4;!w)2^v+?0WU46rJ*z~2UnFOJ z1eYomCA(Nj>cr0CR#lrVsuOq_OBW>*#<;O>P?;tWg@}|X&Bm^!&Vi>@A!z1GHTF0Q zlv&fcmmaA$PtyR?>S`Px12d^9$5bep0!uflj#T>!xX2hPA5Fmtg9HG)U{0X4gMy-R z6`t9%peWLI$QP>)p^(4^R5fzcQrZ?+-InU_uqZ6EXO>5nL-tWq4q;v2*u)=A;@ikb zG58uQQMHwDZd{b0c(}j!(ijLFywo4z^xKoOFBacy#%0+q*!t5L0xeAT;d6vyCLVE z;|yiIyWAq$uo{7gEoO{0M`$?;-Sg@>LT%85i4t9Bw zhyu!yr;jO1MU6tKyrb-x{#YQ%L=p=L9y!6SN*4C~=8Y&NrRe4W&8ok5i5y0SqldY& zpVa%>yYJt9>%K;`Ds_tZ2jrP3-(L<7yJf$7SY7{(<54;1xW)CL(b(JT{_4&lVYySC zMWA>+ld&&F!^+R%%z|o#LgaeAiSYxpd`9IL!d}JhRQll}0AUn@;qJ{_78xQE6Q4M; zNj-DTG(}7DVaaa}ce}y?w{?B;a8h}aXBwByk))tn z=A~f0?^bVes4U?WMEg_pMLqOpjbWJZcvEXTU%(`*HHmT2d=d1?#%3XgNyNFGHLK`T zP5UmsjE;ijvIt*M5|-h+d{gIsaddP7h%XNbWNDr;3fb_Ay`%0;R4WMbWRFZcP z-?5_>g-=A@snL>)?>g;qBu#2JO2axBAc5=0(TJyQe5{QH) zgDB*SEL^jV%rLUDa(Qx)wbwV3u{^<6g(WPih)Fe~`Wa#CXa8L2W6u-2P+>z#aC#J- zUrjwzlpG1nEm1ld9&c=R3Fl28tLQsNNQ5sgQ)6`LL9(>U-y@gds(-OpV*~ z3FK_VY8xY9jNYgIkIX8^p77g;gKCCG6{@AYIqHT|W*&@*mDO8m0lCqNBdpR~F|C}R zuzO|*o{V%}&rA^F zE|paY^X+1C#!)^THJu^hSR$uR6>*F#`6C@9`<~A4?WO!N$A8LU&)~rq<=idikVPo9 z)~?V-ah*=*Iqh|Z8HX?5ql{ck&QaU`Y;b6c#GS7(**K~b0CaWaRVn0pd>bNaJfVY4;HdgJ#SO!Gr_0*YP5-L$1eLrat^gZPXEomsD<3WDpUiEo75u<3MOiK!~22+c-JI zlCZ!fR>s=q1={cTph^XvwO{N&V>azHAFP#G$M&ICS9b8=KIA{&dN#T3Z%GacdHeeo z#r{OUQvvn)y{Db~S5erX98*gB`R>!tKPiW(Yd(DX$%6Wb&%wdd&U**9E1sD z5ZvdCAGKJ>qZS%hH?6C>j;)t$R$^5WvhpZF6mkd%FET;6c}-DQ$B7i3h0pYLE34;B zowIt<;3GU5sDQ!sH0d3th(iis*-p88p_}Ati!wO}v6T*OHi}QuM{Z+nne#e-WcB9V za4!)ne%4d=@xkE`Dvsy!d7(yMg7mNhud4pZ;$m=XhqKMSos!>)3Ob=>O~xc<^NQ{y zrH{Ob3S3eV#Q^A8Q#Lvl3K4;A4F>(NLH>j&iKl#G%j28MtOc+}tQfF`d9}=5X{AJ0mhHaoDgFB6Pj=++<;*2DSC&+PE{ zz{hJG_i+JkArd`&ZB`Ps8FK1x&cKP^t`(h+zWVmnq&pK)l=FRqzZR>0M(n+V&j;I| zJ!NOA?|i=V$@f1wJpO$4^kA?PzkK$|_rJ4ueeaX`0AKn$dr!A*l(t}vSCHu`qtnKr zlU>lsz%8FdH8yStWnZx9lyeePLL)0f)AK7uDM{6%2U1cgmg#%*uInY1A0NZ!Zc4hg z2fC4ptdEy7@MnXYiODHKY8fTvq;Pt^R`9O#FU9~?d;(m^ALBuS+7L5%~NtR>`w`oCb=qxDkmBE$8-MM|=D#4Ku1VsAo^ zk}5P?!5nrrutETnF_sfdQ>0Zg8d46$0fE^IwbfvoSCWlxVfz|$ulVt_d~NiuBaXv5q`u6C8|k?R}>aP zx=D%5#b#7bAhx1Pm03W8tQ;?vqa7-prq+C-F3)dN^h%oKrL7tY4n3-s;ht?%)#2FG zHAvcCZrH|^#KOU>+bJ&ysw2(=SEAa*Y>8*b;)@y?USOloYDRCfgFHTw>>k|(wzl3= zfX3bLS}t}zBA(j4eG!TQ11U4(Zhdmv6M|5Sq7|HutXs_7zLD6RJxa7h;toN@*TvYJjd7q87at{QQ{wT-rFv2-{^7*n(W*KC`)jXD`agoZbAm%u+~P= ztW=Q>uKGG#3rwhJo>-MBmaIv$R2-SLb(A*4OdBJcBy?6ef#H|xWJ31DWL9bQMC?hF zV1VK9fn|t#?NFFispf`PlXL8@5I7fv2fLy|eu-s?{$ekvst8SEssB5+1A25df9B&f zy91#BDYzCKnCcJ%X{Q{@eClC?luiX-Lm>I29ati9|L0~q! 
zflrpy`mDJKMEURoTsg%sIgzJ=EM@uq#H>jtRnI}~4VCB4M6*$pAk6bHIuCn1F`{Eq z31;QAqjIfP)KNLJxlo(=s*^y;hQiO%9p+^w28pfn1fze+PxaI zhib6IsAe%ZS6W^)&`TUEd>RWeX<>(D|B;&Kjq#!oS=nyVcfr>6BWX!!>j*uDZ6Tv6 zMauFF(t`?g0(f}E&r!(XT5aC4j127=OUP2Hxo=KV$yHA%6?G{h>I1v9%)^<;t96mt zdpXMBhOkRT=Ct^OI+ZEZ=ur?T^+xyR%9v(BV)H3#s2YRN(+{5@4Jouf*MB~3ne_J6 zEtzrp^46!Ex}p+=T3Szq+rC&&DVppQd$(g-*wH zgAl|>UKAjQMgVty=ix&D+(R_0GW^!3{1UGp%oj&@Sn5>-_ZBCs2C$KNrcNPNui4qV z&C6GdYsKr`8`*V*p89x(@(w$cO*t^?@xTyQFGB4;kW6XYQB8xChE`BzLL92U(dp%R zhSP3WMnYhXwFf!*2Qnpg!<1mWi&z${85Ne=U>y>ZX$=9gg2a_>Xj^j`k1rjo;TU@h zsbI6~#We<$JaBQAbh9C-Wx0%;!bjz2>OfVFkfHY}VCbO7wC~9> zPR|*@lx?YyBO=QUkdtp5k|v?N4z!6W>uSk{BunFu>@0;ms$n^;ZRi9J`+agaKS)y^ z1oRPdIhj*yp;BGEvbwhg^@Q^~H?CfNxC7$%VFt5M_Cyg4OUELxm^%-5ivdUCg#Ar3 zk%_5_8LU1J%fOQjjpS>G3cMvWDgS0wY6i*$PNZ0}m3LLg|XT6wY@EXS|Ra zKJw+-fCaWrxCao2%X%}E;ApEkYz&lIRys9{S4eO{)0|NvZYvVU*HuQ%J;_ZlYN;HZ zLKJv?Txu-~Xnsrx?&F|_X*F_W;gC0AXsA%oAQ7qzvbzKv6&=Q#!UJXr&ZxzC^j3XN zp7hh;#4^i%NCA)9t5;NX$T(2{zcdmnsswSJUJ-v=uLOgLIC{lq)M zfV#Dz>sZx%X-8{_WddHBA>x6wrX~7iMxkJLP$n~>NCF9?ntAibK7pOQW@u}lWGZ|O zYb5$@9aJ`vY!|DWu?UYx-r^l#uq*lwCRa+f%O_Gfy6jp=U8d}$mycexl zOVCdw!)&CDhZ-!?uT}|Wve;YN=;+-MF@1Dow1Lg0h%t!u)}of+^49IP)3ZW<96+*T z-JCP}sVz|*@cO`Z^ANXns0GqH#cjF>92lwIDPC_D9P))9T*f>I6WH^k9fgy&i;l`H z@J{VNs(jKR=exx}F79?8=+*gwQzV}{BwhbchqG!vV-A(520w`R&V$*qVzBwabUgbR zU-d&%n%isj5(KKsf?^jP8^=k|xNjHXb*?kPd5X9)iJvF;CUuc^lYMEJ0(>ms2#B(y8^tLUpev?@q^k zw+pEB#-}~}OcA%wh{W7$t-m#&rs6G)zx({B<~$&rGnf8@a}u&$I9(faPV+ZCc8geh z^zloZnZ%<~?7V4W9aCvRMoBz@bn_t3nROUC^Q7F)3)k zZv%BtIJ!Nj$fOZ|WiOTPl!ams2~{qWW`_feFKjT{5E`kx6K{K-S*Js6;`>S0r&38f zhPf6F+ewcbNm6-Yi^`lIc$0bqlv+#Z1FLpYZIyRHmKuMvxKEoVRU;&1`VwvKFngg9 z?o^0KR`h|W2?kz~+BW}*Is}-2!61?L)MDV_SxL`A_Csj)A};~rM>olUPtH`UXyr61 zT3K8pjYi4-0(4%}kUJz~c}JEjL&{2|uOV>SIed12_cgi*g&wN(ZzuVNYIl&<7HKLl z)D}rWYb6Zzc>S6T1~XP|Q|q>kHh#kh&ij{3BchdRQiuwCO`?)6*$q~p8H)h`wc){a zK>C?UPliS-wxxjFTqsqXu~G>mfHWNNtM3=Q?AKOkJ1qYwn?o(=a9DVXt@mKz(US2j z_T#%y7b~qsR)yopc&&HJuVHmby@FbR*YIqSV-+1`WX7Hhb>YHN^^Mk!-H&scF9V;( zB2vN6+8G=M%;^x$6ClFWix1KN1fuYwSLG|!UcorkP8$Do9WNib*6`j+J@+$@N*MQ} zrS<4Pf=qW@nFoxgT3=>y z7UOUDvVK2($#F5$}iV z5@ivu@brxS(9Z)D$pz$R((p;-5%>LBJN`D3Gy%L-5KHD3udj8**(k@H6eqe5EOM`gW29L;RUk`mE?4U6ml7p*f{z%S=#{#xpd7%0BH&j>p9d+ zGU;=tA>enyM7Wq}mPFc+3T@@=!@?W#Ao!;F5 zez<>Irze@^rTz%&uUp?)w^uMFdp$W|wv+03AamEOS`Sf%wQ35=x71XxB_hnGWDlhZ z!@@PJU=^%9I{p$B;6#m+axpWN!(-X{W~)<^nA};+HKTc>Qt5JUIZdY-HUPT@m7-)k z*IrW4fthbi843a@l2*{@O&eAb(@euo$&!AeWCJ3I(&WEr2RMaMiQt>cT9|8V)`(V+ zTZ6PzQriz`f+A?L&04FXJFV82lNo61(GjEF2T{RrwcYk(Qv>X-;8TDEbj$wmRCR zr1h7+&(#%8R+CxiIIGQI<29lIZ04gbM`HOJMX=p1zYD!;20v<=f> zJ?|jE2X-TtT6NUg)>WV#uw`K{^+$-ht3<5qTb*`i>uhT{oDGEIX)aFku6y zca*GYW?#}5uhi+a7Eq9os0fH%WGaXv@0+andgnG24 zzQMULQ4&17UxjF*mQjmQNy!)M2_8(mYch(HfSB5k@+Jl1)3CpUvliMR^)ei#_JZwA5JEKhX;fYfd)|EQ zyZ~(ljrC5hUTdBx{7-a5{zt$4U#|bxuUxvMe`_6)L0laE+L+8~T@1+*DK>qLXbIEpDEhA{7+(%)OB-hz_J{k&&q(6R?$L<;p7`GPo)kC3rZ zRgO%Z)f0utY4sR?Z7_g`0ikV}AS26TG-m^X7ldZiYS^{5SfHt2 z(*=DB_~^U$KYnocoy1EGI0DH>)gkpWr@LSI$}ZU^s)JGK+mFbwo>V#|nM8W@5;hI* z=#3*PyKlTdrv_P>%A{E5$xs(sSix!e`hM zyp=*%$tp1Gyont>{HTtU-WF$sBaQgz)SG9Oz@juQ!GajiX%;V$*&a3tEEDG(R7Px}R{IWZH6 zo9uHwyYAu|J8q__g)Z$1N%X$5sJL6xso@t>Rx>D()2gdVli8sXmFwh7k8lUn#uX)O z!N_&TO&c9G6C{KEex-_H$4S2Cw{iTpHIL|LUg>nB`n1TXw%7?B$E3@BLvm4t$U40p z{+~H2w(w-_n6na)mKLI~ymnm$3*F*FqJLmwHc@KEoERYkS93xaKVXRQ8_|~3oDc)c zYnEayed4250i5dF!q4%Oz=zzsVrg?Ob>*Bqm^1sV=|#xdq&*m3oq5S_sAer5R5X?=d)=$-j)55U*YHH~v$l%&UH7nN=Yih}$FiG5wVnz`-B7(`k^!~9D}A$_0&LdW)0 zosC$zJVCR!PF4aDvW?;|?W#q_HG`+goBtb$1xwsFKiBV;M1lx)PTHn#n8~&O6oo?> z#*SOi7i2-Fr^|^%7*{z# 
zMVghgbMe`PK>|6NENCli@@o`M(Muq6Y|T4b0b!X&mrsb^@ZAtstcdeIUKt!2D}~fF zd{)SHCM!{o)gU^2rp~oVTxbjml+di6^p(X#Q(A3~T{f!$VYqa~>j4`4KyOi{5?&mh;}HITO(lx9~2GDIDp=#93IB|n5_Qf$mu}>g3{j!dc`T@4~!Z4!lD<)u|T+3k77+GSEvl}jx zW!<@G+qMpf(mOmGOrS1o&emGT*8 z2YDqDJfPuM%sU6QTilP!Z6x(^1<5&->1^+#UJ|tSaQgV`2cJJu#nI>VVtexRsnstz zoupd{bSJjVDW`}yKa*Z?KdqMfT8WaLshwavKGiIOJH7XG{|=w6O#Za<&efjR7omZ? zpjS%7P39O%_dTxR%FO~zpf1$1|1FcK4~WK4OiRZ({h^;@TkGPK{zi|UHkOW9>j=(+ z&v%<26Oy(MX_8b!zg>JXJer`p)3@(EwdL6a{7+uyRefx5>GVuOy7j%Y`HOvKjVbw$ zCdni7_j^x4h)JPTuvA~J>-P+;s-SFfzs=^Sl6#da1^QN|M%==cuQQ7%er0pP$R5}Z z9*Ko)!e=K9gV0vfDdCZiv>JY6g7{6%vY%c}KEbrJcolP-X>on=YP^F@r$tUa9X8SEtuPi=Q$27dE_n}er;EqXQc7NctPV{3U{gih=g?NVikt9) zX2|W;L}M;6RwQ6JgEqK@K_Rqoq1T?b(~+%`K3|2b9&1+XRt8a{>T%R??yTgbxPSNC zk`(|5iSz$(FadILELG;B(43Vp4WgmsnnCJ*_)kS zs~4@%f~Z^ruh~M=jwq_8r>9qoPyE%owJ{LDP6(wb6)mN^>^1`f;fJJQxZwe3Y&>oEX&EVz(*y~oC zy}X=w1x=f|qcv4^rHD{lZv?MYZ*);RsP;6j0M2LcvXpCRZ2( zNQxOAMI57Rtg_PeJJQ;oZPYUYMeQGBMH| zx+(0jPKBmAIJtxGE|K~nq_39ki62+d#&-B)NIIqSKfLJO9-*1WhhgV zD&BH9OApupDfJso8^pIKHWK7=S(7PWlqP7ID*_HN(k@wCD_ERw8{c>nwc?uL?V7e; z81XT^vQe#}C9tPDu|W*NYCEbiTuujC_TRy8RxWA7lVNK6W3H_4jAV~S-|+?C*e~8v zR)7)ywo2-^=%_+6Aj|MReq6C zQuH<#-y`u#S3POo$!Hz?(Ob}&i!w_f7JLKJV_U27%XOG_vUH`LV!WdX9H_ZzThsT3ac(2# zuF^OJWkEc`Il}$1YQ1V!>BdOU_V1v~tddC_9;*QCMc!x69h{Z-RTcIk@1yiLzJKcl z-zO@v>HS>snFCH#3ojorY(884@EGWif#8@?j&FI*p?4wq=z@}`C`F_iLYCkz z7ll|GO;LxpDUc-S^?W_3;)oLcJQODy!wj5GDJKyOTF?dYz4v?D?aIY1Uf*970IX40^A0Qj)#({zGm9Z3_3 zGhI?f7ah0-8^Bv2DYw8iyjrNwl4DWtAnE;FS+u{~#5Xn)>%@=XAdn8DxPWuN95Ws; z1R^&eJ{l}`?A8gAe8Jhpw_I~3ZQlDPo$1Tj{cpWhi-zW#CK}*Uj`v(AP^kNWu4j+p zdb-qN{@ARAZXV2vvHZ#y7D{Qgf{a5`5K8kJ3tO0r|&gwNvud+UE4zr1ATGU&!2TNZyE|b9>nx2)6 z^C<+Aa0Mg`u8$nmlygz~oPFb?sy|RCVouFbAte9D(p(@dC7Kvet8fuxoOlKyH)yv% zsg4h*KSzPIzp$DKl`;9Z=v=G&PgUpP~9;WnqP4jJYJm~AB)1f`m?ig zgCsQnN|m1`c-UucwRtsy+fuh4XjG;m#8S~YnNda>I!BvjU(hc~N=w<|wXb#&WeVb2 zRK=TGq>`q5pgQD1!z`?0FHO@mWXb!eX$3DWltqD_0>g=w)kxIsc5i59CCq!txk8a< zfKl~Cw7rRbT@u?ZKEi?bet=|*1v})425tKEnArWr|;V1!D@{AYtmeBY&5Zp1=QxCr9Idc-*zBUP2l!D)xho&>uA5f9KA^hiPlcC#ftKf*2stGURuv8+}tdfFz)ed z@32NM+a-H&6`>Z?zN{WgztusI+D5jMd$#FDQe1PUP!?Lg@kL_DK&aL#)jDgu?-k;zZs_Di|HyEH2FK^|1y9JD5V=c;QtKeBXpl@B|9L*|h zP%}7ylfq9Oi?Dd;jIP$oEuXDnSkMRZ5oL6St~U61d~_EOPUm)MhA1q8IIRaC@XUgv3Az}T!h60` zlizB6cq8Erqxtz@xi9kTjO#F4PAp4wBJq5At0{X#TS!jrW%B(H2G(#w8=09giLYGM z5PK!gGXynwntnO9f5qK}`;UzUc_Br5xt@K4SoGUJf)+Fu^Kda2vb)C5cJv8vF3BeM zHCbSFI+=Q-2t($(E`Oz?>gE?_=b4I<7qrTxsj%;Kd5TqX`*1R!jPZoP54@gZx-~x` z0h|*lR!wMM8C0*}?F7<_Ar%5=Cp{ZmUBgo~y z4J$8T;@ZZl`ks~|F%`C_IV?EA)QvA@824a&ZJ}piSI(K>=Ojq0b!fPVYGbc$)7_9h z38mBS?XJ$S5hM&w0o9~jNm{4!)J3gMr0IGB!z>q4hHz;$?MO>Vr*XsZxU8F@9eanV z4So~iCa8*at%6jiy`uD9yEYh;VM4#BwgdH!g8{3akJ4(RF{vlQa3+u4Hqsag-mFYn z)#a_8cvUI8KI#1IMsE&Fzip2m=uv`7#gBjZFaIN!0Mcyii!bgE}L4S<}WW00g!`{9oj3X5_1orl-twtBWBtLx|dPabX3Y{+iN zmIrTWpoUZuIlt6ohk33M1F`%a*M_)Bl4!9h?h(n7CJpk;Mn(>J$?eOj<_42l3}mrg zd`$cSzWA7ExLwQ4D${m$>&H81koT0wD5x+rZ5IzND6psCG;2oeAy(8wcufu=Go>|C z3;d9dKt?4j{)q+6qF{s)60IiNq_4S6;(@~zu@~|X#z3be8)@%@o=d@%hb2eE=8-!V zmL*GTuNY^?`iBP~j1#FlqTu;XCcvvTqhFAj3@h0pEmce8L1g}GHtnn$5e<{zuNV4^ zk(vxeeWBIB>R?DNsinbzR;(pB_$GhXv0jr|e)Z#n-Bxk&#|L{}f8$aV&mr`XqSTzS zL#-22#t!MQPu??u7)M)1oMMa`lQXpo zoRs(T6(o3Tt1kOV!ha@WuR)ntwh2zatc?FZQfWC0D?ZbWG&<{YmfkY@{prS*&%xmf z7L`pc{V^m^I-aGA_27eDDmqjx;BRyj?41o-p9;MECj7CPizpRH43x0N=$g8l{_dbw zp3rXfj&3KTg!~D$1$W-im2{iC!KC*{!&8PlrU32EmGs#wm0V~61!sx5Hdgq-T7OHu zU^0k0s@Azm9RboIk*_g@xuPCwc==ZA1Gw&xL(R#r5M5-Y(vl`g-ncy z-{=MZQXe5B?3>7e@#1@Ap@nZEYE(Fw)e}}yT5^&eByO=e2!kNF=L+rFvR!hypukJe zAt18cmN03A0Zp8JNLGc+y>5wVTq7;zsXDOem@J)dMUe|#gP^tw%}cWrn`IFZ*A8d} 
zaeav8jwPk`ZfxuNN1)wF8&x;9&61@h%bFetuJ0F z{#}RcLR7joIovl(3P3pKzOlm!3oAu!*4HKi#NJ37RJ-#|uWysq<)RutAoWpoObAz= z?|yH^zxP9`Ium|o+T`VPIWiB+=!CLn#NTzUWreN!*PYsozPeK{jH6O`|7>u?;mqR8 zw{A>nW(ICv>JQ@y-I6;cx!lSkt%wczMzco>$lJk@9Bwg*;>pDn?M!l`(aAzZmx+Zn zeA<#^)Q0JlYU;g$fV^$8sb<(B-$-}PNYUFd4b=ig_Dc2%ps2^F#!uaGvHPvj1I`e+ zd#gQyvv_}`%Mc|jP~$2fF9}NWxB17b-GwEjbSDc&hQ_93bkH9l2N6?7b~Uu_PXMbs zKZK-GCtzQoTuUY;ae@dYnmp85IDp8PIHFu^im_!rbUUKI7_K(xn)*3mxgZv568-^S z_GF$*Q4MU`^Wrc5{eSpBfBY{HX!qXxxxe#;pZ@~+*h`mw;a7j~;h+81pM3W(Nzdxv zFWIO5KR@|R{TJi+V6h|=KsMpP+BK7~Z4U7x61@jM`QXjpE-qctKehBtKR@}+U*@*P z)(}<-IUEr2Tp8z2zw$dD{@$N|^{+nul}nfOPj@_~pP&5Z|E5`RYaFi~SQ%r}J(2Yo z<*%=d5oTL^J)2q-*JE6pt&Wk4rdMFulmM^C`NEG@#z`Duqx{`p)RVu+|NZBmyL9P~ I|Cj#yziV&8E&u=k literal 0 HcmV?d00001 From 29f7c41816f0789052baab3d876163360bf1b91c Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Tue, 10 Mar 2020 15:00:56 +1030 Subject: [PATCH 315/335] undo cache --- .gitignore | 1 + .../sphx_glr_run_tutorial_dense_thumb.png | Bin 27537 -> 0 bytes ...sphx_glr_run_tutorial_max_clique_thumb.png | Bin 27537 -> 0 bytes .../sphx_glr_run_tutorial_points_thumb.png | Bin 27537 -> 0 bytes .../sphx_glr_run_tutorial_sample_thumb.png | Bin 27537 -> 0 bytes ...sphx_glr_run_tutorial_similarity_thumb.png | Bin 27537 -> 0 bytes .../sphx_glr_run_tutorial_vibronic_thumb.png | Bin 27537 -> 0 bytes doc/tutorials_apps/index.rst | 160 ----- doc/tutorials_apps/run_tutorial_dense.ipynb | 165 ----- doc/tutorials_apps/run_tutorial_dense.py | 116 ---- doc/tutorials_apps/run_tutorial_dense.py.md5 | 1 - doc/tutorials_apps/run_tutorial_dense.rst | 229 ------- .../run_tutorial_max_clique.ipynb | 287 --------- doc/tutorials_apps/run_tutorial_max_clique.py | 164 ----- .../run_tutorial_max_clique.py.md5 | 1 - .../run_tutorial_max_clique.rst | 354 ----------- doc/tutorials_apps/run_tutorial_points.ipynb | 208 ------ doc/tutorials_apps/run_tutorial_points.py | 156 ----- doc/tutorials_apps/run_tutorial_points.py.md5 | 1 - doc/tutorials_apps/run_tutorial_points.rst | 264 -------- doc/tutorials_apps/run_tutorial_sample.ipynb | 151 ----- doc/tutorials_apps/run_tutorial_sample.py | 154 ----- doc/tutorials_apps/run_tutorial_sample.py.md5 | 1 - doc/tutorials_apps/run_tutorial_sample.rst | 276 -------- .../run_tutorial_similarity.ipynb | 392 ------------ doc/tutorials_apps/run_tutorial_similarity.py | 314 --------- .../run_tutorial_similarity.py.md5 | 1 - .../run_tutorial_similarity.rst | 596 ------------------ .../run_tutorial_vibronic.ipynb | 161 ----- doc/tutorials_apps/run_tutorial_vibronic.py | 134 ---- .../run_tutorial_vibronic.py.md5 | 1 - doc/tutorials_apps/run_tutorial_vibronic.rst | 219 ------- doc/tutorials_apps/sg_execution_times.rst | 15 - doc/tutorials_apps/tutorials_apps_jupyter.zip | Bin 68075 -> 0 bytes doc/tutorials_apps/tutorials_apps_python.zip | Bin 52079 -> 0 bytes 35 files changed, 1 insertion(+), 4521 deletions(-) delete mode 100644 doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_dense_thumb.png delete mode 100644 doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_max_clique_thumb.png delete mode 100644 doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_points_thumb.png delete mode 100644 doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_sample_thumb.png delete mode 100644 doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_similarity_thumb.png delete mode 100644 doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_vibronic_thumb.png delete mode 100644 doc/tutorials_apps/index.rst delete mode 100644 doc/tutorials_apps/run_tutorial_dense.ipynb delete mode 
100644 doc/tutorials_apps/run_tutorial_dense.py delete mode 100644 doc/tutorials_apps/run_tutorial_dense.py.md5 delete mode 100644 doc/tutorials_apps/run_tutorial_dense.rst delete mode 100644 doc/tutorials_apps/run_tutorial_max_clique.ipynb delete mode 100644 doc/tutorials_apps/run_tutorial_max_clique.py delete mode 100644 doc/tutorials_apps/run_tutorial_max_clique.py.md5 delete mode 100644 doc/tutorials_apps/run_tutorial_max_clique.rst delete mode 100644 doc/tutorials_apps/run_tutorial_points.ipynb delete mode 100644 doc/tutorials_apps/run_tutorial_points.py delete mode 100644 doc/tutorials_apps/run_tutorial_points.py.md5 delete mode 100644 doc/tutorials_apps/run_tutorial_points.rst delete mode 100644 doc/tutorials_apps/run_tutorial_sample.ipynb delete mode 100644 doc/tutorials_apps/run_tutorial_sample.py delete mode 100644 doc/tutorials_apps/run_tutorial_sample.py.md5 delete mode 100644 doc/tutorials_apps/run_tutorial_sample.rst delete mode 100644 doc/tutorials_apps/run_tutorial_similarity.ipynb delete mode 100644 doc/tutorials_apps/run_tutorial_similarity.py delete mode 100644 doc/tutorials_apps/run_tutorial_similarity.py.md5 delete mode 100644 doc/tutorials_apps/run_tutorial_similarity.rst delete mode 100644 doc/tutorials_apps/run_tutorial_vibronic.ipynb delete mode 100644 doc/tutorials_apps/run_tutorial_vibronic.py delete mode 100644 doc/tutorials_apps/run_tutorial_vibronic.py.md5 delete mode 100644 doc/tutorials_apps/run_tutorial_vibronic.rst delete mode 100644 doc/tutorials_apps/sg_execution_times.rst delete mode 100644 doc/tutorials_apps/tutorials_apps_jupyter.zip delete mode 100644 doc/tutorials_apps/tutorials_apps_python.zip diff --git a/.gitignore b/.gitignore index df104e9b5..800d56f4e 100644 --- a/.gitignore +++ b/.gitignore @@ -15,5 +15,6 @@ examples/.ipynb_checkpoints/* pytest/.pytest_cache/ examples_apps/*.html doc/_static/thumbs/* +doc/tutorials_apps/* !doc/_static/code.png doc/code/api/* diff --git a/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_dense_thumb.png b/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_dense_thumb.png deleted file mode 100644 index 59370dd40a3a2bbad477dc433467dfbbe0ac56cf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27537 zcmeEuRaYHNur_YNHtw(?!GpWY#sa}LXpkM;-Q6{~6Wl$x+r~AxySw|xyViI9!nryZ zv(|K9%+yp@)zke{)r2X^OQE3O3y)se`-1yA@oS0qr(-t;F9rzgQ1BQqvU%_lOfJa@#c1Y#87kh8C8U8? 
ziem&kHb(fBKf6pn3*Gt3m}Nog^n6%zGBtcmB=zeBmTJ$lSx}O^Vv=|iy zyPG!qPcC95NjXTT_iv5bGSv5b$yFQQd|Bdf?!}?xGcEPGDXzf#+HT`up_j-XjMG1Z z#LGu1%2PkCYgavgMO!i1Zn@Zcr>^746d8|VA!b49S^l4eD!o*l&Lo~Ln$@Sag_4vi z)nqY*`2GV~qju**v zp6DD8I9G1c-!Oidlqg&q&ZMQxuvT6hGTWokVV~AXxV0zV{*}BNi;hq8Yde(E;OdD( zpH{D|yz#xcZtUJx$AKu0Gt;MioCc{oPZr9Ik!x+d>&a@p=jN~Q&T+W<=>2?~JakWM zy9a08wd~Kd>GeLY$SDUoNtm&_PYLvNGsbW4&+tWC?Y?(x+Dc(FYfu)Wd*|k_^5pmr>|N7q`yn5N;AyU!7)SFf<3 zM)MuO0_jeAm3=$Y!!8@EB7~qKL`m{^ta_!Bl8H=OfJBAl4N3Pnv2+tDT)nd!wqfBa zmXnKSRz3jf!1%z7?4i*}Gve;=T8ShBuuN ze_nc2LE)Y~<)9r{#BURhu*rK|zS~=13zPA0{(Sl4<8?KE1YpOJ!5PrUSjQ?0aBAF>H?Bq{+8EFFNE#C9 zP)io9tL={j0$oi?zOenawDOq>2)sP2GmC54K0QgI^>Qy%5gm9Lce&O6&%XOxHm!q5 z(dGuj?sTeDz>SFH6!)dK@oERRZ+a~m%CHmuPZ6+>)3=`_*Sx}rrtv=V1~I-M+)A^} zXjHb13mS`CY}&659FKh%b`-u8b4F`rykay1?{=@GX@!8rzQ?D@CpT{^6L!^dT8}(& z7}H}~^qNYy&<%Mz*c^fTHmk?i6CpYC-OAoEZbG?21_UWil~!BOFkzpS4)2Dk$#+&V zSKFt;&$T_9B6i=Mjo^0a=bq%pexTBoUT?-oNak5X)Ar+y+k&Gaw;28Vcz)-}FefJx z>QUr}M86gfF}FvN?Do-b%|*{cwR)kg5u4b1ZuBA-?~9<-G!#{BOKwH&2qr+;1d0XI zM+|NwsXFQWW@&W3#CgopuD8MsA%n+G(|mR7$Nl#(HOaDQ_h#;FHiJUG(q{J(GY2*x z5Y$@}NQqS%!IemS-pCJ{?Uy|gWYNpGZA?L2Z{WVS7dtGhnQLo?Jm49?r;n%!U5Lb}uk@zNzeB zn<>QZ|9r;j1$%S?8`6vD%qgWk-&;v#hHR+UMB7DzuoVi{xSnC16If!KP}Jo0e-cLs zAHbM*Zxi3%sU58iq;D)Z2}C%j*ls_W<7dE$YaWvA3A1T{g-V}RT&o3>$JFg^@r&-) zn^Y32V&>>((5+&p&U+)#rpXR|f?6x7^ZgG3+xJe!B&ob-SbU=pQwHloiNx-ZfrNJy z(EVG{4|sj#`{RI4P)i$Wslho9z&`4p9_Y28xSk0exl7pfI^IbWT_3sL&tbbRcS~S` zl-=gQ)ncBX#c$XT%bzC~g#)#U4nBYG2WPzNelq(>>EcS#9%ro{ih7gpv*}4K7w>t- z7Swjz;$SCRw*EpDi6hFrnkU61x8Co=FM=o7?1~G8M-&31f2Amwss&yL)qAwj=KH4e zZ1j|{T-z6xUSaEm91p&+7l<=)RcbON3wYoD=47D9vVqJ*I79$Q>~@C-SZ;2TuE$uz zy_3{%jvxa-4A8uq?eC<~8b#d!vEDg=!!vqjzs&bJ?_41r$86uHTJ}sAtkR`LGe$QP zS3Pse)85L3EaH;65X=LW<~c1d56VyZMhz_Bh{{@+%L z8j12?3(R+BX>oa9-hK(cFK+wD|3=~nP1z%~d)c_e0o|TWrPW6ZFz=}8q^XiJrz7Dj zkk0O-!`AlM5X?7jxP86unKGaB`kDHbRc;+u3yU_tAl0YBs(MRi_or;rO@aS2+Uv{m zo`Vro^8sY(-iofcdz4fv7{yt_j8BvlnGiFSwoTjo0bK%cys=r>{En)z;q4^n_3Qc? zYu7Dsz^tlTLgwQTkz5PH7P5X|Ag!Y%4ILd>p!vs(K_Q4VhZ(KJExoZP zh0QF1+ZS4x#+-rrHk->UTqJ0H27Y1Z+Nb7Op!;qamr2-MsvLtUP^AvE_+~*_bXCEo<|bgPe_D4| z%Q`q_VOie5YPNJMs1Y~v*l$s}=9Qw~p1q^Gbubz*xhqG}^@unhi7LAV!PYn;9|c8HQ2_E&Dj4D8025C+soWn1%k&) zom5Mt=u^$c+Qxp2YB!CfsODkod~eYXXt~>Ye4H{7){_;L8zA*ad1p!W%CpY+Tj-vJ9SO2_&3^{QG>zY~w@t8n_Ng67I pdUSMz&xnZs@8kdNX#^ldF?SSP9Nsy>|7r_CURoJYDQO)1e*nt+?5h9( diff --git a/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_max_clique_thumb.png b/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_max_clique_thumb.png deleted file mode 100644 index 59370dd40a3a2bbad477dc433467dfbbe0ac56cf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27537 zcmeEuRaYHNur_YNHtw(?!GpWY#sa}LXpkM;-Q6{~6Wl$x+r~AxySw|xyViI9!nryZ zv(|K9%+yp@)zke{)r2X^OQE3O3y)se`-1yA@oS0qr(-t;F9rzgQ1BQqvU%_lOfJa@#c1Y#87kh8C8U8? 
zw4Z~-3}b;xWfaYyMg0+?p}<|xV3r0*eQJZ6?ph%?_j&t%+e9Au_!wJ2YGvigXq%SW zc~M?VZ7sb`hue7bK5vx=nVk!b%uHLy>#%gpJjdoo^egeBV+{k<1CBH)+2l2K$XeR)F z$T!0=Q{Z3U>g(G(quXE`@{4y>tRixqIQgy<5wN&^l1R;d_U=O6<7#oZiup21q6=Ejf zIZAb^@TiSlT0E&SBW*SNh$QGzlpGi(JN#BsxIRp}#OkmE$ybJQszn5%7SpwUlf)dt zzzLX6IrmVn;)3{lB>E^9>$~Cgr;zo|w>>Eg`B8b?#rl$%^k_DZ%-T-$>dX&+}XvkB{y%AZ5d55?v`t3kBkckXff2G*Y@(lAo%Ztc5eN zGbDa(`#a6=aMCqste>J=UTOv)l<<^`v{NI*zqboi8colIbJ6IMu>Ca&OXwL1P&((g z6yrP|{N|{Qo17@rVvI{{Q+exDLxas;Q7)_=IPb8zDllmNq5JAAI^GMcizy&^9c1kL zMml)*J0q8xWnVdwN(_~{^~&Vi6>WO~drhM&hqN$lJDpNyJuNoRHa*+fH~|w{EUG9x z01jHkMOCh=+YURfgI9%iMpGcL&i~oGd~RNRa<+7}Ewy4InU#M&cJG9t)D8Y;m?FpI z3yv*L3fUP@R;ZXG@Ro(W)$roUJkrOp0b5`9Slz*GFzZ}`e=>#(NEzRxU>FJH(D?%S zf?b0;Xp8K-A(Zlou&q*h^|5%BY|b76Dp@H3<-@q66BAg3S0R@png~u9>qe$a6yg(8 z&Xcx#x|f^A?B2QHyjICXaNk8QHclROo>#Qrhg&-reznc_BmG+{5Uhdi$_XPB4qW^!=W+kY-YTc7KxpQ#@Ql$jVjEINe*9gU zRdCWda&Tz3gy>CXr^NMt8=B3$J;c>&Y~sNhbA-$noA;8*%g6cwY~Rtfj6Z8lfed;{NG2W z^kfEIPdARpAfa)V!%#ZVN{2rroucg_?xA&{8vr2q_FoM%scnNtRc86Fmt^5U*lc zx1)&Lud?;e`xz1!OT6p1S84N1j8QMOLKZ35U^w9IT`(wfZ;N{aTA$US?pN?^T~<~B zVg6DX!?9Rx7V?lp+!cKIN`&3L(7kkQv_piQmEz$<@za+)x9WGa0~LM;N38NEpBep^ zeaAgmE!Tl3VwD67__cIL9{)iSYL(DPlxdi7QD!O!B>G%R{!W&oyT35O(m@^*Vd(@T zlq-(NBV^%dV@sc++r(Md;`ie8r`vAlo!eN4qGbeQy~Yj5Qn>33#s?*Lx!0BtDzbGO zdD3L^{*1!ys4DVCvIDLdi|N7Y_e@9JXM$CaITNVf6AOWv% zcpQPz7s_M*47`^mbFIuw=GW6iw!0+$^4FI?cP}M1wl#_t=9AI?EX4O!XZns1|G`!U zII&o4AnacKy*eUi-f!PkwP-O)@%U8q64b?UUn;P z$`6>L+mL4d*y`KMQ&!bb))tZZOQ#j}l~V0Alk3X7*U602oX2!ys(o7OY~ap2?k&ip z79*c*@NEZylwEr<^67QGrRk|cIaMg-S*~&s+RnQaaFc>C2I|=W)@JqB6WmwExxnX> z-;+@Kh8t7Y3#$xZzWct`+BD}=fq_5nb zmbVzAVpxMX%ga^il|Ax)$qM!{H3dQNzU?m)f? z#=e&5r@cPkvU#7kkGbCVNY)cfWi+gN$ytQ0<ds?mGm zWR_N*m-+9#6T;fLDkYmA--Bu4W4#8B#`|^)^d5bl^N!1Wl1%!g2H0NVFMi?j`aQ!@ zl+t5dm&A9s7_3nBl=9B|-E$efxUCW5^m5Z2}^f-g)JU2= zu^>z7Z{F@l_*qrWr&f3M%@TF?6AK`)o4$ zXZTTNe%wVp{X$DLP2T~Ofi@;D8LeDDNb-3Z{&6oRBFwrrD0Db(W+zN*icXDJ_LKWs z+`(l_*MSp;lJYc^+yXum{=K9?2w!9Pjo;d{lkK_tlS==kc*Tip;ZqZQEL!RKyyk27 z`Sh}Y!H*Y?Erm~a!hZ--9onK)kU}Bt>GKQU8w1uhh>$7KbkOtud>8W0=~+ui7Va8sDmc`5J_4YYLR?Twr+mz8^23|(d*yo82OzmjUQU#G!$It#w;R>V?IZ4*w_JAD_A5;-L@o0% zN<*yibAYkBUd-q~M&j1?-@^;p4hH1ELq!xtIZStFqJK5DLe5MMb@*kyH@=F!d6+t* z+&unlytj+r6S;#95bmrYeDBsC$&{bJbwC&XvZWC2KwAtJO0n?FSD2mihVB|AiJKnf zE=hjXQ=6)j7{`d_@XsPMdr6d>WJYc5ZvDDed)6a7JO0RBcD6*Q=F|N>dfQ%6Rnm?q zH;|tYK6q(C*XevfGHl8UHwGW%I)m(kCmF4m2_(z*rQV(y6trnf(rQyMf6w0b5K004URrjJED|Np{ z1=R@XWsmb_mo>2em6!SO!Z-E%h0NUU-}(j?D%vJsi#=~GwaNzZCJqhL`wMAjFlDkr z7ZrQPUs1;O+O??jspgu)*&|@hRcIarAczSH@9uW9^oOh(TfpW9gh@7<2NCF_Z>>jj ztfR*(Fe;9|;Q-$~inEBEJ;)JD5ZMH#E1E7L=U>}vrJZ*cr0+ZnuVh@JgE5KDktcT&|=)ViiPp^vwk$0=JqoN1>kV zHSxH??q}Axr}&3@LQWoY{%A3CLd9U*qqJf4A|9%P;LwP2hd!qX+N5Z@yqUY6D|8Oe z`1rtqTY9aJw<+OcH{*s+7j|5?`@JV(@~+CaA*!a<1<*1_>A@*LB;ZJXxE&k*JDtIv z^-IcHPW9jdY#*yKaiLXLL+Dif)aUel>Pj)p$5_A7)P z!TmdwCTrXb%kDcTD=i7JPHVoxp>!ZlY`;7+cXBI;tm<$c>3HpYUARd!+bzPSF7^OE zJBiKuVbyWA-G8-yTO+S#6UK)q)L+^nAx}{CD^-gv966!34n0HW#Np52X2Fq{u@-N+ zLm;WLaYnsck>?yjy5A?II3jl`tr`VG+!`ZCCx5MRb01|>?R7xZ{s=2*3hgo3(rOZF z;^n2tF7kw`LBU5sT0?7EXO7vVWAD4F06nEyeb{l-)^=e(=uU+bujI9Yx)krgyA>6l z?EK0YHhh0B5f_1)&AhF%5EM}&c^+{jO`lza%yggf(`LK;2Sy_0t$y7Em_ag!gYx9N zt9!f_{pI?HMet>lDfw+$ z=lU5k@Po$j9U=B(MG{{>@e+rsKRnZPA>8@%TS~ll$-DIN~NOQIsm#z9#mBh zjNh&^pY;3J7(syWv8x~NUkmFhY-P3r-4{#`sHRxM6%t2*_0-gew!NV!#j<{3txB$( z5Kz+B9PQ>NudjrD+Lm}3cBLw(_Y7>od-fhoc7#1yR()+;Y_iV_zlPL#zI@>pV#+PD z%SLv?V^NszfyD%fWtAk`^|WOqHSx;$&|zU%<@B@N{$7(ZDY@_X{UO02EO(wmyGsAz zl$pDL zWeb-=2<9hOGxIk)cSP0GXV`X`&r>VSK=1iUR{5pqIXVNMulzH+#=I@7SXrG!i|KmD`~lK2bb2wN;Gv{E(z#D7MY4%#w0`@#ZHo 
zb#LM(u9F7A(vMgflTyrnINax*xK*8be$J7c?QQ{|ZkOfj?uR%eCezn7 z%M#C)ne@A0{OwWrdwn4*ZkDsC_Vmf8Yi~?wcj>xvkx+Ec_e5Ui$`Q?zz~#!Mq(wTO z)*&~Fo#JDcEU>dDSo?v+USB3lR;+KvinLR~P2}Bwzm#Imkkyc~FR%kAX=_hx%MG_B zCpuj6mc1A-BKD@8;;dJn4?4z{9=Q+nNU6wQ#bQr5zS+utqH!LbV-G~Z z;sC|TlE`)K!{;95Mm{1?mZCA&Sn?SyLSxhPnp~8hbC8T@4)cQ+pNEIvayb;h4#_-U%vX(Uk_JVpH18^m7LA~YP=nxe4l%Q zJHP?oW=Nrs@lTY*R7m?BDAkxxtLwj`SFbx{w-iNcB#DxD9s7>J_kOg z%w0l#ZyZjPn*I3Kak|Z2H`&b1Tq_OQ&(m5{jyg%9=YOs?jBuGfR&awWq@PzMocj$_ z3ZZg;NEj3gpMElO0 z+RoK)KE>*5&z+FJbyXq)Va5u+cxIiDv(}=6gwl;Q+h20_q>ThKDCML{FoR4EgU{f2 zF~NJ_Vaj<)MUReuB%OWzKHJ?Zr*=z&(is2AK#J={Q?vPer+emFEfonKS{((U*;HRv zsY-e*6(9B4%r{G$#4vehNur)5lfAX#$wSOO5*zeVN@z1IFXn%c1*KU=BJ3mY7lZK# z>U#S9uINr^RfvCL(e}ZLEYlNCp~LT>;3kW2Da}&WH0p4Ha z=pS2SdTdre+Tqixkv4yVcq|1DfPVp1V4eH?K6eaX6oNb8JB9I>`OP3mlgTJkGKo9e z;d%z$2ng4&-14>=3&kd1uN6ua>rTP{!T9TfnIt%}?R%BX!V59kP_3h>XxUQBgxl2z z8mpX#eZ1(6wZwa>O0OKque4XXN`1aJKSf)u@dO^TZ$P^1mV>H=ZWQ7?F#{~&Zr@kg zGwd9))ygDg94Pqdqaf#O4)!Y$Rby1z>05phZx6Go7FjF_^0`H-v_G^HHbA(lPt(g83YcF$?@s>@J*vP$-&4 zndT2+3G)RgvVoKz7JkzCoty8l6?6a}krl$&&pUNTtOqqSy5NBE4ZPB%W=axTjc$l<%=<f1_8&N9)%E|R;NwP{b^;XXU8SDTP^PvhwDgN7KJs8>RL&9u)g3^r zY|5pPuaNF%5&S=(nTJdHZ_vic$O#)|(FBtU9x!+KF7|$APW)J_7;tD9^%#9CVAU5E zV1kJD=Px5G8iBNfd=ItX4)P1y_!&ODwUDx%qSjQe8N0=M!Wt9kR1)&j7Uf8*M9yEX zc_!*%A)qtC1bhj_G$pY_A6t8lJ|6?RDDI&@5J;d*#?mp%zhdS^3xVQek;MUm-oP1;WOOWl>O4k7NSZ+3`HYD-H) z8;2BlQM585wWI2Dpj}5hVm1q6{6^ppE?V2)=LwWEMu@v)$h=zq;O*a^r(YX=3+{d3 zpSe~O-y^CzpC^jBxCH}YDNgyTvQWj_6&lCM19?Ml7%brt2GM0dWTZ=QqaFd< z4XtwfdbfY!TEu`;%{i6y=+OT9PQKpu6x-SU330M|&Q@}W3zKP@Y) zVDkBwi!OX7E}@VmMt0Hs&jdxK(zUp<0w{<#7(^66%3kR|uRQQnBO>6~nFs5u-R*{+ zrC{rP2Ve@ihD2^U^y5!DL@_{8Ta3Q)&`n!6hCYxUgvE ztq$Z5SnQ1D+mx^cV$e&NCAF^75QH*N!&kze5pJPm$sc%Az_^C><4#q7W91(otWhZ* zkM)0v{x;(3vd$XNX4ax=+iTTfNKJ@=j4d250i%8!z);+7I@^)rnO++4 zy)(_ovj+t0`AOTcqvnApk}v%9q?3Y_)hmk4Vo3OQd+>ZU^vA|TW;;g3`QshLaP})i zQ-jS4P)a&>3?e8i?6^~+6qFS>r2e9-a1m3hZ8-P1+^s}{I%2ijz1C}a^dCWxvUF8_ zuxf4TpcDU+;jH6iGJqph$TDD?oP}<*eN$}Dd;Ja6RnBscAV$qc;HeNSZhS1DzH!|3 zy>aXsc;F`1Ah0qZ*zyt7si2>}S6IJ|_dIzBMkHfehY#{Ojol>78zZf>cU%@z#uDlk z2ltGfEyJ?j`>OcjH-U@Ml}+>6D0(RxVMA{uj76)<8thI5^z@X+=7U!|q|{I-Jc<@^ zcwc?o@49B#qevWDEm_VDuDVQA<1;y2?bZA?tee+u@ZG=XX-7*VkdYN5snuRL^t3+* z8zyt&$nxDci;sHVXicX6X*dNWssB996vx^*&mchV4E?pgeyVydX+)_ZAO4Rw0d&+7 zG_(e=M#PNkmS?w{dbYY!^$TSP4L{21+tjP)X~k!~bQvU^)s5_Sl!S9El4s^bo@vXhtC zfJ)S#^qSgDyGdO}NmN3u7Z8h&euhe|P^J7Fm}7M3^JqOEZ-#YY?muP2(x=MKC}h3n z2J*vk1h0-u>3+bk2MmXrQyblMS+z#XKpCY@jJnep<5|Who$E!HIWdcN4c%xKpgeX8ne#T;hdT$G!+ZJ;I|ax%625r2 z&?J6d;dff9!D_Vd?-Uo(#e?X%vf<6phOJ4i!|&Ja zcRYUX+(0iuyy*g8C>FXonrgX~p$N&Kqv3hDlFU-9jG38TPJUC&kDbP<3DM{JEfea+ zud-atwGpKW_nCP8w)C0QF}FnP`uZO-b_9_7 zwkH1U+jB4D#iit6R=T;du0RHM@+Tzj6|0%{HR9hVC{p=YfA-~>rPmm)6xX=%lMs3( zOBr_rl@7zDJJ*J?PMVgrh1&Y82ph|Co#(FOEOw~sx)fj)pv-P($zGK*3=Ki+7zOGqcg>N2}<32q);2FF3`Bp-xZ5i1Jj$Ly?hpULl{zgQl6G@7JZJa{EwxG@j#4@M$ z7y}#~8{z~nI$f5hlS+40=Y&6X2p9(+*R%IxQ&va$dj zAlJG#2j3pHtWD;zKJ9q|vv?rN!7obM6%YK_Rr-`IN$@o4J!MxVUs^=JLrr}`U-iId zDm1vF03ozL(4|IURpG!s+a z_9FT-LyLu8HrYPACT`zw7hdB9SMk2TN#I57biYvy$<_5m*K14 zYi(pN^--;(jqozs?JSg8=Dp8A~3TE(AFQW(-~|gkraGmwmuSQZak*a-P^EBrSqG3 zq<_u_`KnX8#s0K(2aJ2m9*8>-7KKz(ia~OB=}SvEHeqc;2w3mYx|u245H4qjdxOf| z9`>iUY@lg^S4VgB=zIk}v+WucK^}?o`VTy8D)02O&+qZ1YAx9VVw#uhMYK6K^kTY; ze6O~VVRA(xdCL5m;p!4gat}Dzzut=4JeJl>J2TG=q~%4RL@;rH@}u&RuHskNg+Gh5 zk9ew8N2oh@GH$IKouo<%xt|Ds;Sd%*ayyYV!+PDk)G2)7W=DIBim&k89FTl00>7b2 z=-nT1ueR^sK_5{Fo`Do`K?mE6v@0YQJF}N@Z55L{e)|a=@p}i-Y9zyP)&9?Dc5}z) ziEDcIyYl6^=?~#*mjZa53O=_AWsIK84KV_MtGIXc*-p7MjiqE|jGfImSU14U<96Ot 
zbEm!MQ|2{sI)3ugozzi>CY;yMX6S~BRucACF;%x1-EX^fnUtC$}DmcGRi`bg?Bzr@F^6_bY=rX_@`Q89`Z(T7lY8krU96 zhR58mIYD6Fr56m>bKN$(fx)@ia(IC`wJ+x<^i?sU91eXR%4)|=UvoEe1Ls<^zJJ}% zD;gjabi*<8!kL}vwQ{=>&HoT04b`vHscH3PORmn{qT*%*pZg?zo_`lZ(6*57jWuJN zi49QOwzg;i7$qC|`E`+q&8|#D-B-2@w@M*NsOsPrTsM{qYpnrdR|ClPcxDdw+EsBj8p~R0`jel&ON3Uuy1&WQRscko}~AhFlJX z0;5W2(@iaFg45zJ+i}U!t#8FwsZNrSQozsefqCtEF5lEX&LAQHfZM3OSs2n@H1Dys zvr=3c}>O^$K7a9fFCw zoK~3m``yy8KXLqWuaAzlSmT3zA#t+%y8{H4pzh~&S^Mi9rd!>NSrybbdN=)hz5A) z&!Jol{M4_2tG`$DGOFOZ*OAX@(Ner$$>RwsZb*E(0PTURTMT53eTy;10>_ZJcU9qbw9mZV5 zygY7xnuQ%JPZ_Ptrc1zg-bpW!htd~G*Cj&qmUaKMerJ9iPSI%C=mK?5feE^q;vZK` zPUmz0MIJEa0v5Ww0Q)ULCvD)Uy6y*{P_4-Gf&)&TFj^!=F7y?U`>di)r&A(Q^;MG4I<=|LZIoTeq{m!B+ zKt;6Yn*>f>Su;NPhd6hhMRmFC!)e**f!`=>r=ehUJcCX~b+!hp?@;HqQiOo}Y(7pkd>@a0>E9hXi;ma^H?>Aon^mK_f8ARDOIt-s zGBi$N9^AQv|eqU zQ`gKcLdaVwywMsw>6Eo__PQIt!?OCHY=WCEP^c6priAWz#V)UJ@k@>HJ|O5%@`uht zqJ~e_%cb6}X`YNZCBCJ4S~j~yp380cU;*=fIrs z4^BQNB&qc6iQuKjv>IwhMdDhYNqslDldH4lZd!Hwk;F$V5F1+9f8?iHHm>8!WgJ0S zVR$J<)`AW5L||)HHey4eN&~6%8C+P!{Hi-0KWXS06Bb-udsmrr1}aVqQvq4^4Q&seegE>dq+`1aY0iPEe=8AXgMjYO`={z7y zfu9*``q-Cr*z^(5KFIjTECk+cTbmoKL2k?sqY4KPaM1t&AQDF-;}U%c_y(&}m~Cg^ z4azNbNNhAA15?Mzcx@|l`ZHU#F?gi#r(J=*o-T0ZNj82_Nk8%y&`hld$Nje_n*4j2+gz`NefY++}3 zIB3*KyjXq6NZnC#|7IzW4k%(%0-9UlX(+MUJBB3eP%jUpamS3vRDbYlF=46wt-=g| z&`OWiD(*F_sk;3W7#s@bN(YX@AfCS}g=FEnkj!@5&wA{g<~2mv)iN93>_3~5ImDQh zmr9!eYC^SD8^8C;l?$J*#Wc~G24gGYG(zR3Fwlp~iX2Tugtj(q5}YZ47-FW7l*42l z8DVg(0b_oQpVHK*f5v0xwx|<@qligNg@(Et)uPCe+laD$B5B({Kz~)F@u0gE`Av@N zM^4nXZ-7^En%msWNqwLdno_!!<sqW0^L z#ZO1nc}n9zir*5+sKDOOy+fF2NYHAHnuKBnOkWJY$m4>BP1RdmQ{5Iz%I7B}<;kk| zree%lpKz2jD0Yl;bU7^jZ!k~umf#NqUxEIhc5ETq_6T|E&!zEDahKo2_EEM zJuuyaGqWuZ4CQ&&!`2u;5VZo4vbG~Nc(<%(lT=Nz;32%5 zjS_&$jsFi5l|?DaoF^r0y0@BfCIDqZX?=3QO**eHq#k(`*2ZX7t2zt#o%55ykMk@J zcpMi_MKw4Fcsoay#vMcIlw3fcVwCTnX-KHklcOL!Nl|6*rBFAY%>QrpI<)glll|!W zauz|M_^%nPAs^~v^xIPQe}*P)L)Nwiu$pjlzF4D2Ukao%EoCyFps5jw{xNtjbq#Jz z480QG+MS_#Tfuy})gwdfgMx45%lr=YgJ6bsJAdo@P)GudbS?3>4VVj)4MeqB?a>_V zb;xI`>n8*anfP*6Xc8`07*{M;_00>+k!0R*}Vj*zGNS^jocn;7YG zA&Y9OirTBRl|wUwFtAYZMCOqPTTTdy+(_{so;&|H%Z>1Cib-OpS$KXs%zrxgm>tOS4Gd)G@t^=+f08k4Fe!Qo-2Gr;u1YDO-7<=xW7xsj-O*AZ~54!ir=dfSzx+laUk!PPIGm{mn* zY_;TxVPOgWykgAg70Y&XwlAO*v{IHMnu`ywRrW>8!M{AkmJTd7C}|fKoFt;aX$hk% z)?iktJ6jzV_?<)BR{$$~0=jdkSC{P8!epoMnT7nTd~#w`sx*7cU;27vjy$)K%R>Gj zzns|~B7@8lV=)0m0M;n-a-z1*s zZ*W>f+u|cCEne+--2AZ@n)9ee))~bo7kK3!$AJB?k(H%dH$d;mv3IfNv}GmURj~ zb@}IB0IpF>!h5XzPivwkQ3UgOgK1^urOQNQ^9Su7sbl36Gw*J&q_6NG$;pZ$(^YpC zl#^^4aM4?{SGBu@zMS&(OM(H2MBw+#s|(jsb71)hKGdY~_|VDkh?G}=x^Az# zifhKec9Rxwv|8@kxn{HT7wa{?BDErBqC%z2KEhuXJ=z>q*bjiN?;b#v%szEySHtfzi{O!Q^J5Iu2qAL5b0$}Vn@4^b;Z-2y_QvjcnTnF z8&k5+3#7J0Icyr!CfmpgXWcs>sUL9>945~G%JZTLg`#qL0J02anTPX zTQ`m=ARF8sKM_bDc@HPn_aHUPDxBqs8U)buM?Pfmsz`e>6?^-8=ATl2zo3>5XxWJS zcT00tNrgl!EyxWMunzL6|F^MPz>6UMrah~J*x#tT(%(-KlY3)(m@bV`&dkwDZ8@#` z!^B}1FHKkUMbOYG&iv4dTF;X8atF!ynbH?jXHiz!c}+7v*BReTLN;_O1I+5#$?w)7 z+p!rB@`SrDlvYofa;zu7IUNQl<;zccKTFumM_zdx;i0>e-Cu};keyZ&mm$rSTLnVJ zBWEzU^s8-I&qsVM&DI??9ZFWABW%iFix!WrSsX8eXUE;?`wBfmiuA9uT!&RF)|AsDu1gIpm)tQ`Bc#NTSF; zEv)a?(i-lQ(%Y}`eg5>F#+2B2-;*kMb1V>mESd?eozMUm2lC|i5;fj470IdEn+e-D z+>o9nEq1!x25`rxMHryIWF1vbwgJZQQre=Hl>TX}jPO%;6ahYSxkdEzQKtG z4O^mk6ZxnrI{Z#fXbch@CNA(3P#P(`culW>rD@9HzNWoMvK@oZ%_Yv`8@!Ddqf2Hm zzt1_?EwTnzknb$mbzN@G@CG*_DV2M*+=_mC&#pAm?Yf;TvqoNcobx)FJx-p?krP54 zaK!$v>}WcxPD+ue$K8|Xf=q3{@lf{(tLzgM>xe!kGlte|I$lg2CENb(iX^o0hMVq8 zfZSw7qK4Cu>SiVJ65{Kf@KJY%_wl#Y7BFHqYamLVSU&TXuFEk}M|q_z-j#`%-OQ#3 z(4wvPAa&w*%)3j#@3PiXj^}+1u2XXTAIB!8Z-)mNCxkOo#@_;E0$DSXw&4{m(S&Co 
z&*DVSqbt^hinDKmy>sT9%21hZjr^de;=Wxd$JUY=-k^nOZ!}(Y8Ok~d5uNX(X8D+Z z&d6fC>ijeOt2l%|UP|uBPhg3w<_L>y{vyR_x75idvE^=#wtWnlP7(s-XZ$T36fC}B z^wFUajjOmmfKcV=!FqaHi9r6gxZU0H=3H{#;>r-f$^t)xq>aW|4ilKw%#7W-PdDF! z$4w)45|a~ULOj+H#gVN%tCDky@hJWRKh=b@TFHEKvT|(EUOibdAO*YaHVNB27#~iK zWf~Sp(KwQ~a_g{Gf{$TIdWZ3zaZgcYyRV6X-XQIj5riN_P=)g)=?hGVuI7n)TjQSE zNfO&4ggR8s**vl!PHr8IJ}IoUXETT7VNzJGmia$6VT@^7{}JDA`^m%bHNNyx^TdWD zG}DkpfD@4octCsGTM3b962QxYUWI=FrE?DD>66C; z_UBgGkCsQe1{B7h)J=a!TN*vLr`aNGtYf*KlW@@lG9+t_isg!>#cLJUD@M;(sx2Qx zw_aD+-zM70BcxwtB{1nG6^oxik(a)qq`Jgl)<&8ha*zQScAeEkW}Id-(7A zpC2map1?b9FdaUXU7g2Tft@#k}HcKgOdKbK0eXM(M+mwuOmZoi-T;Q}%d(Cs!a#5H@H>${*6} zQ+_TtGk6+=VI8f-5gQ#*1-jlvX-^ERk_U@|iT(@+axbUqWHI$bL`8PO^YQ-6rdmwG zxh01OdnGW<4x}@-y;yNEHI}n-2ZL2iU6H>=qu|irGX<7Nk&y{D+Ki&#Qd`6_ipPRE z%yPd^RNZzwUh;aoam*{wwQY!Uh-xpvF> zqZrFYDWv)lgK=CRNxA0rV^&AFFRtqJAQs?dm}R-dC3y_T?Hm^**2YzPmS~wQC8QkObD+Matuge8`|>gr=)H? z;1~|5;OKq;ARP9oVpJP#YCD< z_TtHYbW%##p`^e{!YReXfEBj=P~ElZf2Ti}w`Rl{9#JXpG+vnR^>()XZ5c?V`i|D^ zhPN5oMSG@MP8PLUieXxPg&*VMqmCIDi!9~4IJz5Y>~1o4^1QBf^7=SqqqRY&l!{yQ1-Oz$;*()*AUwCRpPU0X#dtR& zV<+gZg?+z>@p{Fgl}ZlH{ZTC*7rWnAD+lowQAMV2Qqhd8Lu%&va<;17-Xx@cP{vLe zaVnOLW^1`{`u&U%?+%0H^#ogVllp;Y& zad-FP8r&hcTW}Ao+|N7j_w@tT$ILa8$(+}mhwj^+dFRau9iRR38z0KM$$JzDY>Y33 z){cDb(RTr$!9J09{>Pual{^GKgfeJQdM9NFyEb~tXf}(zS_+S$Fd@Ktj@!Y))Cry2Wv3n>~119^xHRNxRPj4Ex0Zs ztlzC%(4k|l&b88BdtVd3Dn8~YiKNyBh&6O-u-I#V`eLb4v^&YP%COj8yV~|r1bsi6 z;TG!kH&wT*Q}xg55%^M#PZu{0I3*1lAtaxRs@RLhZP z>Z<93B;Z^5b}_6tG!pMGP44n;Cn_sXk(7?E8lfl3;j25aRj)-n4HPFz2h>;op_78R z%ZkpYE`Cm$i3aJ{XyykM(u`*m_~zSZp7W|Q<@^5DbQ>YvIOX||=ij05mVqR4zC>0H z;bDWU(b>`1NuzdP^V0)UjqPJ)#{=bcfiCBAO`;+T0m#`}4t)!l9Q=spYGox_Y)fs) zXkcO9Ldm{<DQtb|Zo6o;OoZMx8|I8jtI%n<)or-HTxfT3!kS9Ji~bgUG^=?o z=R!fL>zFDpfxYDvi*^!}=2ItWi2}k;^**D`O0a8o`Mj`D$L*Y4l+K)O#I!xeN62T? zG99 zxU4<@Ik{)|fUs!j&#)4&6>oE}yd!Cro$rv2UR?M5VYWqG{DpDr+4h&Zm5+6A2O}V} zgK)*t(~R)$W47a!kdbt00zOF>&G<~Da+$)v=R~E@=KavX<4@!H-o6|1%Lj0!cTmQ= z{=dI{E~ytamqEB3F%^NrTdBG$2|->9T>@xqqr*@@u@F9rztb zG8u1aTw2lG<|+B8k)t@_xo~*Iu7uRr+8t8#veZ$R-!?d@y{aatib{I;Y-rDfOuSG zRV~}+8qCf*l}j8$J!Y%b(=L@{D`bZ?Jw1*G4%Yb+PYrH_q&ETnwIDAG-&diSEI78k z{BoJo__ALrETs$L`C#+Uy^ASzUi*Hau>XZx!w5kDkGpRL= zPL%H24Q1g)nv03eOm~_1gC@W%Ns6YXa7#|_@xo5k-A}4Ys=j$Zh(qfX{j~af9|KP#K-ut+(NLHGu9$YpW=_91prqnsNH>}E;{#Vk z#cZku$DNC?$=z};5q89rVtGH3t}=ourNS*M#OK_t5ta!YyQj9@p*!Ua<_p|CP{XmeQwpO3=??#x!#@M0Ee`N{e^9Ad&U8s}Tu4yTK--aR2)Mv8&j( z2E6Ofugf=Lu*yiWukb|~oltIRQ?}*Ie+dhuF5`dviKS zOt?f}W?&LX=ij0@;znSbm=1`%qunr$_10`ua_1YXE`+R#S39 zCp>%^N}m`WcRnPn75a2IEXo5C-c($F5Td(r(()}sbAG)G3rW}6lVtdtdsxBEste`F zblZ=r99|Ubs`q;EuJRZ+_FJ^`nJu6$@37>|TNa+o^nYZc^Xb`4gVPPjtQD=xU)?NL zx?lA_&>?dh`@dSWQ_;Lf#;`}mLy>pU@;P zypIgfiLuO-C(5~9tA7=vPRf3b7GC>xY>W4eDAhP4rzYX<$?Y3=6?5HgzDSJNl^tv> z5fW0nk>3S%TV~c^#%tLdrl*E!7q~Hn&Jrx49PTw({df~&l&9ZSx;s)V&yVelE}Rb( zlG?-G!Yg)U@20(rLPrd@KBOs-a;=*UAI)=luY$RT`fwzl-D^i|s|pysn^6)w|FjwZ*Xpq&bIt{k^bOdM*B<9B}2jt_ZIdvh>yzl<%p%5{` zTIj+Lus2-53Q9y66il{)*U1ohIAS_< zgjt39d#XGk^b*N8v1G>MO4RU9{MY7EDe-%NGcXHa?p&~Lr+*5(E_1ba{|Z;@((cWx zHO+i+=ZN{0B~Co^x4VPwYa(W%02SUR>g#HpNu;2Ik5A}+63iYRPN~0CKK8WO=nWceqNcxEgI@B-O zM9Q!e9NY+2^P*Bc4>r{xcjZKdxqfuFPGz{cE#DQQdGyajhvMX=qzl2H)AH4t+TipM zWZ{ioX!DPG!c3&*c#K~(gQA-o=y+GZVky!gO=!8yQ#{w;(dn>}Znd37G32+>R(YW7 zI8syzlh{^!@AM}K^?zVNQ`D}*RJKv$%jIByuPmUxQtKz;(B$lchs7}tDu94}&Xn@` z#f3^4^T@C7X$zpbq^pp+HH6zD)J(1Lht|dKYOaiQH-HFh37nXj4ZJTCcUe63)yS7? 
zKRyw3ze;GnhIRrP8lw-mnQ586A9rq1-BA40zE%0$65yF|bRD^NCAg74T<+Hc!K1@B zSso*GV7)VhWqr>I*gt<;V)*zUZnMy$(jPeaj~I!#m+1%?0lx)c5p{yCV1V1o->&A^ z2se$jI~m%^6}rkFBxUi}(x{*hGarT=AE@Ym`urotOLZc2@GxbsGHqQyt>+GHSQ*P` zrugeu&i=kIs4;SOPy}A6i0hkIBN^1maU#9H_Mgy{NFEku#F>)*M6Li=ej0!|vQ~1O zJdTcjapMlGkYG~`36MY-#)!;$<=rLW01Ei?;5I)6GhkVqTJV!!I;NIE`)Geho^^z; zZS-&w`}_2wpeTDst~wJmdP$G9fB`c!(BI-B>?)*t&j5?R-)G z&~)9+u+QH?@F!55l_N!=(3m6)>JpvD5Kyfdyk} zYQg7{k!Go{^ru+lDc-~mBC*BHX)4w~owPuet1{vT$n!;W`cjeI-rUqGNw4 zkH;(U)fo_y_VKewSOV&G_FLr+*>2I^)VnrG>)YE}H((kf#Jgluvl^3lR_1J4v(=g` zVtEK2W@4{H_Wy-pCSt@Q0nKASh`VdRaYWubvL9rGNL}knOiM0?0<3ZaZj$b5z#_2? z!jFHYalh{!ecjVwTa!1_dWxAf)1)m{Oq+D(qE^az-xoS-enyQ|nh`^*1N6;@AF}y(M1G_Pw)>6W*wh@Wv%S!6_d&^@)?M zbwyBl!n8w1XctJ<*h*`CCb1gQw(b$|?a~;I9l5c+ReRk(jIaHc*48$IG+$%epI7zQ z1phh+F;YDFyY_J^D^i*G;PJ2KVSg3IeTesuuh6dlp|)K5{|EqZgM&@dYjH;@U-k0> z+=a>;`*wE}j51+k9KhcU6Fwt7mMRUlrhf~#k2Jr$sh+}UXgeyg&H zGH|;6=p5Y*0j~tvNY(Yt0=Nc=on*7Sh)u)9Jl{B9CwZ%Jt9oO|64o&dvu(ETn(90D zF%X20S(TVic|E!fE@okigpH_QGAZmASa-?ziVDetf-*YSDvM z^v}xx4V}BgVXeW_06&IoLBUe#d3uI8w~b0 zw5tI~n+e{6{{py@B96;Y)^!h4_xpE?J%yG26mf~r={azzo<~OK@&6joch?lm9NzAS zeAeyrldrg|d`CDrKvXIlBu^pS-V`WBoN1401c_}Y09OY87$BNq`?-?{CzkE;o$x(W zYbxxNhSyd`sPMsKBKE=S`OBU{u4R0aCr1$hejnc@dpKyeQ(VN*u$pPMwiV@+cQD@h zpTr9V7u6d5#}z`XBCj32hn@(T6x^q>S`sQVzqD}~4Xe$3jr^XzkbD^k^ z!C0K7Zq&hzbth_b{w%SSS@`yEDuW}X++l157pgX8g~0P%|j zuX1!9?mn`dwa16vkiYuB_4*l8RuIs{Ex;%@2)5^A$hw4hc;KujMm{kQRZz9EmD+qX zM=M~=gD@1k20R=&XU`z_{Xf6zvtVv3&Bw`14eqU&JW-`^LZ4dkZK=!6eq_7 zmS%%wQ+>CcmWi1(@*qdYqOPpsk9h)=urZB1FO$4VgvSJmyVA&kwW@WP^Rje*iZee4-ndrLRD zdI&9?9FY!iubs?3lkxE>`28-UAenk7KQf;i+V6r+f;XqqeZa+_b8uCCOq&X*sxk3?SZNAL6 zb2I&HXWVbfy*k6(4&jwsfLc!Fs7rt3%U##dW)!J8Er!2ukir3NYr0lUOj=d22W}NA z{qcW5)cm~Caw$=_*6NltyHJ&Mk5DA*nKbE_UA3m@s+-s*^1ZzNPLS8gY+GBtq3!OO ztCJBF#BQW}@EOe{>A#r$A9Y-sR9`68lap;!&_#CM95^Fs4!7Dd&{zjxHqiqrf5!ce zr4S+_QRgZ?c??Mk;?a=FCS{M;KLdgd`#bL_! zGNZ)l!Jq=7KqR&fnb!;~-+i)TT1d4!{Sg7y`y62+$-GJ3j3i*wac~q+@J>ic zN?e0!)VaY3=n%WJ)WA4+bz;qxfnFLBG?PzNYX+|@n@YwJxU9{jdWTYDj|rUwaKXx< z-0y0y07tN3AOX3Nd)1ozHPBAkKePP7hW=!Zhh5@EE|7LRtV{P3`yHebd9K%@ON}#(pT-4E@n2ZTpuxWV(R>e%`BA#4APT_ zDAaq@gMz?JEi1SGA$6wYo)s~bN0ECGof1R4?V#RK;VUS2dP{I7Pj4J$QoA1FR}s2UL&K`QRE1Xs z+*i-SOVtD?4FFJsqmH}gCGaTNBv?Q8x-izB)vfz!;#Is(nuJLP| zcZ4Xxz+`_UK}7Cwx1f^sb`R%C0d8TbwKMO4D}sbw(%{v}83}e!JlrhGWB7yR)jyZIxbthqeO*~b(j~P}2jG;E z+qPzG4fL!yyj~B6gsTcbj;X1&X4&)i&x49tqqZ!nfRN(9JX0Ld!V{{ z<==$}IQ9EKY*dmI|Di76wK{?PWUD1({khZ(nb5Oq;x&;FpXMVCSvfh zVCt5>M_5&34HCl7NA7=`ax#*94%UW_{w`a`RHqaHJlz}7){p<)gxeNAQN7*e`WDZJ zt<2lQ7f`+UaA`|lrS##E#qt)95IgK=19Elu`}J#IRnlugjwZFxu+Alqh1}=vt2|xd zIkp%3%3I`vnO^(-Jh(2bV*vq%JiI+(nJ8pDt z{MzD_S$wtMNVhKt6_cXdL0B;|^&-y+xgSq(mvD{PIvkTc6-dX4f8g zMCjE_8M$@{^7AVoGzze=xQbZd>kt7&bp1wojm>2Qx(beRvC8;5Dhc~9NOueY0F$hF z>rgxQyjOvtQN|S=H|O#+ax8C#PevM_!Xe9p-M!D@KFFa~-0!~it}e9KT(4+kVFCWs z5aGzx&zr&e4h_-1A$gRo!crnTkIwCmS$%n8DO!||hpdKm15&+{!M*;~e|{8;a;{8@ zdD{e6`N6k_Xp|syuW)1+w5HsX)Vz%Z*q3#pyg?#zlVQlj?)z~HTZ?QmoX3n@#e?2} z&%)gBOeI$Bxim6YHPz9gaJS448<$u(VM;uSv;@s&j65g)!EZ4z{)*gJb5JSCr>oz0 zlB*6=DLq?|{C;9O=kLt@ol;0@C;;z%j+)P}C=2anaL(TsmRtZP-50VbBj{nwkPu@U z2mK{eGtmHq6mQx80DkkF!D-v%jOl4o#YFoRZ+} zMNqiZa@zfrP_>m#MiPZMMEEx+J}Jq!Tl{QF%ON8@9vZfTccNE3cckn zILF=v#Qtf3(Cirst}^F!t+MTmL?xaixm287A;!UThSP8OkBI_~UZ=kF@m4|pHgRuS z>{o|6_6)~|{SDzXcvBZ4`g`@z3G;={7|1=CO1;*zMIZ4sJ;A*^4O@rPKdu}mMZ!h< z?-lWW3pb;SFy-G%c9ud{i$=1n&s28XT-1^zF;gD-7?()NICUsUQOboSxuutu%Ynh! 
ziem&kHb(fBKf6pn3*Gt3m}Nog^n6%zGBtcmB=zeBmTJ$lSx}O^Vv=|iy zyPG!qPcC95NjXTT_iv5bGSv5b$yFQQd|Bdf?!}?xGcEPGDXzf#+HT`up_j-XjMG1Z z#LGu1%2PkCYgavgMO!i1Zn@Zcr>^746d8|VA!b49S^l4eD!o*l&Lo~Ln$@Sag_4vi z)nqY*`2GV~qju**v zp6DD8I9G1c-!Oidlqg&q&ZMQxuvT6hGTWokVV~AXxV0zV{*}BNi;hq8Yde(E;OdD( zpH{D|yz#xcZtUJx$AKu0Gt;MioCc{oPZr9Ik!x+d>&a@p=jN~Q&T+W<=>2?~JakWM zy9a08wd~Kd>GeLY$SDUoNtm&_PYLvNGsbW4&+tWC?Y?(x+Dc(FYfu)Wd*|k_^5pmr>|N7q`yn5N;AyU!7)SFf<3 zM)MuO0_jeAm3=$Y!!8@EB7~qKL`m{^ta_!Bl8H=OfJBAl4N3Pnv2+tDT)nd!wqfBa zmXnKSRz3jf!1%z7?4i*}Gve;=T8ShBuuN ze_nc2LE)Y~<)9r{#BURhu*rK|zS~=13zPA0{(Sl4<8?KE1YpOJ!5PrUSjQ?0aBAF>H?Bq{+8EFFNE#C9 zP)io9tL={j0$oi?zOenawDOq>2)sP2GmC54K0QgI^>Qy%5gm9Lce&O6&%XOxHm!q5 z(dGuj?sTeDz>SFH6!)dK@oERRZ+a~m%CHmuPZ6+>)3=`_*Sx}rrtv=V1~I-M+)A^} zXjHb13mS`CY}&659FKh%b`-u8b4F`rykay1?{=@GX@!8rzQ?D@CpT{^6L!^dT8}(& z7}H}~^qNYy&<%Mz*c^fTHmk?i6CpYC-OAoEZbG?21_UWil~!BOFkzpS4)2Dk$#+&V zSKFt;&$T_9B6i=Mjo^0a=bq%pexTBoUT?-oNak5X)Ar+y+k&Gaw;28Vcz)-}FefJx z>QUr}M86gfF}FvN?Do-b%|*{cwR)kg5u4b1ZuBA-?~9<-G!#{BOKwH&2qr+;1d0XI zM+|NwsXFQWW@&W3#CgopuD8MsA%n+G(|mR7$Nl#(HOaDQ_h#;FHiJUG(q{J(GY2*x z5Y$@}NQqS%!IemS-pCJ{?Uy|gWYNpGZA?L2Z{WVS7dtGhnQLo?Jm49?r;n%!U5Lb}uk@zNzeB zn<>QZ|9r;j1$%S?8`6vD%qgWk-&;v#hHR+UMB7DzuoVi{xSnC16If!KP}Jo0e-cLs zAHbM*Zxi3%sU58iq;D)Z2}C%j*ls_W<7dE$YaWvA3A1T{g-V}RT&o3>$JFg^@r&-) zn^Y32V&>>((5+&p&U+)#rpXR|f?6x7^ZgG3+xJe!B&ob-SbU=pQwHloiNx-ZfrNJy z(EVG{4|sj#`{RI4P)i$Wslho9z&`4p9_Y28xSk0exl7pfI^IbWT_3sL&tbbRcS~S` zl-=gQ)ncBX#c$XT%bzC~g#)#U4nBYG2WPzNelq(>>EcS#9%ro{ih7gpv*}4K7w>t- z7Swjz;$SCRw*EpDi6hFrnkU61x8Co=FM=o7?1~G8M-&31f2Amwss&yL)qAwj=KH4e zZ1j|{T-z6xUSaEm91p&+7l<=)RcbON3wYoD=47D9vVqJ*I79$Q>~@C-SZ;2TuE$uz zy_3{%jvxa-4A8uq?eC<~8b#d!vEDg=!!vqjzs&bJ?_41r$86uHTJ}sAtkR`LGe$QP zS3Pse)85L3EaH;65X=LW<~c1d56VyZMhz_Bh{{@+%L z8j12?3(R+BX>oa9-hK(cFK+wD|3=~nP1z%~d)c_e0o|TWrPW6ZFz=}8q^XiJrz7Dj zkk0O-!`AlM5X?7jxP86unKGaB`kDHbRc;+u3yU_tAl0YBs(MRi_or;rO@aS2+Uv{m zo`Vro^8sY(-iofcdz4fv7{yt_j8BvlnGiFSwoTjo0bK%cys=r>{En)z;q4^n_3Qc? zYu7Dsz^tlTLgwQTkz5PH7P5X|Ag!Y%4ILd>p!vs(K_Q4VhZ(KJExoZP zh0QF1+ZS4x#+-rrHk->UTqJ0H27Y1Z+Nb7Op!;qamr2-MsvLtUP^AvE_+~*_bXCEo<|bgPe_D4| z%Q`q_VOie5YPNJMs1Y~v*l$s}=9Qw~p1q^Gbubz*xhqG}^@unhi7LAV!PYn;9|c8HQ2_E&Dj4D8025C+soWn1%k&) zom5Mt=u^$c+Qxp2YB!CfsODkod~eYXXt~>Ye4H{7){_;L8zA*ad1p!W%CpY+Tj-vJ9SO2_&3^{QG>zY~w@t8n_Ng67I pdUSMz&xnZs@8kdNX#^ldF?SSP9Nsy>|7r_CURoJYDQO)1e*nt+?5h9( diff --git a/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_points_thumb.png b/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_points_thumb.png deleted file mode 100644 index 59370dd40a3a2bbad477dc433467dfbbe0ac56cf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27537 zcmeEuRaYHNur_YNHtw(?!GpWY#sa}LXpkM;-Q6{~6Wl$x+r~AxySw|xyViI9!nryZ zv(|K9%+yp@)zke{)r2X^OQE3O3y)se`-1yA@oS0qr(-t;F9rzgQ1BQqvU%_lOfJa@#c1Y#87kh8C8U8? 
zw4Z~-3}b;xWfaYyMg0+?p}<|xV3r0*eQJZ6?ph%?_j&t%+e9Au_!wJ2YGvigXq%SW zc~M?VZ7sb`hue7bK5vx=nVk!b%uHLy>#%gpJjdoo^egeBV+{k<1CBH)+2l2K$XeR)F z$T!0=Q{Z3U>g(G(quXE`@{4y>tRixqIQgy<5wN&^l1R;d_U=O6<7#oZiup21q6=Ejf zIZAb^@TiSlT0E&SBW*SNh$QGzlpGi(JN#BsxIRp}#OkmE$ybJQszn5%7SpwUlf)dt zzzLX6IrmVn;)3{lB>E^9>$~Cgr;zo|w>>Eg`B8b?#rl$%^k_DZ%-T-$>dX&+}XvkB{y%AZ5d55?v`t3kBkckXff2G*Y@(lAo%Ztc5eN zGbDa(`#a6=aMCqste>J=UTOv)l<<^`v{NI*zqboi8colIbJ6IMu>Ca&OXwL1P&((g z6yrP|{N|{Qo17@rVvI{{Q+exDLxas;Q7)_=IPb8zDllmNq5JAAI^GMcizy&^9c1kL zMml)*J0q8xWnVdwN(_~{^~&Vi6>WO~drhM&hqN$lJDpNyJuNoRHa*+fH~|w{EUG9x z01jHkMOCh=+YURfgI9%iMpGcL&i~oGd~RNRa<+7}Ewy4InU#M&cJG9t)D8Y;m?FpI z3yv*L3fUP@R;ZXG@Ro(W)$roUJkrOp0b5`9Slz*GFzZ}`e=>#(NEzRxU>FJH(D?%S zf?b0;Xp8K-A(Zlou&q*h^|5%BY|b76Dp@H3<-@q66BAg3S0R@png~u9>qe$a6yg(8 z&Xcx#x|f^A?B2QHyjICXaNk8QHclROo>#Qrhg&-reznc_BmG+{5Uhdi$_XPB4qW^!=W+kY-YTc7KxpQ#@Ql$jVjEINe*9gU zRdCWda&Tz3gy>CXr^NMt8=B3$J;c>&Y~sNhbA-$noA;8*%g6cwY~Rtfj6Z8lfed;{NG2W z^kfEIPdARpAfa)V!%#ZVN{2rroucg_?xA&{8vr2q_FoM%scnNtRc86Fmt^5U*lc zx1)&Lud?;e`xz1!OT6p1S84N1j8QMOLKZ35U^w9IT`(wfZ;N{aTA$US?pN?^T~<~B zVg6DX!?9Rx7V?lp+!cKIN`&3L(7kkQv_piQmEz$<@za+)x9WGa0~LM;N38NEpBep^ zeaAgmE!Tl3VwD67__cIL9{)iSYL(DPlxdi7QD!O!B>G%R{!W&oyT35O(m@^*Vd(@T zlq-(NBV^%dV@sc++r(Md;`ie8r`vAlo!eN4qGbeQy~Yj5Qn>33#s?*Lx!0BtDzbGO zdD3L^{*1!ys4DVCvIDLdi|N7Y_e@9JXM$CaITNVf6AOWv% zcpQPz7s_M*47`^mbFIuw=GW6iw!0+$^4FI?cP}M1wl#_t=9AI?EX4O!XZns1|G`!U zII&o4AnacKy*eUi-f!PkwP-O)@%U8q64b?UUn;P z$`6>L+mL4d*y`KMQ&!bb))tZZOQ#j}l~V0Alk3X7*U602oX2!ys(o7OY~ap2?k&ip z79*c*@NEZylwEr<^67QGrRk|cIaMg-S*~&s+RnQaaFc>C2I|=W)@JqB6WmwExxnX> z-;+@Kh8t7Y3#$xZzWct`+BD}=fq_5nb zmbVzAVpxMX%ga^il|Ax)$qM!{H3dQNzU?m)f? z#=e&5r@cPkvU#7kkGbCVNY)cfWi+gN$ytQ0<ds?mGm zWR_N*m-+9#6T;fLDkYmA--Bu4W4#8B#`|^)^d5bl^N!1Wl1%!g2H0NVFMi?j`aQ!@ zl+t5dm&A9s7_3nBl=9B|-E$efxUCW5^m5Z2}^f-g)JU2= zu^>z7Z{F@l_*qrWr&f3M%@TF?6AK`)o4$ zXZTTNe%wVp{X$DLP2T~Ofi@;D8LeDDNb-3Z{&6oRBFwrrD0Db(W+zN*icXDJ_LKWs z+`(l_*MSp;lJYc^+yXum{=K9?2w!9Pjo;d{lkK_tlS==kc*Tip;ZqZQEL!RKyyk27 z`Sh}Y!H*Y?Erm~a!hZ--9onK)kU}Bt>GKQU8w1uhh>$7KbkOtud>8W0=~+ui7Va8sDmc`5J_4YYLR?Twr+mz8^23|(d*yo82OzmjUQU#G!$It#w;R>V?IZ4*w_JAD_A5;-L@o0% zN<*yibAYkBUd-q~M&j1?-@^;p4hH1ELq!xtIZStFqJK5DLe5MMb@*kyH@=F!d6+t* z+&unlytj+r6S;#95bmrYeDBsC$&{bJbwC&XvZWC2KwAtJO0n?FSD2mihVB|AiJKnf zE=hjXQ=6)j7{`d_@XsPMdr6d>WJYc5ZvDDed)6a7JO0RBcD6*Q=F|N>dfQ%6Rnm?q zH;|tYK6q(C*XevfGHl8UHwGW%I)m(kCmF4m2_(z*rQV(y6trnf(rQyMf6w0b5K004URrjJED|Np{ z1=R@XWsmb_mo>2em6!SO!Z-E%h0NUU-}(j?D%vJsi#=~GwaNzZCJqhL`wMAjFlDkr z7ZrQPUs1;O+O??jspgu)*&|@hRcIarAczSH@9uW9^oOh(TfpW9gh@7<2NCF_Z>>jj ztfR*(Fe;9|;Q-$~inEBEJ;)JD5ZMH#E1E7L=U>}vrJZ*cr0+ZnuVh@JgE5KDktcT&|=)ViiPp^vwk$0=JqoN1>kV zHSxH??q}Axr}&3@LQWoY{%A3CLd9U*qqJf4A|9%P;LwP2hd!qX+N5Z@yqUY6D|8Oe z`1rtqTY9aJw<+OcH{*s+7j|5?`@JV(@~+CaA*!a<1<*1_>A@*LB;ZJXxE&k*JDtIv z^-IcHPW9jdY#*yKaiLXLL+Dif)aUel>Pj)p$5_A7)P z!TmdwCTrXb%kDcTD=i7JPHVoxp>!ZlY`;7+cXBI;tm<$c>3HpYUARd!+bzPSF7^OE zJBiKuVbyWA-G8-yTO+S#6UK)q)L+^nAx}{CD^-gv966!34n0HW#Np52X2Fq{u@-N+ zLm;WLaYnsck>?yjy5A?II3jl`tr`VG+!`ZCCx5MRb01|>?R7xZ{s=2*3hgo3(rOZF z;^n2tF7kw`LBU5sT0?7EXO7vVWAD4F06nEyeb{l-)^=e(=uU+bujI9Yx)krgyA>6l z?EK0YHhh0B5f_1)&AhF%5EM}&c^+{jO`lza%yggf(`LK;2Sy_0t$y7Em_ag!gYx9N zt9!f_{pI?HMet>lDfw+$ z=lU5k@Po$j9U=B(MG{{>@e+rsKRnZPA>8@%TS~ll$-DIN~NOQIsm#z9#mBh zjNh&^pY;3J7(syWv8x~NUkmFhY-P3r-4{#`sHRxM6%t2*_0-gew!NV!#j<{3txB$( z5Kz+B9PQ>NudjrD+Lm}3cBLw(_Y7>od-fhoc7#1yR()+;Y_iV_zlPL#zI@>pV#+PD z%SLv?V^NszfyD%fWtAk`^|WOqHSx;$&|zU%<@B@N{$7(ZDY@_X{UO02EO(wmyGsAz zl$pDL zWeb-=2<9hOGxIk)cSP0GXV`X`&r>VSK=1iUR{5pqIXVNMulzH+#=I@7SXrG!i|KmD`~lK2bb2wN;Gv{E(z#D7MY4%#w0`@#ZHo 
zb#LM(u9F7A(vMgflTyrnINax*xK*8be$J7c?QQ{|ZkOfj?uR%eCezn7 z%M#C)ne@A0{OwWrdwn4*ZkDsC_Vmf8Yi~?wcj>xvkx+Ec_e5Ui$`Q?zz~#!Mq(wTO z)*&~Fo#JDcEU>dDSo?v+USB3lR;+KvinLR~P2}Bwzm#Imkkyc~FR%kAX=_hx%MG_B zCpuj6mc1A-BKD@8;;dJn4?4z{9=Q+nNU6wQ#bQr5zS+utqH!LbV-G~Z z;sC|TlE`)K!{;95Mm{1?mZCA&Sn?SyLSxhPnp~8hbC8T@4)cQ+pNEIvayb;h4#_-U%vX(Uk_JVpH18^m7LA~YP=nxe4l%Q zJHP?oW=Nrs@lTY*R7m?BDAkxxtLwj`SFbx{w-iNcB#DxD9s7>J_kOg z%w0l#ZyZjPn*I3Kak|Z2H`&b1Tq_OQ&(m5{jyg%9=YOs?jBuGfR&awWq@PzMocj$_ z3ZZg;NEj3gpMElO0 z+RoK)KE>*5&z+FJbyXq)Va5u+cxIiDv(}=6gwl;Q+h20_q>ThKDCML{FoR4EgU{f2 zF~NJ_Vaj<)MUReuB%OWzKHJ?Zr*=z&(is2AK#J={Q?vPer+emFEfonKS{((U*;HRv zsY-e*6(9B4%r{G$#4vehNur)5lfAX#$wSOO5*zeVN@z1IFXn%c1*KU=BJ3mY7lZK# z>U#S9uINr^RfvCL(e}ZLEYlNCp~LT>;3kW2Da}&WH0p4Ha z=pS2SdTdre+Tqixkv4yVcq|1DfPVp1V4eH?K6eaX6oNb8JB9I>`OP3mlgTJkGKo9e z;d%z$2ng4&-14>=3&kd1uN6ua>rTP{!T9TfnIt%}?R%BX!V59kP_3h>XxUQBgxl2z z8mpX#eZ1(6wZwa>O0OKque4XXN`1aJKSf)u@dO^TZ$P^1mV>H=ZWQ7?F#{~&Zr@kg zGwd9))ygDg94Pqdqaf#O4)!Y$Rby1z>05phZx6Go7FjF_^0`H-v_G^HHbA(lPt(g83YcF$?@s>@J*vP$-&4 zndT2+3G)RgvVoKz7JkzCoty8l6?6a}krl$&&pUNTtOqqSy5NBE4ZPB%W=axTjc$l<%=<f1_8&N9)%E|R;NwP{b^;XXU8SDTP^PvhwDgN7KJs8>RL&9u)g3^r zY|5pPuaNF%5&S=(nTJdHZ_vic$O#)|(FBtU9x!+KF7|$APW)J_7;tD9^%#9CVAU5E zV1kJD=Px5G8iBNfd=ItX4)P1y_!&ODwUDx%qSjQe8N0=M!Wt9kR1)&j7Uf8*M9yEX zc_!*%A)qtC1bhj_G$pY_A6t8lJ|6?RDDI&@5J;d*#?mp%zhdS^3xVQek;MUm-oP1;WOOWl>O4k7NSZ+3`HYD-H) z8;2BlQM585wWI2Dpj}5hVm1q6{6^ppE?V2)=LwWEMu@v)$h=zq;O*a^r(YX=3+{d3 zpSe~O-y^CzpC^jBxCH}YDNgyTvQWj_6&lCM19?Ml7%brt2GM0dWTZ=QqaFd< z4XtwfdbfY!TEu`;%{i6y=+OT9PQKpu6x-SU330M|&Q@}W3zKP@Y) zVDkBwi!OX7E}@VmMt0Hs&jdxK(zUp<0w{<#7(^66%3kR|uRQQnBO>6~nFs5u-R*{+ zrC{rP2Ve@ihD2^U^y5!DL@_{8Ta3Q)&`n!6hCYxUgvE ztq$Z5SnQ1D+mx^cV$e&NCAF^75QH*N!&kze5pJPm$sc%Az_^C><4#q7W91(otWhZ* zkM)0v{x;(3vd$XNX4ax=+iTTfNKJ@=j4d250i%8!z);+7I@^)rnO++4 zy)(_ovj+t0`AOTcqvnApk}v%9q?3Y_)hmk4Vo3OQd+>ZU^vA|TW;;g3`QshLaP})i zQ-jS4P)a&>3?e8i?6^~+6qFS>r2e9-a1m3hZ8-P1+^s}{I%2ijz1C}a^dCWxvUF8_ zuxf4TpcDU+;jH6iGJqph$TDD?oP}<*eN$}Dd;Ja6RnBscAV$qc;HeNSZhS1DzH!|3 zy>aXsc;F`1Ah0qZ*zyt7si2>}S6IJ|_dIzBMkHfehY#{Ojol>78zZf>cU%@z#uDlk z2ltGfEyJ?j`>OcjH-U@Ml}+>6D0(RxVMA{uj76)<8thI5^z@X+=7U!|q|{I-Jc<@^ zcwc?o@49B#qevWDEm_VDuDVQA<1;y2?bZA?tee+u@ZG=XX-7*VkdYN5snuRL^t3+* z8zyt&$nxDci;sHVXicX6X*dNWssB996vx^*&mchV4E?pgeyVydX+)_ZAO4Rw0d&+7 zG_(e=M#PNkmS?w{dbYY!^$TSP4L{21+tjP)X~k!~bQvU^)s5_Sl!S9El4s^bo@vXhtC zfJ)S#^qSgDyGdO}NmN3u7Z8h&euhe|P^J7Fm}7M3^JqOEZ-#YY?muP2(x=MKC}h3n z2J*vk1h0-u>3+bk2MmXrQyblMS+z#XKpCY@jJnep<5|Who$E!HIWdcN4c%xKpgeX8ne#T;hdT$G!+ZJ;I|ax%625r2 z&?J6d;dff9!D_Vd?-Uo(#e?X%vf<6phOJ4i!|&Ja zcRYUX+(0iuyy*g8C>FXonrgX~p$N&Kqv3hDlFU-9jG38TPJUC&kDbP<3DM{JEfea+ zud-atwGpKW_nCP8w)C0QF}FnP`uZO-b_9_7 zwkH1U+jB4D#iit6R=T;du0RHM@+Tzj6|0%{HR9hVC{p=YfA-~>rPmm)6xX=%lMs3( zOBr_rl@7zDJJ*J?PMVgrh1&Y82ph|Co#(FOEOw~sx)fj)pv-P($zGK*3=Ki+7zOGqcg>N2}<32q);2FF3`Bp-xZ5i1Jj$Ly?hpULl{zgQl6G@7JZJa{EwxG@j#4@M$ z7y}#~8{z~nI$f5hlS+40=Y&6X2p9(+*R%IxQ&va$dj zAlJG#2j3pHtWD;zKJ9q|vv?rN!7obM6%YK_Rr-`IN$@o4J!MxVUs^=JLrr}`U-iId zDm1vF03ozL(4|IURpG!s+a z_9FT-LyLu8HrYPACT`zw7hdB9SMk2TN#I57biYvy$<_5m*K14 zYi(pN^--;(jqozs?JSg8=Dp8A~3TE(AFQW(-~|gkraGmwmuSQZak*a-P^EBrSqG3 zq<_u_`KnX8#s0K(2aJ2m9*8>-7KKz(ia~OB=}SvEHeqc;2w3mYx|u245H4qjdxOf| z9`>iUY@lg^S4VgB=zIk}v+WucK^}?o`VTy8D)02O&+qZ1YAx9VVw#uhMYK6K^kTY; ze6O~VVRA(xdCL5m;p!4gat}Dzzut=4JeJl>J2TG=q~%4RL@;rH@}u&RuHskNg+Gh5 zk9ew8N2oh@GH$IKouo<%xt|Ds;Sd%*ayyYV!+PDk)G2)7W=DIBim&k89FTl00>7b2 z=-nT1ueR^sK_5{Fo`Do`K?mE6v@0YQJF}N@Z55L{e)|a=@p}i-Y9zyP)&9?Dc5}z) ziEDcIyYl6^=?~#*mjZa53O=_AWsIK84KV_MtGIXc*-p7MjiqE|jGfImSU14U<96Ot 
zbEm!MQ|2{sI)3ugozzi>CY;yMX6S~BRucACF;%x1-EX^fnUtC$}DmcGRi`bg?Bzr@F^6_bY=rX_@`Q89`Z(T7lY8krU96 zhR58mIYD6Fr56m>bKN$(fx)@ia(IC`wJ+x<^i?sU91eXR%4)|=UvoEe1Ls<^zJJ}% zD;gjabi*<8!kL}vwQ{=>&HoT04b`vHscH3PORmn{qT*%*pZg?zo_`lZ(6*57jWuJN zi49QOwzg;i7$qC|`E`+q&8|#D-B-2@w@M*NsOsPrTsM{qYpnrdR|ClPcxDdw+EsBj8p~R0`jel&ON3Uuy1&WQRscko}~AhFlJX z0;5W2(@iaFg45zJ+i}U!t#8FwsZNrSQozsefqCtEF5lEX&LAQHfZM3OSs2n@H1Dys zvr=3c}>O^$K7a9fFCw zoK~3m``yy8KXLqWuaAzlSmT3zA#t+%y8{H4pzh~&S^Mi9rd!>NSrybbdN=)hz5A) z&!Jol{M4_2tG`$DGOFOZ*OAX@(Ner$$>RwsZb*E(0PTURTMT53eTy;10>_ZJcU9qbw9mZV5 zygY7xnuQ%JPZ_Ptrc1zg-bpW!htd~G*Cj&qmUaKMerJ9iPSI%C=mK?5feE^q;vZK` zPUmz0MIJEa0v5Ww0Q)ULCvD)Uy6y*{P_4-Gf&)&TFj^!=F7y?U`>di)r&A(Q^;MG4I<=|LZIoTeq{m!B+ zKt;6Yn*>f>Su;NPhd6hhMRmFC!)e**f!`=>r=ehUJcCX~b+!hp?@;HqQiOo}Y(7pkd>@a0>E9hXi;ma^H?>Aon^mK_f8ARDOIt-s zGBi$N9^AQv|eqU zQ`gKcLdaVwywMsw>6Eo__PQIt!?OCHY=WCEP^c6priAWz#V)UJ@k@>HJ|O5%@`uht zqJ~e_%cb6}X`YNZCBCJ4S~j~yp380cU;*=fIrs z4^BQNB&qc6iQuKjv>IwhMdDhYNqslDldH4lZd!Hwk;F$V5F1+9f8?iHHm>8!WgJ0S zVR$J<)`AW5L||)HHey4eN&~6%8C+P!{Hi-0KWXS06Bb-udsmrr1}aVqQvq4^4Q&seegE>dq+`1aY0iPEe=8AXgMjYO`={z7y zfu9*``q-Cr*z^(5KFIjTECk+cTbmoKL2k?sqY4KPaM1t&AQDF-;}U%c_y(&}m~Cg^ z4azNbNNhAA15?Mzcx@|l`ZHU#F?gi#r(J=*o-T0ZNj82_Nk8%y&`hld$Nje_n*4j2+gz`NefY++}3 zIB3*KyjXq6NZnC#|7IzW4k%(%0-9UlX(+MUJBB3eP%jUpamS3vRDbYlF=46wt-=g| z&`OWiD(*F_sk;3W7#s@bN(YX@AfCS}g=FEnkj!@5&wA{g<~2mv)iN93>_3~5ImDQh zmr9!eYC^SD8^8C;l?$J*#Wc~G24gGYG(zR3Fwlp~iX2Tugtj(q5}YZ47-FW7l*42l z8DVg(0b_oQpVHK*f5v0xwx|<@qligNg@(Et)uPCe+laD$B5B({Kz~)F@u0gE`Av@N zM^4nXZ-7^En%msWNqwLdno_!!<sqW0^L z#ZO1nc}n9zir*5+sKDOOy+fF2NYHAHnuKBnOkWJY$m4>BP1RdmQ{5Iz%I7B}<;kk| zree%lpKz2jD0Yl;bU7^jZ!k~umf#NqUxEIhc5ETq_6T|E&!zEDahKo2_EEM zJuuyaGqWuZ4CQ&&!`2u;5VZo4vbG~Nc(<%(lT=Nz;32%5 zjS_&$jsFi5l|?DaoF^r0y0@BfCIDqZX?=3QO**eHq#k(`*2ZX7t2zt#o%55ykMk@J zcpMi_MKw4Fcsoay#vMcIlw3fcVwCTnX-KHklcOL!Nl|6*rBFAY%>QrpI<)glll|!W zauz|M_^%nPAs^~v^xIPQe}*P)L)Nwiu$pjlzF4D2Ukao%EoCyFps5jw{xNtjbq#Jz z480QG+MS_#Tfuy})gwdfgMx45%lr=YgJ6bsJAdo@P)GudbS?3>4VVj)4MeqB?a>_V zb;xI`>n8*anfP*6Xc8`07*{M;_00>+k!0R*}Vj*zGNS^jocn;7YG zA&Y9OirTBRl|wUwFtAYZMCOqPTTTdy+(_{so;&|H%Z>1Cib-OpS$KXs%zrxgm>tOS4Gd)G@t^=+f08k4Fe!Qo-2Gr;u1YDO-7<=xW7xsj-O*AZ~54!ir=dfSzx+laUk!PPIGm{mn* zY_;TxVPOgWykgAg70Y&XwlAO*v{IHMnu`ywRrW>8!M{AkmJTd7C}|fKoFt;aX$hk% z)?iktJ6jzV_?<)BR{$$~0=jdkSC{P8!epoMnT7nTd~#w`sx*7cU;27vjy$)K%R>Gj zzns|~B7@8lV=)0m0M;n-a-z1*s zZ*W>f+u|cCEne+--2AZ@n)9ee))~bo7kK3!$AJB?k(H%dH$d;mv3IfNv}GmURj~ zb@}IB0IpF>!h5XzPivwkQ3UgOgK1^urOQNQ^9Su7sbl36Gw*J&q_6NG$;pZ$(^YpC zl#^^4aM4?{SGBu@zMS&(OM(H2MBw+#s|(jsb71)hKGdY~_|VDkh?G}=x^Az# zifhKec9Rxwv|8@kxn{HT7wa{?BDErBqC%z2KEhuXJ=z>q*bjiN?;b#v%szEySHtfzi{O!Q^J5Iu2qAL5b0$}Vn@4^b;Z-2y_QvjcnTnF z8&k5+3#7J0Icyr!CfmpgXWcs>sUL9>945~G%JZTLg`#qL0J02anTPX zTQ`m=ARF8sKM_bDc@HPn_aHUPDxBqs8U)buM?Pfmsz`e>6?^-8=ATl2zo3>5XxWJS zcT00tNrgl!EyxWMunzL6|F^MPz>6UMrah~J*x#tT(%(-KlY3)(m@bV`&dkwDZ8@#` z!^B}1FHKkUMbOYG&iv4dTF;X8atF!ynbH?jXHiz!c}+7v*BReTLN;_O1I+5#$?w)7 z+p!rB@`SrDlvYofa;zu7IUNQl<;zccKTFumM_zdx;i0>e-Cu};keyZ&mm$rSTLnVJ zBWEzU^s8-I&qsVM&DI??9ZFWABW%iFix!WrSsX8eXUE;?`wBfmiuA9uT!&RF)|AsDu1gIpm)tQ`Bc#NTSF; zEv)a?(i-lQ(%Y}`eg5>F#+2B2-;*kMb1V>mESd?eozMUm2lC|i5;fj470IdEn+e-D z+>o9nEq1!x25`rxMHryIWF1vbwgJZQQre=Hl>TX}jPO%;6ahYSxkdEzQKtG z4O^mk6ZxnrI{Z#fXbch@CNA(3P#P(`culW>rD@9HzNWoMvK@oZ%_Yv`8@!Ddqf2Hm zzt1_?EwTnzknb$mbzN@G@CG*_DV2M*+=_mC&#pAm?Yf;TvqoNcobx)FJx-p?krP54 zaK!$v>}WcxPD+ue$K8|Xf=q3{@lf{(tLzgM>xe!kGlte|I$lg2CENb(iX^o0hMVq8 zfZSw7qK4Cu>SiVJ65{Kf@KJY%_wl#Y7BFHqYamLVSU&TXuFEk}M|q_z-j#`%-OQ#3 z(4wvPAa&w*%)3j#@3PiXj^}+1u2XXTAIB!8Z-)mNCxkOo#@_;E0$DSXw&4{m(S&Co 
z&*DVSqbt^hinDKmy>sT9%21hZjr^de;=Wxd$JUY=-k^nOZ!}(Y8Ok~d5uNX(X8D+Z z&d6fC>ijeOt2l%|UP|uBPhg3w<_L>y{vyR_x75idvE^=#wtWnlP7(s-XZ$T36fC}B z^wFUajjOmmfKcV=!FqaHi9r6gxZU0H=3H{#;>r-f$^t)xq>aW|4ilKw%#7W-PdDF! z$4w)45|a~ULOj+H#gVN%tCDky@hJWRKh=b@TFHEKvT|(EUOibdAO*YaHVNB27#~iK zWf~Sp(KwQ~a_g{Gf{$TIdWZ3zaZgcYyRV6X-XQIj5riN_P=)g)=?hGVuI7n)TjQSE zNfO&4ggR8s**vl!PHr8IJ}IoUXETT7VNzJGmia$6VT@^7{}JDA`^m%bHNNyx^TdWD zG}DkpfD@4octCsGTM3b962QxYUWI=FrE?DD>66C; z_UBgGkCsQe1{B7h)J=a!TN*vLr`aNGtYf*KlW@@lG9+t_isg!>#cLJUD@M;(sx2Qx zw_aD+-zM70BcxwtB{1nG6^oxik(a)qq`Jgl)<&8ha*zQScAeEkW}Id-(7A zpC2map1?b9FdaUXU7g2Tft@#k}HcKgOdKbK0eXM(M+mwuOmZoi-T;Q}%d(Cs!a#5H@H>${*6} zQ+_TtGk6+=VI8f-5gQ#*1-jlvX-^ERk_U@|iT(@+axbUqWHI$bL`8PO^YQ-6rdmwG zxh01OdnGW<4x}@-y;yNEHI}n-2ZL2iU6H>=qu|irGX<7Nk&y{D+Ki&#Qd`6_ipPRE z%yPd^RNZzwUh;aoam*{wwQY!Uh-xpvF> zqZrFYDWv)lgK=CRNxA0rV^&AFFRtqJAQs?dm}R-dC3y_T?Hm^**2YzPmS~wQC8QkObD+Matuge8`|>gr=)H? z;1~|5;OKq;ARP9oVpJP#YCD< z_TtHYbW%##p`^e{!YReXfEBj=P~ElZf2Ti}w`Rl{9#JXpG+vnR^>()XZ5c?V`i|D^ zhPN5oMSG@MP8PLUieXxPg&*VMqmCIDi!9~4IJz5Y>~1o4^1QBf^7=SqqqRY&l!{yQ1-Oz$;*()*AUwCRpPU0X#dtR& zV<+gZg?+z>@p{Fgl}ZlH{ZTC*7rWnAD+lowQAMV2Qqhd8Lu%&va<;17-Xx@cP{vLe zaVnOLW^1`{`u&U%?+%0H^#ogVllp;Y& zad-FP8r&hcTW}Ao+|N7j_w@tT$ILa8$(+}mhwj^+dFRau9iRR38z0KM$$JzDY>Y33 z){cDb(RTr$!9J09{>Pual{^GKgfeJQdM9NFyEb~tXf}(zS_+S$Fd@Ktj@!Y))Cry2Wv3n>~119^xHRNxRPj4Ex0Zs ztlzC%(4k|l&b88BdtVd3Dn8~YiKNyBh&6O-u-I#V`eLb4v^&YP%COj8yV~|r1bsi6 z;TG!kH&wT*Q}xg55%^M#PZu{0I3*1lAtaxRs@RLhZP z>Z<93B;Z^5b}_6tG!pMGP44n;Cn_sXk(7?E8lfl3;j25aRj)-n4HPFz2h>;op_78R z%ZkpYE`Cm$i3aJ{XyykM(u`*m_~zSZp7W|Q<@^5DbQ>YvIOX||=ij05mVqR4zC>0H z;bDWU(b>`1NuzdP^V0)UjqPJ)#{=bcfiCBAO`;+T0m#`}4t)!l9Q=spYGox_Y)fs) zXkcO9Ldm{<DQtb|Zo6o;OoZMx8|I8jtI%n<)or-HTxfT3!kS9Ji~bgUG^=?o z=R!fL>zFDpfxYDvi*^!}=2ItWi2}k;^**D`O0a8o`Mj`D$L*Y4l+K)O#I!xeN62T? zG99 zxU4<@Ik{)|fUs!j&#)4&6>oE}yd!Cro$rv2UR?M5VYWqG{DpDr+4h&Zm5+6A2O}V} zgK)*t(~R)$W47a!kdbt00zOF>&G<~Da+$)v=R~E@=KavX<4@!H-o6|1%Lj0!cTmQ= z{=dI{E~ytamqEB3F%^NrTdBG$2|->9T>@xqqr*@@u@F9rztb zG8u1aTw2lG<|+B8k)t@_xo~*Iu7uRr+8t8#veZ$R-!?d@y{aatib{I;Y-rDfOuSG zRV~}+8qCf*l}j8$J!Y%b(=L@{D`bZ?Jw1*G4%Yb+PYrH_q&ETnwIDAG-&diSEI78k z{BoJo__ALrETs$L`C#+Uy^ASzUi*Hau>XZx!w5kDkGpRL= zPL%H24Q1g)nv03eOm~_1gC@W%Ns6YXa7#|_@xo5k-A}4Ys=j$Zh(qfX{j~af9|KP#K-ut+(NLHGu9$YpW=_91prqnsNH>}E;{#Vk z#cZku$DNC?$=z};5q89rVtGH3t}=ourNS*M#OK_t5ta!YyQj9@p*!Ua<_p|CP{XmeQwpO3=??#x!#@M0Ee`N{e^9Ad&U8s}Tu4yTK--aR2)Mv8&j( z2E6Ofugf=Lu*yiWukb|~oltIRQ?}*Ie+dhuF5`dviKS zOt?f}W?&LX=ij0@;znSbm=1`%qunr$_10`ua_1YXE`+R#S39 zCp>%^N}m`WcRnPn75a2IEXo5C-c($F5Td(r(()}sbAG)G3rW}6lVtdtdsxBEste`F zblZ=r99|Ubs`q;EuJRZ+_FJ^`nJu6$@37>|TNa+o^nYZc^Xb`4gVPPjtQD=xU)?NL zx?lA_&>?dh`@dSWQ_;Lf#;`}mLy>pU@;P zypIgfiLuO-C(5~9tA7=vPRf3b7GC>xY>W4eDAhP4rzYX<$?Y3=6?5HgzDSJNl^tv> z5fW0nk>3S%TV~c^#%tLdrl*E!7q~Hn&Jrx49PTw({df~&l&9ZSx;s)V&yVelE}Rb( zlG?-G!Yg)U@20(rLPrd@KBOs-a;=*UAI)=luY$RT`fwzl-D^i|s|pysn^6)w|FjwZ*Xpq&bIt{k^bOdM*B<9B}2jt_ZIdvh>yzl<%p%5{` zTIj+Lus2-53Q9y66il{)*U1ohIAS_< zgjt39d#XGk^b*N8v1G>MO4RU9{MY7EDe-%NGcXHa?p&~Lr+*5(E_1ba{|Z;@((cWx zHO+i+=ZN{0B~Co^x4VPwYa(W%02SUR>g#HpNu;2Ik5A}+63iYRPN~0CKK8WO=nWceqNcxEgI@B-O zM9Q!e9NY+2^P*Bc4>r{xcjZKdxqfuFPGz{cE#DQQdGyajhvMX=qzl2H)AH4t+TipM zWZ{ioX!DPG!c3&*c#K~(gQA-o=y+GZVky!gO=!8yQ#{w;(dn>}Znd37G32+>R(YW7 zI8syzlh{^!@AM}K^?zVNQ`D}*RJKv$%jIByuPmUxQtKz;(B$lchs7}tDu94}&Xn@` z#f3^4^T@C7X$zpbq^pp+HH6zD)J(1Lht|dKYOaiQH-HFh37nXj4ZJTCcUe63)yS7? 
zKRyw3ze;GnhIRrP8lw-mnQ586A9rq1-BA40zE%0$65yF|bRD^NCAg74T<+Hc!K1@B zSso*GV7)VhWqr>I*gt<;V)*zUZnMy$(jPeaj~I!#m+1%?0lx)c5p{yCV1V1o->&A^ z2se$jI~m%^6}rkFBxUi}(x{*hGarT=AE@Ym`urotOLZc2@GxbsGHqQyt>+GHSQ*P` zrugeu&i=kIs4;SOPy}A6i0hkIBN^1maU#9H_Mgy{NFEku#F>)*M6Li=ej0!|vQ~1O zJdTcjapMlGkYG~`36MY-#)!;$<=rLW01Ei?;5I)6GhkVqTJV!!I;NIE`)Geho^^z; zZS-&w`}_2wpeTDst~wJmdP$G9fB`c!(BI-B>?)*t&j5?R-)G z&~)9+u+QH?@F!55l_N!=(3m6)>JpvD5Kyfdyk} zYQg7{k!Go{^ru+lDc-~mBC*BHX)4w~owPuet1{vT$n!;W`cjeI-rUqGNw4 zkH;(U)fo_y_VKewSOV&G_FLr+*>2I^)VnrG>)YE}H((kf#Jgluvl^3lR_1J4v(=g` zVtEK2W@4{H_Wy-pCSt@Q0nKASh`VdRaYWubvL9rGNL}knOiM0?0<3ZaZj$b5z#_2? z!jFHYalh{!ecjVwTa!1_dWxAf)1)m{Oq+D(qE^az-xoS-enyQ|nh`^*1N6;@AF}y(M1G_Pw)>6W*wh@Wv%S!6_d&^@)?M zbwyBl!n8w1XctJ<*h*`CCb1gQw(b$|?a~;I9l5c+ReRk(jIaHc*48$IG+$%epI7zQ z1phh+F;YDFyY_J^D^i*G;PJ2KVSg3IeTesuuh6dlp|)K5{|EqZgM&@dYjH;@U-k0> z+=a>;`*wE}j51+k9KhcU6Fwt7mMRUlrhf~#k2Jr$sh+}UXgeyg&H zGH|;6=p5Y*0j~tvNY(Yt0=Nc=on*7Sh)u)9Jl{B9CwZ%Jt9oO|64o&dvu(ETn(90D zF%X20S(TVic|E!fE@okigpH_QGAZmASa-?ziVDetf-*YSDvM z^v}xx4V}BgVXeW_06&IoLBUe#d3uI8w~b0 zw5tI~n+e{6{{py@B96;Y)^!h4_xpE?J%yG26mf~r={azzo<~OK@&6joch?lm9NzAS zeAeyrldrg|d`CDrKvXIlBu^pS-V`WBoN1401c_}Y09OY87$BNq`?-?{CzkE;o$x(W zYbxxNhSyd`sPMsKBKE=S`OBU{u4R0aCr1$hejnc@dpKyeQ(VN*u$pPMwiV@+cQD@h zpTr9V7u6d5#}z`XBCj32hn@(T6x^q>S`sQVzqD}~4Xe$3jr^XzkbD^k^ z!C0K7Zq&hzbth_b{w%SSS@`yEDuW}X++l157pgX8g~0P%|j zuX1!9?mn`dwa16vkiYuB_4*l8RuIs{Ex;%@2)5^A$hw4hc;KujMm{kQRZz9EmD+qX zM=M~=gD@1k20R=&XU`z_{Xf6zvtVv3&Bw`14eqU&JW-`^LZ4dkZK=!6eq_7 zmS%%wQ+>CcmWi1(@*qdYqOPpsk9h)=urZB1FO$4VgvSJmyVA&kwW@WP^Rje*iZee4-ndrLRD zdI&9?9FY!iubs?3lkxE>`28-UAenk7KQf;i+V6r+f;XqqeZa+_b8uCCOq&X*sxk3?SZNAL6 zb2I&HXWVbfy*k6(4&jwsfLc!Fs7rt3%U##dW)!J8Er!2ukir3NYr0lUOj=d22W}NA z{qcW5)cm~Caw$=_*6NltyHJ&Mk5DA*nKbE_UA3m@s+-s*^1ZzNPLS8gY+GBtq3!OO ztCJBF#BQW}@EOe{>A#r$A9Y-sR9`68lap;!&_#CM95^Fs4!7Dd&{zjxHqiqrf5!ce zr4S+_QRgZ?c??Mk;?a=FCS{M;KLdgd`#bL_! zGNZ)l!Jq=7KqR&fnb!;~-+i)TT1d4!{Sg7y`y62+$-GJ3j3i*wac~q+@J>ic zN?e0!)VaY3=n%WJ)WA4+bz;qxfnFLBG?PzNYX+|@n@YwJxU9{jdWTYDj|rUwaKXx< z-0y0y07tN3AOX3Nd)1ozHPBAkKePP7hW=!Zhh5@EE|7LRtV{P3`yHebd9K%@ON}#(pT-4E@n2ZTpuxWV(R>e%`BA#4APT_ zDAaq@gMz?JEi1SGA$6wYo)s~bN0ECGof1R4?V#RK;VUS2dP{I7Pj4J$QoA1FR}s2UL&K`QRE1Xs z+*i-SOVtD?4FFJsqmH}gCGaTNBv?Q8x-izB)vfz!;#Is(nuJLP| zcZ4Xxz+`_UK}7Cwx1f^sb`R%C0d8TbwKMO4D}sbw(%{v}83}e!JlrhGWB7yR)jyZIxbthqeO*~b(j~P}2jG;E z+qPzG4fL!yyj~B6gsTcbj;X1&X4&)i&x49tqqZ!nfRN(9JX0Ld!V{{ z<==$}IQ9EKY*dmI|Di76wK{?PWUD1({khZ(nb5Oq;x&;FpXMVCSvfh zVCt5>M_5&34HCl7NA7=`ax#*94%UW_{w`a`RHqaHJlz}7){p<)gxeNAQN7*e`WDZJ zt<2lQ7f`+UaA`|lrS##E#qt)95IgK=19Elu`}J#IRnlugjwZFxu+Alqh1}=vt2|xd zIkp%3%3I`vnO^(-Jh(2bV*vq%JiI+(nJ8pDt z{MzD_S$wtMNVhKt6_cXdL0B;|^&-y+xgSq(mvD{PIvkTc6-dX4f8g zMCjE_8M$@{^7AVoGzze=xQbZd>kt7&bp1wojm>2Qx(beRvC8;5Dhc~9NOueY0F$hF z>rgxQyjOvtQN|S=H|O#+ax8C#PevM_!Xe9p-M!D@KFFa~-0!~it}e9KT(4+kVFCWs z5aGzx&zr&e4h_-1A$gRo!crnTkIwCmS$%n8DO!||hpdKm15&+{!M*;~e|{8;a;{8@ zdD{e6`N6k_Xp|syuW)1+w5HsX)Vz%Z*q3#pyg?#zlVQlj?)z~HTZ?QmoX3n@#e?2} z&%)gBOeI$Bxim6YHPz9gaJS448<$u(VM;uSv;@s&j65g)!EZ4z{)*gJb5JSCr>oz0 zlB*6=DLq?|{C;9O=kLt@ol;0@C;;z%j+)P}C=2anaL(TsmRtZP-50VbBj{nwkPu@U z2mK{eGtmHq6mQx80DkkF!D-v%jOl4o#YFoRZ+} zMNqiZa@zfrP_>m#MiPZMMEEx+J}Jq!Tl{QF%ON8@9vZfTccNE3cckn zILF=v#Qtf3(Cirst}^F!t+MTmL?xaixm287A;!UThSP8OkBI_~UZ=kF@m4|pHgRuS z>{o|6_6)~|{SDzXcvBZ4`g`@z3G;={7|1=CO1;*zMIZ4sJ;A*^4O@rPKdu}mMZ!h< z?-lWW3pb;SFy-G%c9ud{i$=1n&s28XT-1^zF;gD-7?()NICUsUQOboSxuutu%Ynh! 
ziem&kHb(fBKf6pn3*Gt3m}Nog^n6%zGBtcmB=zeBmTJ$lSx}O^Vv=|iy zyPG!qPcC95NjXTT_iv5bGSv5b$yFQQd|Bdf?!}?xGcEPGDXzf#+HT`up_j-XjMG1Z z#LGu1%2PkCYgavgMO!i1Zn@Zcr>^746d8|VA!b49S^l4eD!o*l&Lo~Ln$@Sag_4vi z)nqY*`2GV~qju**v zp6DD8I9G1c-!Oidlqg&q&ZMQxuvT6hGTWokVV~AXxV0zV{*}BNi;hq8Yde(E;OdD( zpH{D|yz#xcZtUJx$AKu0Gt;MioCc{oPZr9Ik!x+d>&a@p=jN~Q&T+W<=>2?~JakWM zy9a08wd~Kd>GeLY$SDUoNtm&_PYLvNGsbW4&+tWC?Y?(x+Dc(FYfu)Wd*|k_^5pmr>|N7q`yn5N;AyU!7)SFf<3 zM)MuO0_jeAm3=$Y!!8@EB7~qKL`m{^ta_!Bl8H=OfJBAl4N3Pnv2+tDT)nd!wqfBa zmXnKSRz3jf!1%z7?4i*}Gve;=T8ShBuuN ze_nc2LE)Y~<)9r{#BURhu*rK|zS~=13zPA0{(Sl4<8?KE1YpOJ!5PrUSjQ?0aBAF>H?Bq{+8EFFNE#C9 zP)io9tL={j0$oi?zOenawDOq>2)sP2GmC54K0QgI^>Qy%5gm9Lce&O6&%XOxHm!q5 z(dGuj?sTeDz>SFH6!)dK@oERRZ+a~m%CHmuPZ6+>)3=`_*Sx}rrtv=V1~I-M+)A^} zXjHb13mS`CY}&659FKh%b`-u8b4F`rykay1?{=@GX@!8rzQ?D@CpT{^6L!^dT8}(& z7}H}~^qNYy&<%Mz*c^fTHmk?i6CpYC-OAoEZbG?21_UWil~!BOFkzpS4)2Dk$#+&V zSKFt;&$T_9B6i=Mjo^0a=bq%pexTBoUT?-oNak5X)Ar+y+k&Gaw;28Vcz)-}FefJx z>QUr}M86gfF}FvN?Do-b%|*{cwR)kg5u4b1ZuBA-?~9<-G!#{BOKwH&2qr+;1d0XI zM+|NwsXFQWW@&W3#CgopuD8MsA%n+G(|mR7$Nl#(HOaDQ_h#;FHiJUG(q{J(GY2*x z5Y$@}NQqS%!IemS-pCJ{?Uy|gWYNpGZA?L2Z{WVS7dtGhnQLo?Jm49?r;n%!U5Lb}uk@zNzeB zn<>QZ|9r;j1$%S?8`6vD%qgWk-&;v#hHR+UMB7DzuoVi{xSnC16If!KP}Jo0e-cLs zAHbM*Zxi3%sU58iq;D)Z2}C%j*ls_W<7dE$YaWvA3A1T{g-V}RT&o3>$JFg^@r&-) zn^Y32V&>>((5+&p&U+)#rpXR|f?6x7^ZgG3+xJe!B&ob-SbU=pQwHloiNx-ZfrNJy z(EVG{4|sj#`{RI4P)i$Wslho9z&`4p9_Y28xSk0exl7pfI^IbWT_3sL&tbbRcS~S` zl-=gQ)ncBX#c$XT%bzC~g#)#U4nBYG2WPzNelq(>>EcS#9%ro{ih7gpv*}4K7w>t- z7Swjz;$SCRw*EpDi6hFrnkU61x8Co=FM=o7?1~G8M-&31f2Amwss&yL)qAwj=KH4e zZ1j|{T-z6xUSaEm91p&+7l<=)RcbON3wYoD=47D9vVqJ*I79$Q>~@C-SZ;2TuE$uz zy_3{%jvxa-4A8uq?eC<~8b#d!vEDg=!!vqjzs&bJ?_41r$86uHTJ}sAtkR`LGe$QP zS3Pse)85L3EaH;65X=LW<~c1d56VyZMhz_Bh{{@+%L z8j12?3(R+BX>oa9-hK(cFK+wD|3=~nP1z%~d)c_e0o|TWrPW6ZFz=}8q^XiJrz7Dj zkk0O-!`AlM5X?7jxP86unKGaB`kDHbRc;+u3yU_tAl0YBs(MRi_or;rO@aS2+Uv{m zo`Vro^8sY(-iofcdz4fv7{yt_j8BvlnGiFSwoTjo0bK%cys=r>{En)z;q4^n_3Qc? zYu7Dsz^tlTLgwQTkz5PH7P5X|Ag!Y%4ILd>p!vs(K_Q4VhZ(KJExoZP zh0QF1+ZS4x#+-rrHk->UTqJ0H27Y1Z+Nb7Op!;qamr2-MsvLtUP^AvE_+~*_bXCEo<|bgPe_D4| z%Q`q_VOie5YPNJMs1Y~v*l$s}=9Qw~p1q^Gbubz*xhqG}^@unhi7LAV!PYn;9|c8HQ2_E&Dj4D8025C+soWn1%k&) zom5Mt=u^$c+Qxp2YB!CfsODkod~eYXXt~>Ye4H{7){_;L8zA*ad1p!W%CpY+Tj-vJ9SO2_&3^{QG>zY~w@t8n_Ng67I pdUSMz&xnZs@8kdNX#^ldF?SSP9Nsy>|7r_CURoJYDQO)1e*nt+?5h9( diff --git a/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_sample_thumb.png b/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_sample_thumb.png deleted file mode 100644 index 59370dd40a3a2bbad477dc433467dfbbe0ac56cf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27537 zcmeEuRaYHNur_YNHtw(?!GpWY#sa}LXpkM;-Q6{~6Wl$x+r~AxySw|xyViI9!nryZ zv(|K9%+yp@)zke{)r2X^OQE3O3y)se`-1yA@oS0qr(-t;F9rzgQ1BQqvU%_lOfJa@#c1Y#87kh8C8U8? 
zw4Z~-3}b;xWfaYyMg0+?p}<|xV3r0*eQJZ6?ph%?_j&t%+e9Au_!wJ2YGvigXq%SW zc~M?VZ7sb`hue7bK5vx=nVk!b%uHLy>#%gpJjdoo^egeBV+{k<1CBH)+2l2K$XeR)F z$T!0=Q{Z3U>g(G(quXE`@{4y>tRixqIQgy<5wN&^l1R;d_U=O6<7#oZiup21q6=Ejf zIZAb^@TiSlT0E&SBW*SNh$QGzlpGi(JN#BsxIRp}#OkmE$ybJQszn5%7SpwUlf)dt zzzLX6IrmVn;)3{lB>E^9>$~Cgr;zo|w>>Eg`B8b?#rl$%^k_DZ%-T-$>dX&+}XvkB{y%AZ5d55?v`t3kBkckXff2G*Y@(lAo%Ztc5eN zGbDa(`#a6=aMCqste>J=UTOv)l<<^`v{NI*zqboi8colIbJ6IMu>Ca&OXwL1P&((g z6yrP|{N|{Qo17@rVvI{{Q+exDLxas;Q7)_=IPb8zDllmNq5JAAI^GMcizy&^9c1kL zMml)*J0q8xWnVdwN(_~{^~&Vi6>WO~drhM&hqN$lJDpNyJuNoRHa*+fH~|w{EUG9x z01jHkMOCh=+YURfgI9%iMpGcL&i~oGd~RNRa<+7}Ewy4InU#M&cJG9t)D8Y;m?FpI z3yv*L3fUP@R;ZXG@Ro(W)$roUJkrOp0b5`9Slz*GFzZ}`e=>#(NEzRxU>FJH(D?%S zf?b0;Xp8K-A(Zlou&q*h^|5%BY|b76Dp@H3<-@q66BAg3S0R@png~u9>qe$a6yg(8 z&Xcx#x|f^A?B2QHyjICXaNk8QHclROo>#Qrhg&-reznc_BmG+{5Uhdi$_XPB4qW^!=W+kY-YTc7KxpQ#@Ql$jVjEINe*9gU zRdCWda&Tz3gy>CXr^NMt8=B3$J;c>&Y~sNhbA-$noA;8*%g6cwY~Rtfj6Z8lfed;{NG2W z^kfEIPdARpAfa)V!%#ZVN{2rroucg_?xA&{8vr2q_FoM%scnNtRc86Fmt^5U*lc zx1)&Lud?;e`xz1!OT6p1S84N1j8QMOLKZ35U^w9IT`(wfZ;N{aTA$US?pN?^T~<~B zVg6DX!?9Rx7V?lp+!cKIN`&3L(7kkQv_piQmEz$<@za+)x9WGa0~LM;N38NEpBep^ zeaAgmE!Tl3VwD67__cIL9{)iSYL(DPlxdi7QD!O!B>G%R{!W&oyT35O(m@^*Vd(@T zlq-(NBV^%dV@sc++r(Md;`ie8r`vAlo!eN4qGbeQy~Yj5Qn>33#s?*Lx!0BtDzbGO zdD3L^{*1!ys4DVCvIDLdi|N7Y_e@9JXM$CaITNVf6AOWv% zcpQPz7s_M*47`^mbFIuw=GW6iw!0+$^4FI?cP}M1wl#_t=9AI?EX4O!XZns1|G`!U zII&o4AnacKy*eUi-f!PkwP-O)@%U8q64b?UUn;P z$`6>L+mL4d*y`KMQ&!bb))tZZOQ#j}l~V0Alk3X7*U602oX2!ys(o7OY~ap2?k&ip z79*c*@NEZylwEr<^67QGrRk|cIaMg-S*~&s+RnQaaFc>C2I|=W)@JqB6WmwExxnX> z-;+@Kh8t7Y3#$xZzWct`+BD}=fq_5nb zmbVzAVpxMX%ga^il|Ax)$qM!{H3dQNzU?m)f? z#=e&5r@cPkvU#7kkGbCVNY)cfWi+gN$ytQ0<ds?mGm zWR_N*m-+9#6T;fLDkYmA--Bu4W4#8B#`|^)^d5bl^N!1Wl1%!g2H0NVFMi?j`aQ!@ zl+t5dm&A9s7_3nBl=9B|-E$efxUCW5^m5Z2}^f-g)JU2= zu^>z7Z{F@l_*qrWr&f3M%@TF?6AK`)o4$ zXZTTNe%wVp{X$DLP2T~Ofi@;D8LeDDNb-3Z{&6oRBFwrrD0Db(W+zN*icXDJ_LKWs z+`(l_*MSp;lJYc^+yXum{=K9?2w!9Pjo;d{lkK_tlS==kc*Tip;ZqZQEL!RKyyk27 z`Sh}Y!H*Y?Erm~a!hZ--9onK)kU}Bt>GKQU8w1uhh>$7KbkOtud>8W0=~+ui7Va8sDmc`5J_4YYLR?Twr+mz8^23|(d*yo82OzmjUQU#G!$It#w;R>V?IZ4*w_JAD_A5;-L@o0% zN<*yibAYkBUd-q~M&j1?-@^;p4hH1ELq!xtIZStFqJK5DLe5MMb@*kyH@=F!d6+t* z+&unlytj+r6S;#95bmrYeDBsC$&{bJbwC&XvZWC2KwAtJO0n?FSD2mihVB|AiJKnf zE=hjXQ=6)j7{`d_@XsPMdr6d>WJYc5ZvDDed)6a7JO0RBcD6*Q=F|N>dfQ%6Rnm?q zH;|tYK6q(C*XevfGHl8UHwGW%I)m(kCmF4m2_(z*rQV(y6trnf(rQyMf6w0b5K004URrjJED|Np{ z1=R@XWsmb_mo>2em6!SO!Z-E%h0NUU-}(j?D%vJsi#=~GwaNzZCJqhL`wMAjFlDkr z7ZrQPUs1;O+O??jspgu)*&|@hRcIarAczSH@9uW9^oOh(TfpW9gh@7<2NCF_Z>>jj ztfR*(Fe;9|;Q-$~inEBEJ;)JD5ZMH#E1E7L=U>}vrJZ*cr0+ZnuVh@JgE5KDktcT&|=)ViiPp^vwk$0=JqoN1>kV zHSxH??q}Axr}&3@LQWoY{%A3CLd9U*qqJf4A|9%P;LwP2hd!qX+N5Z@yqUY6D|8Oe z`1rtqTY9aJw<+OcH{*s+7j|5?`@JV(@~+CaA*!a<1<*1_>A@*LB;ZJXxE&k*JDtIv z^-IcHPW9jdY#*yKaiLXLL+Dif)aUel>Pj)p$5_A7)P z!TmdwCTrXb%kDcTD=i7JPHVoxp>!ZlY`;7+cXBI;tm<$c>3HpYUARd!+bzPSF7^OE zJBiKuVbyWA-G8-yTO+S#6UK)q)L+^nAx}{CD^-gv966!34n0HW#Np52X2Fq{u@-N+ zLm;WLaYnsck>?yjy5A?II3jl`tr`VG+!`ZCCx5MRb01|>?R7xZ{s=2*3hgo3(rOZF z;^n2tF7kw`LBU5sT0?7EXO7vVWAD4F06nEyeb{l-)^=e(=uU+bujI9Yx)krgyA>6l z?EK0YHhh0B5f_1)&AhF%5EM}&c^+{jO`lza%yggf(`LK;2Sy_0t$y7Em_ag!gYx9N zt9!f_{pI?HMet>lDfw+$ z=lU5k@Po$j9U=B(MG{{>@e+rsKRnZPA>8@%TS~ll$-DIN~NOQIsm#z9#mBh zjNh&^pY;3J7(syWv8x~NUkmFhY-P3r-4{#`sHRxM6%t2*_0-gew!NV!#j<{3txB$( z5Kz+B9PQ>NudjrD+Lm}3cBLw(_Y7>od-fhoc7#1yR()+;Y_iV_zlPL#zI@>pV#+PD z%SLv?V^NszfyD%fWtAk`^|WOqHSx;$&|zU%<@B@N{$7(ZDY@_X{UO02EO(wmyGsAz zl$pDL zWeb-=2<9hOGxIk)cSP0GXV`X`&r>VSK=1iUR{5pqIXVNMulzH+#=I@7SXrG!i|KmD`~lK2bb2wN;Gv{E(z#D7MY4%#w0`@#ZHo 
[... base85-encoded GIT binary patch data for a deleted gallery thumbnail PNG omitted ...]
diff --git a/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_similarity_thumb.png b/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_similarity_thumb.png deleted file mode 100644 index 59370dd40a3a2bbad477dc433467dfbbe0ac56cf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27537 [... base85-encoded binary patch data omitted ...]
diff --git a/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_vibronic_thumb.png b/doc/tutorials_apps/images/thumb/sphx_glr_run_tutorial_vibronic_thumb.png deleted file mode 100644 index 59370dd40a3a2bbad477dc433467dfbbe0ac56cf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27537 [... base85-encoded binary patch data omitted ...]
diff --git a/doc/tutorials_apps/index.rst b/doc/tutorials_apps/index.rst deleted file mode 100644 index 6f32268b9..000000000 --- a/doc/tutorials_apps/index.rst +++ /dev/null @@ -1,160 +0,0 @@ -:orphan: - - - -.. _sphx_glr_tutorials_apps: - -Tutorials -========= - -SF Tutorials - - - -.. raw:: html - -
- -.. only:: html - - .. figure:: /tutorials_apps/images/thumb/sphx_glr_run_tutorial_points_thumb.png - - :ref:`sphx_glr_tutorials_apps_run_tutorial_points.py` - -.. raw:: html - -
- - -.. toctree:: - :hidden: - - /tutorials_apps/run_tutorial_points - -.. raw:: html - -
- -.. only:: html - - .. figure:: /tutorials_apps/images/thumb/sphx_glr_run_tutorial_dense_thumb.png - - :ref:`sphx_glr_tutorials_apps_run_tutorial_dense.py` - -.. raw:: html - -
- - -.. toctree:: - :hidden: - - /tutorials_apps/run_tutorial_dense - -.. raw:: html - -
- -.. only:: html - - .. figure:: /tutorials_apps/images/thumb/sphx_glr_run_tutorial_vibronic_thumb.png - - :ref:`sphx_glr_tutorials_apps_run_tutorial_vibronic.py` - -.. raw:: html - -
- - -.. toctree:: - :hidden: - - /tutorials_apps/run_tutorial_vibronic - -.. raw:: html - -
- -.. only:: html - - .. figure:: /tutorials_apps/images/thumb/sphx_glr_run_tutorial_sample_thumb.png - - :ref:`sphx_glr_tutorials_apps_run_tutorial_sample.py` - -.. raw:: html - -
- - -.. toctree:: - :hidden: - - /tutorials_apps/run_tutorial_sample - -.. raw:: html - -
- -.. only:: html - - .. figure:: /tutorials_apps/images/thumb/sphx_glr_run_tutorial_similarity_thumb.png - - :ref:`sphx_glr_tutorials_apps_run_tutorial_similarity.py` - -.. raw:: html - -
- - -.. toctree:: - :hidden: - - /tutorials_apps/run_tutorial_similarity - -.. raw:: html - -
- -.. only:: html - - .. figure:: /tutorials_apps/images/thumb/sphx_glr_run_tutorial_max_clique_thumb.png - - :ref:`sphx_glr_tutorials_apps_run_tutorial_max_clique.py` - -.. raw:: html - -
- - -.. toctree:: - :hidden: - - /tutorials_apps/run_tutorial_max_clique -.. raw:: html - -
- - - -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-gallery - - - .. container:: sphx-glr-download - - :download:`Download all examples in Python source code: tutorials_apps_python.zip ` - - - - .. container:: sphx-glr-download - - :download:`Download all examples in Jupyter notebooks: tutorials_apps_jupyter.zip ` - - -.. only:: html - - .. rst-class:: sphx-glr-signature - - `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/run_tutorial_dense.ipynb b/doc/tutorials_apps/run_tutorial_dense.ipynb deleted file mode 100644 index f1e65a218..000000000 --- a/doc/tutorials_apps/run_tutorial_dense.ipynb +++ /dev/null @@ -1,165 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# This cell is added by sphinx-gallery\n# It can be customized to whatever you like\n%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\nDense Subgraphs\n===============\n\n*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.subgraph`\n\nGraphs can be used to model a wide variety of concepts: social networks, financial markets,\nbiological networks, and many others. A common problem of interest is to find subgraphs that\ncontain a large number of connections between their nodes. These subgraphs may correspond to\ncommunities in social networks, correlated assets in a market, or mutually influential proteins\nin a biological network.\n\nMathematically, this task is known as the `dense subgraph problem\n`__. The density of a $k$-node subgraph is equal\nto the number of its edges divided by the maximum possible number of edges.\nIdentifying the densest graph of a given size, known as the densest-$k$ subgraph problem,\nis `NP-Hard `__.\n\n\nAs shown in :cite:`arrazola2018using`, a defining feature of GBS is that when we encode a graph\ninto a GBS device, it samples dense subgraphs with high probability. This property can be\nused to find dense subgraphs by sampling from a GBS device and postprocessing the outputs.\nLet's take a look!\n\nFinding dense subgraphs\n-----------------------\nThe first step is to import all required modules. We'll need the :mod:`~.apps.data`\nmodule to load pre-generated samples, the :mod:`~.apps.sample` module to postselect samples, the\n:mod:`~.apps.subgraph` module to search for dense subgraphs, and the :mod:`~.apps.plot` module to\nvisualize the graphs. We'll also use Plotly which is required for the :mod:`~.apps.plot` module and\nNetworkX for graph operations.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from strawberryfields.apps import data, sample, subgraph, plot\nimport plotly\nimport networkx as nx" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here we'll study a 30-node graph with a planted 10-node graph, as considered in\n:cite:`arrazola2018using`. The graph is generated by joining two Erd\u0151s\u2013R\u00e9nyi random graphs. The\nfirst graph of 20 nodes is created with edge probability of 0.5. The second planted\ngraph is generated with edge probability of 0.875. The planted nodes are the last ten nodes in the\ngraph. The two graphs are joined by selecting 8 nodes at random from both graphs and adding an\nedge between them. 
This graph has the sneaky property that even though the planted subgraph is the\ndensest of its size, its nodes have a lower average degree than the nodes in the rest of the\ngraph.\n\nThe :mod:`~.apps.data` module has pre-generated GBS samples from this graph. Let's load them,\npostselect on samples with a large number of clicks, and convert them to subgraphs:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "planted = data.Planted()\npostselected = sample.postselect(planted, 16, 30)\npl_graph = nx.to_networkx_graph(planted.adj)\nsamples = sample.to_subgraphs(postselected, pl_graph)\nprint(len(samples))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Not bad! We have more than 2000 samples to play with \ud83d\ude0e. The planted subgraph is actually easy to\nidentify; it even appears clearly from the force-directed Kamada-Kawai algorithm that is used to\nplot graphs in Strawberry Fields:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sub = list(range(20, 30))\nplot_graph = plot.graph(pl_graph, sub)\nplotly.offline.plot(plot_graph, filename=\"planted.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/planted.html\n\n

Note: The command ``plotly.offline.plot()`` is used to display plots in the documentation. In\n practice, you can simply use ``plot_graph.show()`` to view the graph.
\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "A more interesting challenge is to find dense subgraphs of different sizes; it is often\nuseful to identify many high-density subgraphs, not just the densest ones. This is the purpose of\nthe :func:`~.subgraph.search` function in the :mod:`~.apps.subgraph` module: to identify\ncollections of dense subgraphs for a range of sizes. The output of this function is a\ndictionary whose keys correspond to subgraph sizes within the specified range. The values in\nthe dictionary are the top subgraphs of that size and their corresponding density.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "dense = subgraph.search(samples, pl_graph, 8, 16, max_count=3) # we look at top 3 densest subgraphs\nfor k in range(8, 17):\n print(dense[k][0]) # print only the densest subgraph of each size" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "From the results of the search we learn that, depending on their size, the densest subgraphs\nbelong to different regions of the graph: dense subgraphs of less than ten nodes are contained\nwithin the planted subgraph, whereas larger dense subgraphs appear outside of the planted\nsubgraph. Smaller dense subgraphs can be cliques, characterized by having\nmaximum density of 1, while larger subgraphs are less dense. Let's see what the smallest and\nlargest subgraphs look like:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "densest_8 = plot.graph(pl_graph, dense[8][0][1])\ndensest_16 = plot.graph(pl_graph, dense[12][0][1])\n\nplotly.offline.plot(densest_8, filename=\"densest_8.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/densest_8.html\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "plotly.offline.plot(densest_16, filename=\"densest_16.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/densest_16.html\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In principle there are different methods to postprocess GBS outputs to identify dense\nsubgraphs. For example, techniques for finding maximum cliques, included in the\n:mod:`~.apps.clique` module could help provide initial subgraphs that can be resized to find\nlarger dense subgraphs. Such methods are hybrid algorithms combining the ability of GBS to\nsample dense subgraphs with clever classical techniques. Can you think of your own hybrid\nalgorithm? 
\ud83e\udd14\n\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_dense.py b/doc/tutorials_apps/run_tutorial_dense.py deleted file mode 100644 index 964fc4c29..000000000 --- a/doc/tutorials_apps/run_tutorial_dense.py +++ /dev/null @@ -1,116 +0,0 @@ -# pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports -""" -.. _apps-subgraph-tutorial: - -Dense Subgraphs -=============== - -*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.subgraph` - -Graphs can be used to model a wide variety of concepts: social networks, financial markets, -biological networks, and many others. A common problem of interest is to find subgraphs that -contain a large number of connections between their nodes. These subgraphs may correspond to -communities in social networks, correlated assets in a market, or mutually influential proteins -in a biological network. - -Mathematically, this task is known as the `dense subgraph problem -`__. The density of a :math:`k`-node subgraph is equal -to the number of its edges divided by the maximum possible number of edges. -Identifying the densest graph of a given size, known as the densest-:math:`k` subgraph problem, -is `NP-Hard `__. - - -As shown in :cite:`arrazola2018using`, a defining feature of GBS is that when we encode a graph -into a GBS device, it samples dense subgraphs with high probability. This property can be -used to find dense subgraphs by sampling from a GBS device and postprocessing the outputs. -Let's take a look! - -Finding dense subgraphs ------------------------ -The first step is to import all required modules. We'll need the :mod:`~.apps.data` -module to load pre-generated samples, the :mod:`~.apps.sample` module to postselect samples, the -:mod:`~.apps.subgraph` module to search for dense subgraphs, and the :mod:`~.apps.plot` module to -visualize the graphs. We'll also use Plotly which is required for the :mod:`~.apps.plot` module and -NetworkX for graph operations. -""" -from strawberryfields.apps import data, sample, subgraph, plot -import plotly -import networkx as nx - -############################################################################## -# Here we'll study a 30-node graph with a planted 10-node graph, as considered in -# :cite:`arrazola2018using`. The graph is generated by joining two Erdős–Rényi random graphs. The -# first graph of 20 nodes is created with edge probability of 0.5. The second planted -# graph is generated with edge probability of 0.875. The planted nodes are the last ten nodes in the -# graph. The two graphs are joined by selecting 8 nodes at random from both graphs and adding an -# edge between them. This graph has the sneaky property that even though the planted subgraph is the -# densest of its size, its nodes have a lower average degree than the nodes in the rest of the -# graph. -# -# The :mod:`~.apps.data` module has pre-generated GBS samples from this graph. 
Let's load them, -# postselect on samples with a large number of clicks, and convert them to subgraphs: - -planted = data.Planted() -postselected = sample.postselect(planted, 16, 30) -pl_graph = nx.to_networkx_graph(planted.adj) -samples = sample.to_subgraphs(postselected, pl_graph) -print(len(samples)) - -############################################################################## -# Not bad! We have more than 2000 samples to play with 😎. The planted subgraph is actually easy to -# identify; it even appears clearly from the force-directed Kamada-Kawai algorithm that is used to -# plot graphs in Strawberry Fields: -sub = list(range(20, 30)) -plot_graph = plot.graph(pl_graph, sub) -plotly.offline.plot(plot_graph, filename="planted.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/planted.html -# -# .. note:: -# The command ``plotly.offline.plot()`` is used to display plots in the documentation. In -# practice, you can simply use ``plot_graph.show()`` to view the graph. - -############################################################################## -# A more interesting challenge is to find dense subgraphs of different sizes; it is often -# useful to identify many high-density subgraphs, not just the densest ones. This is the purpose of -# the :func:`~.subgraph.search` function in the :mod:`~.apps.subgraph` module: to identify -# collections of dense subgraphs for a range of sizes. The output of this function is a -# dictionary whose keys correspond to subgraph sizes within the specified range. The values in -# the dictionary are the top subgraphs of that size and their corresponding density. - -dense = subgraph.search(samples, pl_graph, 8, 16, max_count=3) # we look at top 3 densest subgraphs -for k in range(8, 17): - print(dense[k][0]) # print only the densest subgraph of each size - -############################################################################## -# From the results of the search we learn that, depending on their size, the densest subgraphs -# belong to different regions of the graph: dense subgraphs of less than ten nodes are contained -# within the planted subgraph, whereas larger dense subgraphs appear outside of the planted -# subgraph. Smaller dense subgraphs can be cliques, characterized by having -# maximum density of 1, while larger subgraphs are less dense. Let's see what the smallest and -# largest subgraphs look like: - -densest_8 = plot.graph(pl_graph, dense[8][0][1]) -densest_16 = plot.graph(pl_graph, dense[12][0][1]) - -plotly.offline.plot(densest_8, filename="densest_8.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/densest_8.html - -plotly.offline.plot(densest_16, filename="densest_16.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/densest_16.html - -############################################################################## -# In principle there are different methods to postprocess GBS outputs to identify dense -# subgraphs. For example, techniques for finding maximum cliques, included in the -# :mod:`~.apps.clique` module could help provide initial subgraphs that can be resized to find -# larger dense subgraphs. Such methods are hybrid algorithms combining the ability of GBS to -# sample dense subgraphs with clever classical techniques. Can you think of your own hybrid -# algorithm? 
🤔 diff --git a/doc/tutorials_apps/run_tutorial_dense.py.md5 b/doc/tutorials_apps/run_tutorial_dense.py.md5 deleted file mode 100644 index 456404ca2..000000000 --- a/doc/tutorials_apps/run_tutorial_dense.py.md5 +++ /dev/null @@ -1 +0,0 @@ -a3bc0cddd4d982bccbab3a7fcc4ef7a3 \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_dense.rst b/doc/tutorials_apps/run_tutorial_dense.rst deleted file mode 100644 index fdc7a30c0..000000000 --- a/doc/tutorials_apps/run_tutorial_dense.rst +++ /dev/null @@ -1,229 +0,0 @@ -.. note:: - :class: sphx-glr-download-link-note - - Click :ref:`here ` to download the full example code -.. rst-class:: sphx-glr-example-title - -.. _sphx_glr_tutorials_apps_run_tutorial_dense.py: - - -.. _apps-subgraph-tutorial: - -Dense Subgraphs -=============== - -*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.subgraph` - -Graphs can be used to model a wide variety of concepts: social networks, financial markets, -biological networks, and many others. A common problem of interest is to find subgraphs that -contain a large number of connections between their nodes. These subgraphs may correspond to -communities in social networks, correlated assets in a market, or mutually influential proteins -in a biological network. - -Mathematically, this task is known as the `dense subgraph problem -`__. The density of a :math:`k`-node subgraph is equal -to the number of its edges divided by the maximum possible number of edges. -Identifying the densest graph of a given size, known as the densest-:math:`k` subgraph problem, -is `NP-Hard `__. - - -As shown in :cite:`arrazola2018using`, a defining feature of GBS is that when we encode a graph -into a GBS device, it samples dense subgraphs with high probability. This property can be -used to find dense subgraphs by sampling from a GBS device and postprocessing the outputs. -Let's take a look! - -Finding dense subgraphs ------------------------ -The first step is to import all required modules. We'll need the :mod:`~.apps.data` -module to load pre-generated samples, the :mod:`~.apps.sample` module to postselect samples, the -:mod:`~.apps.subgraph` module to search for dense subgraphs, and the :mod:`~.apps.plot` module to -visualize the graphs. We'll also use Plotly which is required for the :mod:`~.apps.plot` module and -NetworkX for graph operations. - - -.. code-block:: default - - from strawberryfields.apps import data, sample, subgraph, plot - import plotly - import networkx as nx - - - - - - - -Here we'll study a 30-node graph with a planted 10-node graph, as considered in -:cite:`arrazola2018using`. The graph is generated by joining two Erdős–Rényi random graphs. The -first graph of 20 nodes is created with edge probability of 0.5. The second planted -graph is generated with edge probability of 0.875. The planted nodes are the last ten nodes in the -graph. The two graphs are joined by selecting 8 nodes at random from both graphs and adding an -edge between them. This graph has the sneaky property that even though the planted subgraph is the -densest of its size, its nodes have a lower average degree than the nodes in the rest of the -graph. - -The :mod:`~.apps.data` module has pre-generated GBS samples from this graph. Let's load them, -postselect on samples with a large number of clicks, and convert them to subgraphs: - - -.. 
code-block:: default - - - planted = data.Planted() - postselected = sample.postselect(planted, 16, 30) - pl_graph = nx.to_networkx_graph(planted.adj) - samples = sample.to_subgraphs(postselected, pl_graph) - print(len(samples)) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - 2181 - - -Not bad! We have more than 2000 samples to play with 😎. The planted subgraph is actually easy to -identify; it even appears clearly from the force-directed Kamada-Kawai algorithm that is used to -plot graphs in Strawberry Fields: - - -.. code-block:: default - - sub = list(range(20, 30)) - plot_graph = plot.graph(pl_graph, sub) - plotly.offline.plot(plot_graph, filename="planted.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/planted.html - -.. note:: - The command ``plotly.offline.plot()`` is used to display plots in the documentation. In - practice, you can simply use ``plot_graph.show()`` to view the graph. - -A more interesting challenge is to find dense subgraphs of different sizes; it is often -useful to identify many high-density subgraphs, not just the densest ones. This is the purpose of -the :func:`~.subgraph.search` function in the :mod:`~.apps.subgraph` module: to identify -collections of dense subgraphs for a range of sizes. The output of this function is a -dictionary whose keys correspond to subgraph sizes within the specified range. The values in -the dictionary are the top subgraphs of that size and their corresponding density. - - -.. code-block:: default - - - dense = subgraph.search(samples, pl_graph, 8, 16, max_count=3) # we look at top 3 densest subgraphs - for k in range(8, 17): - print(dense[k][0]) # print only the densest subgraph of each size - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - (1.0, [21, 22, 24, 25, 26, 27, 28, 29]) - (0.9722222222222222, [21, 22, 23, 24, 25, 26, 27, 28, 29]) - (0.9333333333333333, [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]) - (0.7818181818181819, [17, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]) - (0.696969696969697, [0, 2, 3, 5, 6, 8, 9, 10, 14, 16, 17, 18]) - (0.6666666666666666, [2, 3, 6, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18]) - (0.6483516483516484, [0, 3, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) - (0.6285714285714286, [0, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) - (0.6083333333333333, [0, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) - - -From the results of the search we learn that, depending on their size, the densest subgraphs -belong to different regions of the graph: dense subgraphs of less than ten nodes are contained -within the planted subgraph, whereas larger dense subgraphs appear outside of the planted -subgraph. Smaller dense subgraphs can be cliques, characterized by having -maximum density of 1, while larger subgraphs are less dense. Let's see what the smallest and -largest subgraphs look like: - - -.. code-block:: default - - - densest_8 = plot.graph(pl_graph, dense[8][0][1]) - densest_16 = plot.graph(pl_graph, dense[12][0][1]) - - plotly.offline.plot(densest_8, filename="densest_8.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/densest_8.html - - -.. code-block:: default - - - plotly.offline.plot(densest_16, filename="densest_16.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/densest_16.html - -In principle there are different methods to postprocess GBS outputs to identify dense -subgraphs. 
For example, techniques for finding maximum cliques, included in the -:mod:`~.apps.clique` module could help provide initial subgraphs that can be resized to find -larger dense subgraphs. Such methods are hybrid algorithms combining the ability of GBS to -sample dense subgraphs with clever classical techniques. Can you think of your own hybrid -algorithm? 🤔 - - -.. rst-class:: sphx-glr-timing - - **Total running time of the script:** ( 1 minutes 0.299 seconds) - - -.. _sphx_glr_download_tutorials_apps_run_tutorial_dense.py: - - -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - - .. container:: sphx-glr-download - - :download:`Download Python source code: run_tutorial_dense.py ` - - - - .. container:: sphx-glr-download - - :download:`Download Jupyter notebook: run_tutorial_dense.ipynb ` - - -.. only:: html - - .. rst-class:: sphx-glr-signature - - `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/run_tutorial_max_clique.ipynb b/doc/tutorials_apps/run_tutorial_max_clique.ipynb deleted file mode 100644 index 140095bf9..000000000 --- a/doc/tutorials_apps/run_tutorial_max_clique.ipynb +++ /dev/null @@ -1,287 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# This cell is added by sphinx-gallery\n# It can be customized to whatever you like\n%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\nMaximum Clique\n==============\n\n*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.clique`\n\nHere we'll explore how to combine GBS samples with local search algorithms to find large cliques\nin graphs. Let's get started!\n\nA clique is a special type of subgraph where all possible connections between nodes are present;\nthey are densest possible subgraphs of their size. The maximum clique problem, or max clique for\nshort, asks the question: given a graph $G$, what is the largest clique in the graph?\nMax clique is `NP-Hard `_, so finding the biggest clique\nbecomes challenging for graphs with many\nnodes. This is why we need clever algorithms to identify large cliques!\n\nTo get started, we'll analyze the 24-node TACE-AS graph used in :cite:`banchi2019molecular`. This\nis the *binding interaction graph* representing the spatial compatibility of atom pairs in a\nprotein-molecule complex. 
Cliques in this graph correspond to stable docking configurations, which\nare of interest in determining how the molecule interacts with the protein.\n\nThe first step is to import the Strawberry Fields ``apps`` module and external dependencies:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from strawberryfields.apps import data, plot, sample, clique\nimport numpy as np\nimport networkx as nx\nimport plotly" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The adjacency matrix of the TACE-AS graph can be loaded from the :mod:`~.apps.data` module and the\ngraph can be visualized using the :mod:`~.apps.plot` module:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "TA = data.TaceAs()\nA = TA.adj\nTA_graph = nx.Graph(A)\nplot_graph = plot.graph(TA_graph)\nplotly.offline.plot(plot_graph, filename=\"TACE-AS.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/TACE-AS.html\n\n

.. note::

    The command ``plotly.offline.plot()`` is used to display plots in the documentation. In
    practice, you can simply use ``plot_graph.show()`` to view your graph.
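Because the TACE-AS graph has only 24 nodes, a purely classical baseline is also easy to obtain. The sketch below assumes ``TA_graph`` from the cell above and uses NetworkX's maximal-clique enumeration to list every maximal clique and report the largest one found:

.. code-block:: python

    import networkx as nx

    # Classical baseline: enumerate all maximal cliques of the 24-node graph
    maximal_cliques = list(nx.find_cliques(TA_graph))
    largest = max(maximal_cliques, key=len)

    print("Number of maximal cliques:", len(maximal_cliques))
    print("Size of the largest clique:", len(largest))
    print("One largest clique:", sorted(largest))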

\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Can you spot any cliques in the graph? It's not so easy using only your eyes! The TACE-AS graph\nis sufficiently small that all cliques can be found by performing an exhaustive search over\nall subgraphs. For example, below we highlight a small *maximal* clique, i.e., a clique\nnot contained inside another clique:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "maximal_clique = [4, 11, 12, 18]\nmaximal_fig = plot.graph(TA_graph, maximal_clique)\nplotly.offline.plot(maximal_fig, filename=\"maximal_clique.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/maximal_clique.html\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We'll now use the :mod:`~.apps.clique` module to find larger cliques in the graph. We can make\nuse of the pre-generated samples from the TACE-AS graph in the :mod:`~.apps.data` module and\npost-select samples with a specific number of clicks. Here we'll look at samples with eight\nclicks, of which there are a total of 1,984:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "postselected = sample.postselect(TA, 8, 8)\nsamples = sample.to_subgraphs(postselected, TA_graph)\nprint(len(samples))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "GBS produces samples that correspond to subgraphs of high density. For fun, let's confirm this\nby comparing the average subgraph density in the GBS samples to uniformly generated samples:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "GBS_dens = []\nu_dens = []\n\nfor s in samples:\n uniform = list(np.random.choice(24, 8, replace=False)) # generates uniform sample\n GBS_dens.append(nx.density(TA_graph.subgraph(s)))\n u_dens.append(nx.density(TA_graph.subgraph(uniform)))\n\nprint(\"GBS mean density = {:.4f}\".format(np.mean(GBS_dens)))\nprint(\"Uniform mean density = {:.4f}\".format(np.mean(u_dens)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Those look like great GBS samples \ud83d\udcaa! To obtain cliques, we shrink the samples by greedily\nremoving nodes with low degree until a clique is found.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "shrunk = [clique.shrink(s, TA_graph) for s in samples]\nprint(clique.is_clique(TA_graph.subgraph(shrunk[0])))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's take a look at some of these cliques. What are the clique sizes in the first ten samples?\nWhat is the average clique size? 
How about the largest and smallest clique size?\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "clique_sizes = [len(s) for s in shrunk]\nprint(\"First ten clique sizes = \", clique_sizes[:10])\nprint(\"Average clique size = {:.3f}\".format(np.mean(clique_sizes)))\nprint(\"Maximum clique size = \", np.max(clique_sizes))\nprint(\"Minimum clique size = \", np.min(clique_sizes))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Even in the first few samples, we've already identified larger cliques than the 4-node clique\nwe studied before. Awesome! Indeed, this simple shrinking strategy gives cliques with average\nsize of roughly five. We can enlarge these cliques by searching for larger cliques in their\nvicinity. We'll do this by taking ten iterations of local search and studying the results.\nNote: this may take a few seconds.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "searched = [clique.search(s, TA_graph, 10) for s in shrunk]\nclique_sizes = [len(s) for s in searched]\nprint(\"First two cliques = \", searched[:2])\nprint(\"Average clique size = {:.3f}\".format(np.mean(clique_sizes)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Wow! Local search is very helpful, we've found cliques with the maximum size of eight for\nessentially all samples \ud83e\udd29. Let's take a look at the first clique we found\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "clique_fig = plot.graph(TA_graph, searched[0])\nplotly.offline.plot(clique_fig, filename=\"maximum_clique.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/maximum_clique.html\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The TACE-AS graph is relatively small, so finding large cliques is not particularly difficult. A\ntougher challenge is the 300-node ``p_hat300-1`` random graph from the `DIMACS\n`_ maximum clique\ndataset. 
In this section, we'll write a short program that uses GBS samples in combination with\nlocal search to identify large cliques in this graph.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "Phat = data.PHat() # Load data\nphat_graph = nx.Graph(Phat.adj) # Obtain graph\npostselected = sample.postselect(Phat, 16, 20) # Post-select samples\nsamples = sample.to_subgraphs(postselected, phat_graph) # Convert samples into subgraphs\nshrunk = [clique.shrink(s, phat_graph) for s in samples] # Shrink subgraphs to cliques\nsearched = [clique.search(s, phat_graph, 10) for s in shrunk] # Perform local search\nclique_sizes = [len(s) for s in searched]\nlargest_clique = searched[np.argmax(clique_sizes)] # Identify largest clique found\nprint(\"Largest clique found is = \", largest_clique)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's make a plot to take a closer look at the largest clique we found\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "largest_fig = plot.graph(phat_graph, largest_clique)\nplotly.offline.plot(largest_fig, filename=\"largest_clique.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/largest_clique.html\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "just_largest = plot.subgraph(phat_graph.subgraph(largest_clique))\nplotly.offline.plot(just_largest, filename=\"just_largest.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/just_largest.html\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The ``p_hat300-1`` graph has several maximum cliques of size eight,\nand we have managed to find them! What other graphs can you analyze using GBS?\n\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_max_clique.py b/doc/tutorials_apps/run_tutorial_max_clique.py deleted file mode 100644 index 543409e0d..000000000 --- a/doc/tutorials_apps/run_tutorial_max_clique.py +++ /dev/null @@ -1,164 +0,0 @@ -# pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports -""" -.. _apps-clique-tutorial: - -Maximum Clique -============== - -*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.clique` - -Here we'll explore how to combine GBS samples with local search algorithms to find large cliques -in graphs. Let's get started! - -A clique is a special type of subgraph where all possible connections between nodes are present; -they are densest possible subgraphs of their size. The maximum clique problem, or max clique for -short, asks the question: given a graph :math:`G`, what is the largest clique in the graph? 
-Max clique is `NP-Hard `_, so finding the biggest clique -becomes challenging for graphs with many -nodes. This is why we need clever algorithms to identify large cliques! - -To get started, we'll analyze the 24-node TACE-AS graph used in :cite:`banchi2019molecular`. This -is the *binding interaction graph* representing the spatial compatibility of atom pairs in a -protein-molecule complex. Cliques in this graph correspond to stable docking configurations, which -are of interest in determining how the molecule interacts with the protein. - -The first step is to import the Strawberry Fields ``apps`` module and external dependencies: -""" -from strawberryfields.apps import data, plot, sample, clique -import numpy as np -import networkx as nx -import plotly - -############################################################################## -# The adjacency matrix of the TACE-AS graph can be loaded from the :mod:`~.apps.data` module and the -# graph can be visualized using the :mod:`~.apps.plot` module: - -TA = data.TaceAs() -A = TA.adj -TA_graph = nx.Graph(A) -plot_graph = plot.graph(TA_graph) -plotly.offline.plot(plot_graph, filename="TACE-AS.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/TACE-AS.html -# -# .. note:: -# The command ``plotly.offline.plot()`` is used to display plots in the documentation. In -# practice, you can simply use ``plot_graph.show()`` to view your graph. - -############################################################################## -# Can you spot any cliques in the graph? It's not so easy using only your eyes! The TACE-AS graph -# is sufficiently small that all cliques can be found by performing an exhaustive search over -# all subgraphs. For example, below we highlight a small *maximal* clique, i.e., a clique -# not contained inside another clique: - -maximal_clique = [4, 11, 12, 18] -maximal_fig = plot.graph(TA_graph, maximal_clique) -plotly.offline.plot(maximal_fig, filename="maximal_clique.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/maximal_clique.html - -############################################################################## -# We'll now use the :mod:`~.apps.clique` module to find larger cliques in the graph. We can make -# use of the pre-generated samples from the TACE-AS graph in the :mod:`~.apps.data` module and -# post-select samples with a specific number of clicks. Here we'll look at samples with eight -# clicks, of which there are a total of 1,984: - -postselected = sample.postselect(TA, 8, 8) -samples = sample.to_subgraphs(postselected, TA_graph) -print(len(samples)) - -############################################################################## -# GBS produces samples that correspond to subgraphs of high density. For fun, let's confirm this -# by comparing the average subgraph density in the GBS samples to uniformly generated samples: - -GBS_dens = [] -u_dens = [] - -for s in samples: - uniform = list(np.random.choice(24, 8, replace=False)) # generates uniform sample - GBS_dens.append(nx.density(TA_graph.subgraph(s))) - u_dens.append(nx.density(TA_graph.subgraph(uniform))) - -print("GBS mean density = {:.4f}".format(np.mean(GBS_dens))) -print("Uniform mean density = {:.4f}".format(np.mean(u_dens))) - -############################################################################## -# Those look like great GBS samples 💪! 
To obtain cliques, we shrink the samples by greedily -# removing nodes with low degree until a clique is found. - -shrunk = [clique.shrink(s, TA_graph) for s in samples] -print(clique.is_clique(TA_graph.subgraph(shrunk[0]))) - -############################################################################## -# Let's take a look at some of these cliques. What are the clique sizes in the first ten samples? -# What is the average clique size? How about the largest and smallest clique size? - -clique_sizes = [len(s) for s in shrunk] -print("First ten clique sizes = ", clique_sizes[:10]) -print("Average clique size = {:.3f}".format(np.mean(clique_sizes))) -print("Maximum clique size = ", np.max(clique_sizes)) -print("Minimum clique size = ", np.min(clique_sizes)) - -############################################################################## -# Even in the first few samples, we've already identified larger cliques than the 4-node clique -# we studied before. Awesome! Indeed, this simple shrinking strategy gives cliques with average -# size of roughly five. We can enlarge these cliques by searching for larger cliques in their -# vicinity. We'll do this by taking ten iterations of local search and studying the results. -# Note: this may take a few seconds. - -searched = [clique.search(s, TA_graph, 10) for s in shrunk] -clique_sizes = [len(s) for s in searched] -print("First two cliques = ", searched[:2]) -print("Average clique size = {:.3f}".format(np.mean(clique_sizes))) - -############################################################################## -# Wow! Local search is very helpful, we've found cliques with the maximum size of eight for -# essentially all samples 🤩. Let's take a look at the first clique we found - -clique_fig = plot.graph(TA_graph, searched[0]) -plotly.offline.plot(clique_fig, filename="maximum_clique.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/maximum_clique.html - -############################################################################## -# The TACE-AS graph is relatively small, so finding large cliques is not particularly difficult. A -# tougher challenge is the 300-node ``p_hat300-1`` random graph from the `DIMACS -# `_ maximum clique -# dataset. In this section, we'll write a short program that uses GBS samples in combination with -# local search to identify large cliques in this graph. - -Phat = data.PHat() # Load data -phat_graph = nx.Graph(Phat.adj) # Obtain graph -postselected = sample.postselect(Phat, 16, 20) # Post-select samples -samples = sample.to_subgraphs(postselected, phat_graph) # Convert samples into subgraphs -shrunk = [clique.shrink(s, phat_graph) for s in samples] # Shrink subgraphs to cliques -searched = [clique.search(s, phat_graph, 10) for s in shrunk] # Perform local search -clique_sizes = [len(s) for s in searched] -largest_clique = searched[np.argmax(clique_sizes)] # Identify largest clique found -print("Largest clique found is = ", largest_clique) - -############################################################################## -# Let's make a plot to take a closer look at the largest clique we found -largest_fig = plot.graph(phat_graph, largest_clique) -plotly.offline.plot(largest_fig, filename="largest_clique.html") - -############################################################################## -# .. 
raw:: html -# :file: ../../examples_apps/largest_clique.html - -just_largest = plot.subgraph(phat_graph.subgraph(largest_clique)) -plotly.offline.plot(just_largest, filename="just_largest.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/just_largest.html - -############################################################################## -# The ``p_hat300-1`` graph has several maximum cliques of size eight, -# and we have managed to find them! What other graphs can you analyze using GBS? diff --git a/doc/tutorials_apps/run_tutorial_max_clique.py.md5 b/doc/tutorials_apps/run_tutorial_max_clique.py.md5 deleted file mode 100644 index 169cebacb..000000000 --- a/doc/tutorials_apps/run_tutorial_max_clique.py.md5 +++ /dev/null @@ -1 +0,0 @@ -ee889bb45b0c33edb758e978abc8518a \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_max_clique.rst b/doc/tutorials_apps/run_tutorial_max_clique.rst deleted file mode 100644 index ad3d6dd74..000000000 --- a/doc/tutorials_apps/run_tutorial_max_clique.rst +++ /dev/null @@ -1,354 +0,0 @@ -.. note:: - :class: sphx-glr-download-link-note - - Click :ref:`here ` to download the full example code -.. rst-class:: sphx-glr-example-title - -.. _sphx_glr_tutorials_apps_run_tutorial_max_clique.py: - - -.. _apps-clique-tutorial: - -Maximum Clique -============== - -*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.clique` - -Here we'll explore how to combine GBS samples with local search algorithms to find large cliques -in graphs. Let's get started! - -A clique is a special type of subgraph where all possible connections between nodes are present; -they are densest possible subgraphs of their size. The maximum clique problem, or max clique for -short, asks the question: given a graph :math:`G`, what is the largest clique in the graph? -Max clique is `NP-Hard `_, so finding the biggest clique -becomes challenging for graphs with many -nodes. This is why we need clever algorithms to identify large cliques! - -To get started, we'll analyze the 24-node TACE-AS graph used in :cite:`banchi2019molecular`. This -is the *binding interaction graph* representing the spatial compatibility of atom pairs in a -protein-molecule complex. Cliques in this graph correspond to stable docking configurations, which -are of interest in determining how the molecule interacts with the protein. - -The first step is to import the Strawberry Fields ``apps`` module and external dependencies: - - -.. code-block:: default - - from strawberryfields.apps import data, plot, sample, clique - import numpy as np - import networkx as nx - import plotly - - - - - - - -The adjacency matrix of the TACE-AS graph can be loaded from the :mod:`~.apps.data` module and the -graph can be visualized using the :mod:`~.apps.plot` module: - - -.. code-block:: default - - - TA = data.TaceAs() - A = TA.adj - TA_graph = nx.Graph(A) - plot_graph = plot.graph(TA_graph) - plotly.offline.plot(plot_graph, filename="TACE-AS.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/TACE-AS.html - -.. note:: - The command ``plotly.offline.plot()`` is used to display plots in the documentation. In - practice, you can simply use ``plot_graph.show()`` to view your graph. - -Can you spot any cliques in the graph? It's not so easy using only your eyes! The TACE-AS graph -is sufficiently small that all cliques can be found by performing an exhaustive search over -all subgraphs. 
For example, below we highlight a small *maximal* clique, i.e., a clique -not contained inside another clique: - - -.. code-block:: default - - - maximal_clique = [4, 11, 12, 18] - maximal_fig = plot.graph(TA_graph, maximal_clique) - plotly.offline.plot(maximal_fig, filename="maximal_clique.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/maximal_clique.html - -We'll now use the :mod:`~.apps.clique` module to find larger cliques in the graph. We can make -use of the pre-generated samples from the TACE-AS graph in the :mod:`~.apps.data` module and -post-select samples with a specific number of clicks. Here we'll look at samples with eight -clicks, of which there are a total of 1,984: - - -.. code-block:: default - - - postselected = sample.postselect(TA, 8, 8) - samples = sample.to_subgraphs(postselected, TA_graph) - print(len(samples)) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - 1984 - - -GBS produces samples that correspond to subgraphs of high density. For fun, let's confirm this -by comparing the average subgraph density in the GBS samples to uniformly generated samples: - - -.. code-block:: default - - - GBS_dens = [] - u_dens = [] - - for s in samples: - uniform = list(np.random.choice(24, 8, replace=False)) # generates uniform sample - GBS_dens.append(nx.density(TA_graph.subgraph(s))) - u_dens.append(nx.density(TA_graph.subgraph(uniform))) - - print("GBS mean density = {:.4f}".format(np.mean(GBS_dens))) - print("Uniform mean density = {:.4f}".format(np.mean(u_dens))) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - GBS mean density = 0.7005 - Uniform mean density = 0.5874 - - -Those look like great GBS samples 💪! To obtain cliques, we shrink the samples by greedily -removing nodes with low degree until a clique is found. - - -.. code-block:: default - - - shrunk = [clique.shrink(s, TA_graph) for s in samples] - print(clique.is_clique(TA_graph.subgraph(shrunk[0]))) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - True - - -Let's take a look at some of these cliques. What are the clique sizes in the first ten samples? -What is the average clique size? How about the largest and smallest clique size? - - -.. code-block:: default - - - clique_sizes = [len(s) for s in shrunk] - print("First ten clique sizes = ", clique_sizes[:10]) - print("Average clique size = {:.3f}".format(np.mean(clique_sizes))) - print("Maximum clique size = ", np.max(clique_sizes)) - print("Minimum clique size = ", np.min(clique_sizes)) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - First ten clique sizes = [4, 5, 6, 7, 4, 4, 4, 6, 5, 5] - Average clique size = 5.009 - Maximum clique size = 8 - Minimum clique size = 3 - - -Even in the first few samples, we've already identified larger cliques than the 4-node clique -we studied before. Awesome! Indeed, this simple shrinking strategy gives cliques with average -size of roughly five. We can enlarge these cliques by searching for larger cliques in their -vicinity. We'll do this by taking ten iterations of local search and studying the results. -Note: this may take a few seconds. - - -.. code-block:: default - - - searched = [clique.search(s, TA_graph, 10) for s in shrunk] - clique_sizes = [len(s) for s in searched] - print("First two cliques = ", searched[:2]) - print("Average clique size = {:.3f}".format(np.mean(clique_sizes))) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. 
code-block:: none - - First two cliques = [[5, 11, 13, 14, 16, 20, 21, 22], [1, 2, 4, 7, 8, 10, 17, 23]] - Average clique size = 8.000 - - -Wow! Local search is very helpful, we've found cliques with the maximum size of eight for -essentially all samples 🤩. Let's take a look at the first clique we found - - -.. code-block:: default - - - clique_fig = plot.graph(TA_graph, searched[0]) - plotly.offline.plot(clique_fig, filename="maximum_clique.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/maximum_clique.html - -The TACE-AS graph is relatively small, so finding large cliques is not particularly difficult. A -tougher challenge is the 300-node ``p_hat300-1`` random graph from the `DIMACS -`_ maximum clique -dataset. In this section, we'll write a short program that uses GBS samples in combination with -local search to identify large cliques in this graph. - - -.. code-block:: default - - - Phat = data.PHat() # Load data - phat_graph = nx.Graph(Phat.adj) # Obtain graph - postselected = sample.postselect(Phat, 16, 20) # Post-select samples - samples = sample.to_subgraphs(postselected, phat_graph) # Convert samples into subgraphs - shrunk = [clique.shrink(s, phat_graph) for s in samples] # Shrink subgraphs to cliques - searched = [clique.search(s, phat_graph, 10) for s in shrunk] # Perform local search - clique_sizes = [len(s) for s in searched] - largest_clique = searched[np.argmax(clique_sizes)] # Identify largest clique found - print("Largest clique found is = ", largest_clique) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - Largest clique found is = [114, 121, 132, 138, 173, 189, 199, 249] - - -Let's make a plot to take a closer look at the largest clique we found - - -.. code-block:: default - - largest_fig = plot.graph(phat_graph, largest_clique) - plotly.offline.plot(largest_fig, filename="largest_clique.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/largest_clique.html - - -.. code-block:: default - - - just_largest = plot.subgraph(phat_graph.subgraph(largest_clique)) - plotly.offline.plot(just_largest, filename="just_largest.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/just_largest.html - -The ``p_hat300-1`` graph has several maximum cliques of size eight, -and we have managed to find them! What other graphs can you analyze using GBS? - - -.. rst-class:: sphx-glr-timing - - **Total running time of the script:** ( 1 minutes 8.666 seconds) - - -.. _sphx_glr_download_tutorials_apps_run_tutorial_max_clique.py: - - -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - - .. container:: sphx-glr-download - - :download:`Download Python source code: run_tutorial_max_clique.py ` - - - - .. container:: sphx-glr-download - - :download:`Download Jupyter notebook: run_tutorial_max_clique.ipynb ` - - -.. only:: html - - .. 
rst-class:: sphx-glr-signature - - `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/run_tutorial_points.ipynb b/doc/tutorials_apps/run_tutorial_points.ipynb deleted file mode 100644 index b681d67b5..000000000 --- a/doc/tutorials_apps/run_tutorial_points.ipynb +++ /dev/null @@ -1,208 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# This cell is added by sphinx-gallery\n# It can be customized to whatever you like\n%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\nPoint processes\n===============\n\n*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.points`\n\nThis section shows how to generate GBS point process samples and use them to detect outlier\npoints in a data set. Point processes are models for generating random point patterns and can be\nuseful in machine learning, providing a source of randomness with\npreference towards both diversity :cite:`kulesza2012determinantal` and similarity in data. GBS\ndevices can be programmed to operate as special types of point processes that generate clustered\nrandom point patterns :cite:`jahangiri2019point`.\n\nThe probability of generating a specific pattern of points in GBS point processes depends on\nmatrix functions of a kernel matrix $K$ that describes the similarity between the points.\nMatrix functions that appear in GBS point processes are typically\n`permanents `__ and\n`hafnians `__. Here we use\nthe permanental point process, in which the probability of observing a pattern of points $S$\ndepends on the permanent of their corresponding kernel submatrix $K_S$ as\n:cite:`jahangiri2019point`:\n\n\\begin{align}\\mathcal{P}(S) = \\frac{1}{\\alpha(S)}\\text{per}(K_S),\\end{align}\n\nwhere $\\alpha$ is a normalization function that depends on $S$ and the average number\nof points. Let's look at a simple example to better understand the permanental point process.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We first import the modules we need. Note that the :mod:`~.apps.points` module has most of\nthe core functionalities exploring point processes.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import numpy as np\nimport plotly\nfrom sklearn.datasets import make_blobs\nfrom strawberryfields.apps import points, plot" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We define a space where the GBS point process patterns are generated. This\nspace is referred to as the state space and is defined by a set of points. The\npoint process selects a subset of these points in each sample. Here we create\na 20 $\\times$ 20 square grid of points.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "R = np.array([(i, j) for i in range(20) for j in range(20)])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The rows of R are the coordinates of the points.\n\nNext step is to create the kernel matrix for the points of this discrete space. 
We call\nthe :func:`~.rbf_kernel` function which uses the *radial basis function* (RBF) kernel defined as:\n\n\\begin{align}K_{i,j} = e^{-\\|\\bf{r}_i-\\bf{r}_j\\|^2/2\\sigma^2},\\end{align}\n\nwhere $\\bf{r}_i$ are the coordinates of point $i$ and $\\sigma$ is a kernel\nparameter that determines the scale of the kernel.\n\nIn the RBF kernel, points that are much further than a distance $\\sigma$ from each other\nlead to small entries of the kernel matrix, whereas points much closer than $\\sigma$\ngenerate large entries. Now consider a specific point pattern in which all points\nare close to each other, which simply means that their matrix elements have larger entries. The\npermanent of a matrix is a sum over the product of some matrix entries. Therefore,\nthe submatrix that corresponds to those points has a large permanent and the probability of\nobserving them in a sample is larger.\n\nFor kernel matrices that are positive-semidefinite, such as the RBF kernel, there exist efficient\nquantum-inspired classical algorithms for permanental point process sampling\n:cite:`jahangiri2019point`. In this tutorial we use positive-semidefinite kernels and the\nquantum-inspired classical algorithm.\n\nLet's construct the RBF kernel with the parameter $\\sigma$ set to 2.5.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "K = points.rbf_kernel(R, 2.5)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We generate 10 samples with an average number of 50 points per sample by calling\nthe :func:`~.points.sample` function of the :mod:`~.apps.points` module.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "samples = points.sample(K, 50.0, 10)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We visualize the first sample by using the :func:`~.points` function of\nthe :mod:`~.apps.plot` module. The point patterns generated by the permanental point process\nusually have a higher degree of clustering compared to a uniformly random pattern.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "plot_1 = plot.points(R, samples[0], point_size=10)\n\nplotly.offline.plot(plot_1, filename=\"Points.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/Points.html\n\n

.. note::

    The command ``plotly.offline.plot()`` is used to display plots in the documentation. In
    practice, you can simply use ``plot_1.show()`` to view your graph.
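The RBF formula quoted above can also be evaluated directly with NumPy as a sanity check. This is a minimal sketch assuming ``R`` and ``K`` from the earlier cells; if :func:`~.rbf_kernel` implements the kernel exactly as written above, the two matrices should agree:

.. code-block:: python

    import numpy as np

    sigma = 2.5
    # K[i, j] = exp(-||r_i - r_j||^2 / (2 sigma^2)), evaluated explicitly
    diffs = R[:, None, :] - R[None, :, :]
    K_manual = np.exp(-np.sum(diffs ** 2, axis=-1) / (2 * sigma ** 2))

    # If rbf_kernel evaluates the stated formula exactly, this should print True
    print("Matches points.rbf_kernel:", np.allclose(K, K_manual))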

\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Outlier Detection\n-----------------\n\nWhen the distribution of points in a given space is inhomogeneous, GBS point processes\nsample points from the dense regions with higher probability. This feature of the GBS point\nprocesses can be used to detect outlier points in a data set. In this example, we create two\ndense clusters and place them in a two-dimensional space containing some randomly distributed\npoints in the background. We consider the random background points as outliers to the clustered\npoints and show that the permanental point process selects points from the dense clusters with\na higher probability.\n\nWe first create the data points. The clusters have 50 points each and the points have a\nstandard deviation of 0.3. The clusters are centered at $[x = 2, y = 2]$ and $[x = 4,\ny = 4]$, respectively. We also add 25 randomly generated points to the data set.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "clusters = make_blobs(n_samples=100, centers=[[2, 2], [4, 4]], cluster_std=0.3)[0]\n\nnoise = np.random.rand(25, 2) * 6.0\n\nR = np.concatenate((clusters, noise))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then construct the kernel matrix and generate 10000 samples.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "K = points.rbf_kernel(R, 1.0)\n\nsamples = points.sample(K, 10.0, 10000)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We obtain the indices of 100 points that appear most frequently in the permanental point\nprocess samples and visualize them. The majority of the commonly appearing points belong\nto the clusters and the points that do not appear frequently are the outlier points. Note that\nsome of the background points might overlap with the clusters.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "gbs_frequent_points = np.argsort(np.sum(samples, axis=0))[-100:]\n\nplot_2 = plot.points(\n R, [1 if i in gbs_frequent_points else 0 for i in range(len(samples[0]))], point_size=10\n)\n\nplotly.offline.plot(plot_2, filename=\"Outliers.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/Outliers.html\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The two-dimensional examples considered here can be easily extended to higher dimensions. The\nGBS point processes retain their clustering property in higher dimensions but visual inspection\nof this clustering feature might not be very straightforward.\n\nGBS point processes can potentially be used in other applications such as clustering data points\nand finding correlations in time series data. 
Can you design your own example for using GBS point\nprocesses in a new application?\n\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_points.py b/doc/tutorials_apps/run_tutorial_points.py deleted file mode 100644 index 88677fb6a..000000000 --- a/doc/tutorials_apps/run_tutorial_points.py +++ /dev/null @@ -1,156 +0,0 @@ -# pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports,invalid-name -r""" -.. _apps-points-tutorial: - -Point processes -=============== - -*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.points` - -This section shows how to generate GBS point process samples and use them to detect outlier -points in a data set. Point processes are models for generating random point patterns and can be -useful in machine learning, providing a source of randomness with -preference towards both diversity :cite:`kulesza2012determinantal` and similarity in data. GBS -devices can be programmed to operate as special types of point processes that generate clustered -random point patterns :cite:`jahangiri2019point`. - -The probability of generating a specific pattern of points in GBS point processes depends on -matrix functions of a kernel matrix :math:`K` that describes the similarity between the points. -Matrix functions that appear in GBS point processes are typically -`permanents `__ and -`hafnians `__. Here we use -the permanental point process, in which the probability of observing a pattern of points :math:`S` -depends on the permanent of their corresponding kernel submatrix :math:`K_S` as -:cite:`jahangiri2019point`: - -.. math:: - \mathcal{P}(S) = \frac{1}{\alpha(S)}\text{per}(K_S), - -where :math:`\alpha` is a normalization function that depends on :math:`S` and the average number -of points. Let's look at a simple example to better understand the permanental point process. -""" - -############################################################################## -# We first import the modules we need. Note that the :mod:`~.apps.points` module has most of -# the core functionalities exploring point processes. - -import numpy as np -import plotly -from sklearn.datasets import make_blobs -from strawberryfields.apps import points, plot - -############################################################################## -# We define a space where the GBS point process patterns are generated. This -# space is referred to as the state space and is defined by a set of points. The -# point process selects a subset of these points in each sample. Here we create -# a 20 :math:`\times` 20 square grid of points. - -R = np.array([(i, j) for i in range(20) for j in range(20)]) - -############################################################################## -# The rows of R are the coordinates of the points. -# -# Next step is to create the kernel matrix for the points of this discrete space. We call -# the :func:`~.rbf_kernel` function which uses the *radial basis function* (RBF) kernel defined as: -# -# .. 
math:: -# K_{i,j} = e^{-\|\bf{r}_i-\bf{r}_j\|^2/2\sigma^2}, -# -# where :math:`\bf{r}_i` are the coordinates of point :math:`i` and :math:`\sigma` is a kernel -# parameter that determines the scale of the kernel. -# -# In the RBF kernel, points that are much further than a distance :math:`\sigma` from each other -# lead to small entries of the kernel matrix, whereas points much closer than :math:`\sigma` -# generate large entries. Now consider a specific point pattern in which all points -# are close to each other, which simply means that their matrix elements have larger entries. The -# permanent of a matrix is a sum over the product of some matrix entries. Therefore, -# the submatrix that corresponds to those points has a large permanent and the probability of -# observing them in a sample is larger. -# -# For kernel matrices that are positive-semidefinite, such as the RBF kernel, there exist efficient -# quantum-inspired classical algorithms for permanental point process sampling -# :cite:`jahangiri2019point`. In this tutorial we use positive-semidefinite kernels and the -# quantum-inspired classical algorithm. -# -# Let's construct the RBF kernel with the parameter :math:`\sigma` set to 2.5. - -K = points.rbf_kernel(R, 2.5) - -############################################################################## -# We generate 10 samples with an average number of 50 points per sample by calling -# the :func:`~.points.sample` function of the :mod:`~.apps.points` module. - -samples = points.sample(K, 50.0, 10) - -############################################################################## -# We visualize the first sample by using the :func:`~.points` function of -# the :mod:`~.apps.plot` module. The point patterns generated by the permanental point process -# usually have a higher degree of clustering compared to a uniformly random pattern. - -plot_1 = plot.points(R, samples[0], point_size=10) - -plotly.offline.plot(plot_1, filename="Points.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/Points.html -# -# .. note:: -# The command ``plotly.offline.plot()`` is used to display plots in the documentation. In -# practice, you can simply use ``plot_1.show()`` to view your graph. - -############################################################################## -# Outlier Detection -# ----------------- -# -# When the distribution of points in a given space is inhomogeneous, GBS point processes -# sample points from the dense regions with higher probability. This feature of the GBS point -# processes can be used to detect outlier points in a data set. In this example, we create two -# dense clusters and place them in a two-dimensional space containing some randomly distributed -# points in the background. We consider the random background points as outliers to the clustered -# points and show that the permanental point process selects points from the dense clusters with -# a higher probability. -# -# We first create the data points. The clusters have 50 points each and the points have a -# standard deviation of 0.3. The clusters are centered at :math:`[x = 2, y = 2]` and :math:`[x = 4, -# y = 4]`, respectively. We also add 25 randomly generated points to the data set. 
- -clusters = make_blobs(n_samples=100, centers=[[2, 2], [4, 4]], cluster_std=0.3)[0] - -noise = np.random.rand(25, 2) * 6.0 - -R = np.concatenate((clusters, noise)) - -############################################################################## -# Then construct the kernel matrix and generate 10000 samples. - -K = points.rbf_kernel(R, 1.0) - -samples = points.sample(K, 10.0, 10000) - -############################################################################## -# We obtain the indices of 100 points that appear most frequently in the permanental point -# process samples and visualize them. The majority of the commonly appearing points belong -# to the clusters and the points that do not appear frequently are the outlier points. Note that -# some of the background points might overlap with the clusters. - -gbs_frequent_points = np.argsort(np.sum(samples, axis=0))[-100:] - -plot_2 = plot.points( - R, [1 if i in gbs_frequent_points else 0 for i in range(len(samples[0]))], point_size=10 -) - -plotly.offline.plot(plot_2, filename="Outliers.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/Outliers.html - -############################################################################## -# The two-dimensional examples considered here can be easily extended to higher dimensions. The -# GBS point processes retain their clustering property in higher dimensions but visual inspection -# of this clustering feature might not be very straightforward. -# -# GBS point processes can potentially be used in other applications such as clustering data points -# and finding correlations in time series data. Can you design your own example for using GBS point -# processes in a new application? diff --git a/doc/tutorials_apps/run_tutorial_points.py.md5 b/doc/tutorials_apps/run_tutorial_points.py.md5 deleted file mode 100644 index 413dcf197..000000000 --- a/doc/tutorials_apps/run_tutorial_points.py.md5 +++ /dev/null @@ -1 +0,0 @@ -af38ca5ddec02ac26a4470b10b9a5c9e \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_points.rst b/doc/tutorials_apps/run_tutorial_points.rst deleted file mode 100644 index 2fb4ffb5b..000000000 --- a/doc/tutorials_apps/run_tutorial_points.rst +++ /dev/null @@ -1,264 +0,0 @@ -.. note:: - :class: sphx-glr-download-link-note - - Click :ref:`here ` to download the full example code -.. rst-class:: sphx-glr-example-title - -.. _sphx_glr_tutorials_apps_run_tutorial_points.py: - - -.. _apps-points-tutorial: - -Point processes -=============== - -*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.points` - -This section shows how to generate GBS point process samples and use them to detect outlier -points in a data set. Point processes are models for generating random point patterns and can be -useful in machine learning, providing a source of randomness with -preference towards both diversity :cite:`kulesza2012determinantal` and similarity in data. GBS -devices can be programmed to operate as special types of point processes that generate clustered -random point patterns :cite:`jahangiri2019point`. - -The probability of generating a specific pattern of points in GBS point processes depends on -matrix functions of a kernel matrix :math:`K` that describes the similarity between the points. -Matrix functions that appear in GBS point processes are typically -`permanents `__ and -`hafnians `__. 
Here we use -the permanental point process, in which the probability of observing a pattern of points :math:`S` -depends on the permanent of their corresponding kernel submatrix :math:`K_S` as -:cite:`jahangiri2019point`: - -.. math:: - \mathcal{P}(S) = \frac{1}{\alpha(S)}\text{per}(K_S), - -where :math:`\alpha` is a normalization function that depends on :math:`S` and the average number -of points. Let's look at a simple example to better understand the permanental point process. - -We first import the modules we need. Note that the :mod:`~.apps.points` module has most of -the core functionalities exploring point processes. - - -.. code-block:: default - - - import numpy as np - import plotly - from sklearn.datasets import make_blobs - from strawberryfields.apps import points, plot - - - - - - - -We define a space where the GBS point process patterns are generated. This -space is referred to as the state space and is defined by a set of points. The -point process selects a subset of these points in each sample. Here we create -a 20 :math:`\times` 20 square grid of points. - - -.. code-block:: default - - - R = np.array([(i, j) for i in range(20) for j in range(20)]) - - - - - - - -The rows of R are the coordinates of the points. - -Next step is to create the kernel matrix for the points of this discrete space. We call -the :func:`~.rbf_kernel` function which uses the *radial basis function* (RBF) kernel defined as: - -.. math:: - K_{i,j} = e^{-\|\bf{r}_i-\bf{r}_j\|^2/2\sigma^2}, - -where :math:`\bf{r}_i` are the coordinates of point :math:`i` and :math:`\sigma` is a kernel -parameter that determines the scale of the kernel. - -In the RBF kernel, points that are much further than a distance :math:`\sigma` from each other -lead to small entries of the kernel matrix, whereas points much closer than :math:`\sigma` -generate large entries. Now consider a specific point pattern in which all points -are close to each other, which simply means that their matrix elements have larger entries. The -permanent of a matrix is a sum over the product of some matrix entries. Therefore, -the submatrix that corresponds to those points has a large permanent and the probability of -observing them in a sample is larger. - -For kernel matrices that are positive-semidefinite, such as the RBF kernel, there exist efficient -quantum-inspired classical algorithms for permanental point process sampling -:cite:`jahangiri2019point`. In this tutorial we use positive-semidefinite kernels and the -quantum-inspired classical algorithm. - -Let's construct the RBF kernel with the parameter :math:`\sigma` set to 2.5. - - -.. code-block:: default - - - K = points.rbf_kernel(R, 2.5) - - - - - - - -We generate 10 samples with an average number of 50 points per sample by calling -the :func:`~.points.sample` function of the :mod:`~.apps.points` module. - - -.. code-block:: default - - - samples = points.sample(K, 50.0, 10) - - - - - - - -We visualize the first sample by using the :func:`~.points` function of -the :mod:`~.apps.plot` module. The point patterns generated by the permanental point process -usually have a higher degree of clustering compared to a uniformly random pattern. - - -.. code-block:: default - - - plot_1 = plot.points(R, samples[0], point_size=10) - - plotly.offline.plot(plot_1, filename="Points.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/Points.html - -.. note:: - The command ``plotly.offline.plot()`` is used to display plots in the documentation. 
In - practice, you can simply use ``plot_1.show()`` to view your graph. - -Outlier Detection ------------------ - -When the distribution of points in a given space is inhomogeneous, GBS point processes -sample points from the dense regions with higher probability. This feature of the GBS point -processes can be used to detect outlier points in a data set. In this example, we create two -dense clusters and place them in a two-dimensional space containing some randomly distributed -points in the background. We consider the random background points as outliers to the clustered -points and show that the permanental point process selects points from the dense clusters with -a higher probability. - -We first create the data points. The clusters have 50 points each and the points have a -standard deviation of 0.3. The clusters are centered at :math:`[x = 2, y = 2]` and :math:`[x = 4, -y = 4]`, respectively. We also add 25 randomly generated points to the data set. - - -.. code-block:: default - - - clusters = make_blobs(n_samples=100, centers=[[2, 2], [4, 4]], cluster_std=0.3)[0] - - noise = np.random.rand(25, 2) * 6.0 - - R = np.concatenate((clusters, noise)) - - - - - - - -Then construct the kernel matrix and generate 10000 samples. - - -.. code-block:: default - - - K = points.rbf_kernel(R, 1.0) - - samples = points.sample(K, 10.0, 10000) - - - - - - - -We obtain the indices of 100 points that appear most frequently in the permanental point -process samples and visualize them. The majority of the commonly appearing points belong -to the clusters and the points that do not appear frequently are the outlier points. Note that -some of the background points might overlap with the clusters. - - -.. code-block:: default - - - gbs_frequent_points = np.argsort(np.sum(samples, axis=0))[-100:] - - plot_2 = plot.points( - R, [1 if i in gbs_frequent_points else 0 for i in range(len(samples[0]))], point_size=10 - ) - - plotly.offline.plot(plot_2, filename="Outliers.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/Outliers.html - -The two-dimensional examples considered here can be easily extended to higher dimensions. The -GBS point processes retain their clustering property in higher dimensions but visual inspection -of this clustering feature might not be very straightforward. - -GBS point processes can potentially be used in other applications such as clustering data points -and finding correlations in time series data. Can you design your own example for using GBS point -processes in a new application? - - -.. rst-class:: sphx-glr-timing - - **Total running time of the script:** ( 0 minutes 13.880 seconds) - - -.. _sphx_glr_download_tutorials_apps_run_tutorial_points.py: - - -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - - .. container:: sphx-glr-download - - :download:`Download Python source code: run_tutorial_points.py ` - - - - .. container:: sphx-glr-download - - :download:`Download Jupyter notebook: run_tutorial_points.ipynb ` - - -.. only:: html - - .. 
rst-class:: sphx-glr-signature - - `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/run_tutorial_sample.ipynb b/doc/tutorials_apps/run_tutorial_sample.ipynb deleted file mode 100644 index e15bc8370..000000000 --- a/doc/tutorials_apps/run_tutorial_sample.ipynb +++ /dev/null @@ -1,151 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# This cell is added by sphinx-gallery\n# It can be customized to whatever you like\n%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\nSampling from GBS\n=================\n\n*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.sample`\n\nA GBS device can be programmed to sample from any symmetric matrix $A$. To sample,\nwe must specify the mean number of photons being generated in the device and optionally the form of\ndetection used at the output: threshold detection or photon-number resolving (PNR) detection.\nThreshold detectors are restricted to measuring whether photons have arrived at the detector,\nwhereas PNR detectors are able to count the number of photons. Photon loss can also be specified\nwith the ``loss`` argument.\n\nSampling functionality is provided in the :mod:`~.apps.sample` module.\n\nLet's take a look at both types of sampling methods. We can generate samples from a random\n5-dimensional symmetric matrix:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from strawberryfields.apps import sample\nimport numpy as np\n\nmodes = 5\nn_mean = 6\nsamples = 5\n\nA = np.random.normal(0, 1, (modes, modes))\nA = A + A.T\n\ns_thresh = sample.sample(A, n_mean, samples, threshold=True)\ns_pnr = sample.sample(A, n_mean, samples, threshold=False)\n\nprint(s_thresh)\nprint(s_pnr)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In each case, a sample is a sequence of integers of length five, i.e., ``len(modes) = 5``.\nThreshold samples are ``0``'s and ``1``'s, corresponding to whether or not photons were\ndetected in a mode. A ``1`` here is conventionally called a \"click\". PNR samples are\nnon-negative integers counting the number of photons detected in each mode. For example,\nsuppose a PNR sample is ``[2, 1, 1, 0, 0]``, meaning that 2 photons were detected in mode 0,\n1 photons were detected in modes 1 and 2, and 0 photons were detected in modes 3 and 4. If\nthreshold detectors were used instead, the sample would be: ``[1, 1, 1, 0, 0]``.\n\nA more general :func:`~.apps.sample.gaussian` function allows for sampling from arbitrary pure\nGaussian states.\n\nSampling subgraphs\n------------------\n\nSo when would threshold detection or PNR detection be preferred in GBS? Since threshold samples\ncan be post-processed from PNR samples, we might expect that PNR detection is always the\npreferred choice. However, in practice *simulating* PNR-based GBS is significantly slower,\nand it turns out that threshold samples can provide enough useful information for a range of\napplications.\n\nStrawberry Fields provides tools for solving graph-based problems. In this setting,\nwe typically want to use GBS to sample subgraphs, which are likely to be dense due to the\nprobability distribution of GBS :cite:`arrazola2018using`. In this case, threshold sampling\nis enough, since it lets us select nodes of the subgraph. 
Let's take a look at this by using a\nsmall fixed graph as an example:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from strawberryfields.apps import plot\nimport networkx as nx\nimport plotly\n\nadj = np.array(\n [\n [0, 1, 0, 0, 1, 1],\n [1, 0, 1, 0, 1, 1],\n [0, 1, 0, 1, 1, 0],\n [0, 0, 1, 0, 1, 0],\n [1, 1, 1, 1, 0, 1],\n [1, 1, 0, 0, 1, 0],\n ]\n)\n\ngraph = nx.Graph(adj)\nplot_graph = plot.graph(graph)\n\nplotly.offline.plot(plot_graph, filename=\"random_graph.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/random_graph.html\n\n
<div class=\"alert alert-info\"><h4>Note</h4><p>The command ``plotly.offline.plot()`` is used to display plots in the documentation. In\n practice, you can simply use ``plot_graph.show()`` to view your graph.</p></div>
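As an editorial aside (not part of the original tutorial file) on the threshold-versus-PNR discussion earlier in this tutorial: threshold samples can be recovered from PNR samples simply by clipping each photon count at one, as in the ``[2, 1, 1, 0, 0]`` example quoted above. A minimal sketch:

.. code-block:: default

    pnr_sample = [2, 1, 1, 0, 0]                         # photon counts from PNR detectors
    threshold_sample = [min(n, 1) for n in pnr_sample]   # click (1) / no click (0) per mode
    print(threshold_sample)                              # [1, 1, 1, 0, 0]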
\n\nThis is a 6-node graph with the nodes ``[0, 1, 4, 5]`` fully connected to each other. We expect\nto be able to sample dense subgraphs with high probability.\n\nSamples can be generated from this graph through GBS using the :func:`~.apps.sample.sample`\nfunction:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "n_mean = 4\nsamples = 20\n\ns = sample.sample(adj, n_mean, samples)\n\nprint(s[:5])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Each sample in ``s`` is a list of modes with ``1``'s for nodes that have clicked and ``0``'s\nfor nodes that haven't. We want to convert a sample to another representation where the result\nis a list of modes that have clicked. This list of modes can be used to select a subgraph.\nFor example, if ``[0, 1, 0, 1, 1, 0]`` is a sample from GBS then ``[1, 3, 4]`` are\nthe selected nodes of the corresponding subgraph.\n\nHowever, the number of clicks in GBS is a random variable and we are not always guaranteed to\nhave enough clicks in a sample for the resultant subgraph to be of interest. We can filter out\nthe uninteresting samples using the :func:`~.apps.sample.postselect` function:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "min_clicks = 3\nmax_clicks = 4\n\ns = sample.postselect(s, min_clicks, max_clicks)\n\nprint(len(s))\ns.append([0, 1, 0, 1, 1, 0])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As expected, we have fewer samples than before. The number of samples that survive this\npostselection is determined by the mean photon number in GBS. We have also added in our example\nsample ``[0, 1, 0, 1, 1, 0]`` to ensure that there is at least one for the following.\n\nLet's convert our postselected samples to subgraphs:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "subgraphs = sample.to_subgraphs(s, graph)\n\nprint(subgraphs)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can take a look at one of the sampled subgraphs:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "plotly.offline.plot(plot.graph(graph, subgraphs[0]), filename=\"subgraph.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/subgraph.html\n\nThese sampled subgraphs act as the starting point for some of the applications made available\nin Strawberry Fields, including the maximum clique and dense subgraph identification problems.\n\n
<div class=\"alert alert-info\"><h4>Note</h4><p>Simulating GBS can be computationally intensive when using both threshold and PNR\n detectors. After all, we are using a classical algorithm to simulate a quantum process!\n To help users get to grips with the applications of Strawberry Fields as quickly as\n possible, we have provided datasets of pre-calculated GBS samples. These datasets are\n available in the :mod:`~.apps.data` module.</p></div>
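To make the pre-calculated datasets mentioned in the note above concrete, here is a minimal sketch (an editorial illustration, not part of the original tutorial file) that loads one of the MUTAG sample sets used later in these tutorials. It relies only on the ``adj`` and ``modes`` attributes and the indexing behaviour shown elsewhere in this document:

.. code-block:: default

    from strawberryfields.apps import data

    m0 = data.Mutag0()   # pre-generated GBS samples for the first MUTAG graph
    print(m0.modes)      # number of modes in the sampled device
    print(m0.adj.shape)  # adjacency matrix of the encoded graph
    print(m0[0])         # the first sample: photon counts in each mode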
\n\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_sample.py b/doc/tutorials_apps/run_tutorial_sample.py deleted file mode 100644 index c7f1bff3c..000000000 --- a/doc/tutorials_apps/run_tutorial_sample.py +++ /dev/null @@ -1,154 +0,0 @@ -# pylint: disable=invalid-name,no-member,wrong-import-position,wrong-import-order,ungrouped-imports -""" -.. _apps-sample-tutorial: - -Sampling from GBS -================= - -*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.sample` - -A GBS device can be programmed to sample from any symmetric matrix :math:`A`. To sample, -we must specify the mean number of photons being generated in the device and optionally the form of -detection used at the output: threshold detection or photon-number resolving (PNR) detection. -Threshold detectors are restricted to measuring whether photons have arrived at the detector, -whereas PNR detectors are able to count the number of photons. Photon loss can also be specified -with the ``loss`` argument. - -Sampling functionality is provided in the :mod:`~.apps.sample` module. - -Let's take a look at both types of sampling methods. We can generate samples from a random -5-dimensional symmetric matrix: -""" - -from strawberryfields.apps import sample -import numpy as np - -modes = 5 -n_mean = 6 -samples = 5 - -A = np.random.normal(0, 1, (modes, modes)) -A = A + A.T - -s_thresh = sample.sample(A, n_mean, samples, threshold=True) -s_pnr = sample.sample(A, n_mean, samples, threshold=False) - -print(s_thresh) -print(s_pnr) - -############################################################################## -# In each case, a sample is a sequence of integers of length five, i.e., ``len(modes) = 5``. -# Threshold samples are ``0``'s and ``1``'s, corresponding to whether or not photons were -# detected in a mode. A ``1`` here is conventionally called a "click". PNR samples are -# non-negative integers counting the number of photons detected in each mode. For example, -# suppose a PNR sample is ``[2, 1, 1, 0, 0]``, meaning that 2 photons were detected in mode 0, -# 1 photons were detected in modes 1 and 2, and 0 photons were detected in modes 3 and 4. If -# threshold detectors were used instead, the sample would be: ``[1, 1, 1, 0, 0]``. -# -# A more general :func:`~.apps.sample.gaussian` function allows for sampling from arbitrary pure -# Gaussian states. -# -# Sampling subgraphs -# ------------------ -# -# So when would threshold detection or PNR detection be preferred in GBS? Since threshold samples -# can be post-processed from PNR samples, we might expect that PNR detection is always the -# preferred choice. However, in practice *simulating* PNR-based GBS is significantly slower, -# and it turns out that threshold samples can provide enough useful information for a range of -# applications. -# -# Strawberry Fields provides tools for solving graph-based problems. In this setting, -# we typically want to use GBS to sample subgraphs, which are likely to be dense due to the -# probability distribution of GBS :cite:`arrazola2018using`. 
In this case, threshold sampling -# is enough, since it lets us select nodes of the subgraph. Let's take a look at this by using a -# small fixed graph as an example: - -from strawberryfields.apps import plot -import networkx as nx -import plotly - -adj = np.array( - [ - [0, 1, 0, 0, 1, 1], - [1, 0, 1, 0, 1, 1], - [0, 1, 0, 1, 1, 0], - [0, 0, 1, 0, 1, 0], - [1, 1, 1, 1, 0, 1], - [1, 1, 0, 0, 1, 0], - ] -) - -graph = nx.Graph(adj) -plot_graph = plot.graph(graph) - -plotly.offline.plot(plot_graph, filename="random_graph.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/random_graph.html -# -# .. note:: -# The command ``plotly.offline.plot()`` is used to display plots in the documentation. In -# practice, you can simply use ``plot_graph.show()`` to view your graph. -# -# This is a 6-node graph with the nodes ``[0, 1, 4, 5]`` fully connected to each other. We expect -# to be able to sample dense subgraphs with high probability. -# -# Samples can be generated from this graph through GBS using the :func:`~.apps.sample.sample` -# function: - -n_mean = 4 -samples = 20 - -s = sample.sample(adj, n_mean, samples) - -print(s[:5]) - -############################################################################## -# Each sample in ``s`` is a list of modes with ``1``'s for nodes that have clicked and ``0``'s -# for nodes that haven't. We want to convert a sample to another representation where the result -# is a list of modes that have clicked. This list of modes can be used to select a subgraph. -# For example, if ``[0, 1, 0, 1, 1, 0]`` is a sample from GBS then ``[1, 3, 4]`` are -# the selected nodes of the corresponding subgraph. -# -# However, the number of clicks in GBS is a random variable and we are not always guaranteed to -# have enough clicks in a sample for the resultant subgraph to be of interest. We can filter out -# the uninteresting samples using the :func:`~.apps.sample.postselect` function: - -min_clicks = 3 -max_clicks = 4 - -s = sample.postselect(s, min_clicks, max_clicks) - -print(len(s)) -s.append([0, 1, 0, 1, 1, 0]) - -############################################################################## -# As expected, we have fewer samples than before. The number of samples that survive this -# postselection is determined by the mean photon number in GBS. We have also added in our example -# sample ``[0, 1, 0, 1, 1, 0]`` to ensure that there is at least one for the following. -# -# Let's convert our postselected samples to subgraphs: - -subgraphs = sample.to_subgraphs(s, graph) - -print(subgraphs) - -############################################################################## -# We can take a look at one of the sampled subgraphs: - -plotly.offline.plot(plot.graph(graph, subgraphs[0]), filename="subgraph.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/subgraph.html -# -# These sampled subgraphs act as the starting point for some of the applications made available -# in Strawberry Fields, including the maximum clique and dense subgraph identification problems. -# -# .. note:: -# Simulating GBS can be computationally intensive when using both threshold and PNR -# detectors. After all, we are using a classical algorithm to simulate a quantum process! -# To help users get to grips with the applications of Strawberry Fields as quickly as -# possible, we have provided datasets of pre-calculated GBS samples. 
These datasets are -# available in the :mod:`~.apps.data` module. diff --git a/doc/tutorials_apps/run_tutorial_sample.py.md5 b/doc/tutorials_apps/run_tutorial_sample.py.md5 deleted file mode 100644 index 87a137b7b..000000000 --- a/doc/tutorials_apps/run_tutorial_sample.py.md5 +++ /dev/null @@ -1 +0,0 @@ -53d531d0740680498035b24e273028d0 \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_sample.rst b/doc/tutorials_apps/run_tutorial_sample.rst deleted file mode 100644 index b1afb773d..000000000 --- a/doc/tutorials_apps/run_tutorial_sample.rst +++ /dev/null @@ -1,276 +0,0 @@ -.. note:: - :class: sphx-glr-download-link-note - - Click :ref:`here ` to download the full example code -.. rst-class:: sphx-glr-example-title - -.. _sphx_glr_tutorials_apps_run_tutorial_sample.py: - - -.. _apps-sample-tutorial: - -Sampling from GBS -================= - -*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.sample` - -A GBS device can be programmed to sample from any symmetric matrix :math:`A`. To sample, -we must specify the mean number of photons being generated in the device and optionally the form of -detection used at the output: threshold detection or photon-number resolving (PNR) detection. -Threshold detectors are restricted to measuring whether photons have arrived at the detector, -whereas PNR detectors are able to count the number of photons. Photon loss can also be specified -with the ``loss`` argument. - -Sampling functionality is provided in the :mod:`~.apps.sample` module. - -Let's take a look at both types of sampling methods. We can generate samples from a random -5-dimensional symmetric matrix: - - -.. code-block:: default - - - from strawberryfields.apps import sample - import numpy as np - - modes = 5 - n_mean = 6 - samples = 5 - - A = np.random.normal(0, 1, (modes, modes)) - A = A + A.T - - s_thresh = sample.sample(A, n_mean, samples, threshold=True) - s_pnr = sample.sample(A, n_mean, samples, threshold=False) - - print(s_thresh) - print(s_pnr) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - [[1, 0, 1, 1, 0], [0, 0, 0, 1, 1], [0, 0, 0, 1, 1], [1, 0, 0, 1, 0], [1, 1, 0, 1, 1]] - [[0, 0, 0, 0, 0], [4, 1, 0, 4, 1], [0, 0, 0, 0, 0], [0, 0, 0, 1, 1], [1, 0, 0, 3, 0]] - - -In each case, a sample is a sequence of integers of length five, i.e., ``len(modes) = 5``. -Threshold samples are ``0``'s and ``1``'s, corresponding to whether or not photons were -detected in a mode. A ``1`` here is conventionally called a "click". PNR samples are -non-negative integers counting the number of photons detected in each mode. For example, -suppose a PNR sample is ``[2, 1, 1, 0, 0]``, meaning that 2 photons were detected in mode 0, -1 photons were detected in modes 1 and 2, and 0 photons were detected in modes 3 and 4. If -threshold detectors were used instead, the sample would be: ``[1, 1, 1, 0, 0]``. - -A more general :func:`~.apps.sample.gaussian` function allows for sampling from arbitrary pure -Gaussian states. - -Sampling subgraphs ------------------- - -So when would threshold detection or PNR detection be preferred in GBS? Since threshold samples -can be post-processed from PNR samples, we might expect that PNR detection is always the -preferred choice. However, in practice *simulating* PNR-based GBS is significantly slower, -and it turns out that threshold samples can provide enough useful information for a range of -applications. 
- -Strawberry Fields provides tools for solving graph-based problems. In this setting, -we typically want to use GBS to sample subgraphs, which are likely to be dense due to the -probability distribution of GBS :cite:`arrazola2018using`. In this case, threshold sampling -is enough, since it lets us select nodes of the subgraph. Let's take a look at this by using a -small fixed graph as an example: - - -.. code-block:: default - - - from strawberryfields.apps import plot - import networkx as nx - import plotly - - adj = np.array( - [ - [0, 1, 0, 0, 1, 1], - [1, 0, 1, 0, 1, 1], - [0, 1, 0, 1, 1, 0], - [0, 0, 1, 0, 1, 0], - [1, 1, 1, 1, 0, 1], - [1, 1, 0, 0, 1, 0], - ] - ) - - graph = nx.Graph(adj) - plot_graph = plot.graph(graph) - - plotly.offline.plot(plot_graph, filename="random_graph.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/random_graph.html - -.. note:: - The command ``plotly.offline.plot()`` is used to display plots in the documentation. In - practice, you can simply use ``plot_graph.show()`` to view your graph. - -This is a 6-node graph with the nodes ``[0, 1, 4, 5]`` fully connected to each other. We expect -to be able to sample dense subgraphs with high probability. - -Samples can be generated from this graph through GBS using the :func:`~.apps.sample.sample` -function: - - -.. code-block:: default - - - n_mean = 4 - samples = 20 - - s = sample.sample(adj, n_mean, samples) - - print(s[:5]) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]] - - -Each sample in ``s`` is a list of modes with ``1``'s for nodes that have clicked and ``0``'s -for nodes that haven't. We want to convert a sample to another representation where the result -is a list of modes that have clicked. This list of modes can be used to select a subgraph. -For example, if ``[0, 1, 0, 1, 1, 0]`` is a sample from GBS then ``[1, 3, 4]`` are -the selected nodes of the corresponding subgraph. - -However, the number of clicks in GBS is a random variable and we are not always guaranteed to -have enough clicks in a sample for the resultant subgraph to be of interest. We can filter out -the uninteresting samples using the :func:`~.apps.sample.postselect` function: - - -.. code-block:: default - - - min_clicks = 3 - max_clicks = 4 - - s = sample.postselect(s, min_clicks, max_clicks) - - print(len(s)) - s.append([0, 1, 0, 1, 1, 0]) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - 4 - - -As expected, we have fewer samples than before. The number of samples that survive this -postselection is determined by the mean photon number in GBS. We have also added in our example -sample ``[0, 1, 0, 1, 1, 0]`` to ensure that there is at least one for the following. - -Let's convert our postselected samples to subgraphs: - - -.. code-block:: default - - - subgraphs = sample.to_subgraphs(s, graph) - - print(subgraphs) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - [[0, 1, 4, 5], [0, 3, 4], [1, 2, 4], [1, 2, 3, 4], [1, 3, 4]] - - -We can take a look at one of the sampled subgraphs: - - -.. code-block:: default - - - plotly.offline.plot(plot.graph(graph, subgraphs[0]), filename="subgraph.html") - - - - - - - -.. 
raw:: html - :file: ../../examples_apps/subgraph.html - -These sampled subgraphs act as the starting point for some of the applications made available -in Strawberry Fields, including the maximum clique and dense subgraph identification problems. - -.. note:: - Simulating GBS can be computationally intensive when using both threshold and PNR - detectors. After all, we are using a classical algorithm to simulate a quantum process! - To help users get to grips with the applications of Strawberry Fields as quickly as - possible, we have provided datasets of pre-calculated GBS samples. These datasets are - available in the :mod:`~.apps.data` module. - - -.. rst-class:: sphx-glr-timing - - **Total running time of the script:** ( 0 minutes 8.272 seconds) - - -.. _sphx_glr_download_tutorials_apps_run_tutorial_sample.py: - - -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - - .. container:: sphx-glr-download - - :download:`Download Python source code: run_tutorial_sample.py ` - - - - .. container:: sphx-glr-download - - :download:`Download Jupyter notebook: run_tutorial_sample.ipynb ` - - -.. only:: html - - .. rst-class:: sphx-glr-signature - - `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/run_tutorial_similarity.ipynb b/doc/tutorials_apps/run_tutorial_similarity.ipynb deleted file mode 100644 index 84615da86..000000000 --- a/doc/tutorials_apps/run_tutorial_similarity.ipynb +++ /dev/null @@ -1,392 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# This cell is added by sphinx-gallery\n# It can be customized to whatever you like\n%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\nGraph similarity\n================\n\n*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.similarity`\n\nThis page looks at how to use GBS to construct a similarity measure between graphs,\nknown as a graph kernel :cite:`schuld2019quantum`. Kernels can be applied to graph-based\ndata for machine learning tasks such as classification using a support vector machine.\n\nGraph data\n----------\n\nWe begin by fixing a dataset of graphs to consider and loading GBS samples from these graphs,\nwhich will be needed in the following.\n\nLet's use the MUTAG dataset of graphs :cite:`debnath1991structure,kriege2012subgraph`. This is a\ndataset of 188 different graphs that each correspond to the structure of a chemical compound. Our\ngoal is to use GBS samples from these graphs to measure their similarity.\n\nThe :mod:`~.apps.data` module provides pre-calculated GBS samples for selected graphs in the MUTAG\ndataset. Each set of samples is generated by encoding the graph into a GBS device, and collecting\nphoton click events. We'll start by loading four sets of samples and visualizing the\ncorresponding graphs.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from strawberryfields.apps import data, plot, similarity\n\nm0 = data.Mutag0()\nm1 = data.Mutag1()\nm2 = data.Mutag2()\nm3 = data.Mutag3()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "These datasets contain both the adjacency matrix of the graph and the samples generated through\nGBS. 
We can access the adjacency matrix through:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "m0_a = m0.adj\nm1_a = m1.adj\nm2_a = m2.adj\nm3_a = m3.adj" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Samples from these graphs can be accessed by indexing:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(m0[0])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now plot the four graphs using the :mod:`~.apps.plot` module. To use this module,\nwe need to convert the adjacency matrices into NetworkX Graphs:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import networkx as nx\nimport plotly\n\nplot_mutag_0 = plot.graph(nx.Graph(m0_a))\nplot_mutag_1 = plot.graph(nx.Graph(m1_a))\nplot_mutag_2 = plot.graph(nx.Graph(m2_a))\nplot_mutag_3 = plot.graph(nx.Graph(m3_a))\n\nplotly.offline.plot(plot_mutag_0, filename=\"MUTAG_0.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/MUTAG_0.html\n\n
<div class=\"alert alert-info\"><h4>Note</h4><p>The command ``plotly.offline.plot()`` is used to display plots in the documentation. In\n practice, you can simply use ``plot_mutag_0.show()`` to view your graph.</p></div>
\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "plotly.offline.plot(plot_mutag_1, filename=\"MUTAG_1.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/MUTAG_1.html\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "plotly.offline.plot(plot_mutag_2, filename=\"MUTAG_2.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/MUTAG_2.html\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "plotly.offline.plot(plot_mutag_3, filename=\"MUTAG_3.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/MUTAG_3.html\n\nThe graphs of ``m1_a`` and ``m2_a`` look very similar. In fact,\nit turns out that they are *isomorphic* to each other, which means that the graphs can be made\nidentical by permuting their node labels.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Creating a feature vector\n-------------------------\n\nFollowing :cite:`schuld2019quantum`, we can create a *feature vector* to describe each graph.\nThese feature vectors contain information about the graphs and can be viewed as a mapping to a\nhigh-dimensional feature space, a technique often used in machine learning that allows us to\nemploy properties of the feature space to separate and classify the vectors.\n\nThe feature vector of a graph can be composed in a variety of ways. One approach is to\nassociate features with the relative frequencies of certain types of measurements being\nrecorded from a GBS device configured to sample from the graph, as we now discuss.\n\nWe begin by defining the concept of an *orbit*, which is the set of all GBS samples that are\nequivalent under permutation of the modes. A sample can be converted to its corresponding orbit\nusing the :func:`~.sample_to_orbit` function. For example, the first sample of ``m0`` is ``[0,\n0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]`` and has orbit:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(similarity.sample_to_orbit(m0[0]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here, ``[1, 1]`` means that two photons were detected, each in a separate mode. Other samples\ncan be randomly generated from the ``[1, 1]`` orbit using:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(similarity.orbit_to_sample([1, 1], modes=m0.modes))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Orbits provide a useful way to coarse-grain the samples from GBS into outcomes that are\nstatistically more likely to be observed. However, we are interested in coarse-graining further\ninto *events*, which correspond to a combination of orbits with the same photon number such\nthat the number of photons counted in each mode does not exceed a fixed value\n``max_count_per_mode``. 
To understand this, let's look at all of the orbits with a photon\nnumber of 5:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(list(similarity.orbits(5)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "All 5-photon samples belong to one of the orbits above. A 5-photon event with\n``max_count_per_mode = 3`` means that we include the orbits: ``[[1, 1, 1, 1, 1], [2, 1, 1, 1],\n[3, 1, 1], [2, 2, 1], [3, 2]]`` and ignore the orbits ``[[4, 1], [5]]``. For example,\nthe sample ``[0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0]`` is a 5-photon event:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(similarity.sample_to_event([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0], 3))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Samples with more than ``max_count_per_mode`` in any mode are not counted as part of the event:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(similarity.sample_to_event([0, 4, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we have mastered orbits and events, how can we make a feature vector? It was shown in\n:cite:`schuld2019quantum` that one way of making a feature vector of a graph is through the\nfrequencies of events. Specifically, for a $k$ photon event $E_{k, n_{\\max}}$\nwith maximum count per mode $n_{\\max}$ and corresponding probability $p_{k,\nn_{\\max}}:=p_{E_{k, n_{\\max}}}(G)$ with respect to a graph $G$, a feature vector can be\nwritten as\n\n\\begin{align}f_{\\mathbf{k}, n_{\\max}} = (p_{k_{1}, n_{\\max}}, p_{k_{2}, n_{\\max}}, \\ldots , p_{k_{K},\n n_{\\max}}),\\end{align}\n\nwhere $\\mathbf{k} := (k_{1}, k_{2}, \\ldots , k_{K})$ is a list of different total photon\nnumbers.\n\nFor example, if $\\mathbf{k} := (2, 4, 6)$ and $n_{\\max} = 2$, we have\n\n\\begin{align}f_{(2, 4, 6), 2} = (p_{2, 2}, p_{4, 2}, p_{6, 2}).\\end{align}\n\nIn this case, we are interested in the probabilities of events $E_{2, 2}$, $E_{4,\n2}$, and $E_{6, 2}$. Suppose we are sampling from a four-mode device and have the samples\n``[0, 3, 0, 1]`` and ``[1, 2, 0, 1]``. These samples are part of the orbits ``[3, 1]`` and\n``[2, 1, 1]``, respectively. However, ``[3, 1]`` is not part of the $E_{4, 2}$ event while\n``[2, 1, 1]`` is.\n\nCalculating a feature vector\n----------------------------\n\nWe provide two methods for calculating a feature vector of GBS event probabilities in\nStrawberry Fields:\n\n1. Through sampling.\n2. Using a Monte Carlo estimate of the probability.\n\nIn the first method, all one needs to do is generate some GBS samples from the graph of\ninterest and fix the composition of the feature vector. For example, for a feature vector\n$f_{\\mathbf{k} = (2, 4, 6), n_{\\max}=2}$ we use:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(similarity.feature_vector_sampling(m0, event_photon_numbers=[2, 4, 6], max_count_per_mode=2))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For the second method, suppose we want to calculate the event probabilities exactly rather than\nthrough sampling. 
To do this, we consider the event probability $p_{k, n_{\\max}}$ as the\nsum over all sample probabilities in the event. In GBS, each sample probability is determined by\nthe hafnian of a relevant sub-adjacency matrix. While this is tough to calculate, what makes\ncalculating $p_{k, n_{\\max}}$ really challenging is the number of samples the corresponding\nevent contains! For example, the 6-photon event over 17 modes $E_{k=6, n_{\\max}=2}$\ncontains the following number of samples :\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(similarity.event_cardinality(6, 2, 17))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To avoid calculating a large number of sample probabilities, an alternative is to perform a\nMonte Carlo approximation. Here, samples within an event are selected uniformly at random and\ntheir resultant probabilities are calculated. If $N$ samples $\\{S_{1}, S_{2},\n\\ldots , S_{N}\\}$ are generated, then the event probability can be approximated as\n\n\\begin{align}p(E_{k, n_{\\max}}) \\approx \\frac{1}{N}\\sum_{i=1}^N p(S_i) |E_{k, n_{\\max}}|,\\end{align}\n\nwith $|E_{k, n_{\\max}}|$ denoting the cardinality of the event.\n\nThis method can be accessed using the :func:`~.prob_event_mc` function. The 4-photon event is\napproximated as:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(similarity.prob_event_mc(nx.Graph(m0_a), 4, max_count_per_mode=2, n_mean=6))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The feature vector can then be calculated through Monte Carlo sampling using\n:func:`~.feature_vector_mc`.\n\n
<div class=\"alert alert-info\"><h4>Note</h4><p>The results of :func:`~.prob_event_mc` and :func:`~.feature_vector_mc` are probabilistic and\n may vary between runs. Increasing the optional ``samples`` parameter will increase accuracy\n but slow down calculation.</p></div>
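For concreteness, a sketch of a Monte Carlo feature-vector calculation is given below (an editorial illustration, not part of the original tutorial file). The keyword arguments of :func:`~.feature_vector_mc` are assumed to mirror those of :func:`~.feature_vector_sampling` and :func:`~.prob_event_mc` used in this tutorial, so treat the call signature as an assumption rather than a definitive reference:

.. code-block:: default

    import networkx as nx
    from strawberryfields.apps import data, similarity

    graph = nx.Graph(data.Mutag0().adj)

    # Assumed call pattern: event photon numbers and max count per mode as in
    # feature_vector_sampling, plus the n_mean used with prob_event_mc above.
    fv = similarity.feature_vector_mc(
        graph, event_photon_numbers=[2, 4, 6], max_count_per_mode=2, n_mean=6
    )
    print(fv)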
\n\nThe second method of Monte Carlo approximation is intended for use in scenarios where it is\ncomputationally intensive to pre-calculate a statistically significant dataset of samples from\nGBS.\n\nMachine learning with GBS graph kernels\n---------------------------------------\n\nThe power of feature vectors that embed graphs in a vector space of real numbers is that we can\nnow measure similarities between graphs. This is very useful in machine learning, where similar\nlabels are assigned to graphs that are close to each other. GBS feature vectors therefore give\nrise to a similarity measure between graphs!\n\nLet's build this up a bit more. The MUTAG dataset we are considering contains not only graphs\ncorresponding to the structure of chemical compounds, but also a *label* of each\ncompound based upon its mutagenic effect. The four graphs we consider here have labels:\n\n- MUTAG0: Class 1\n- MUTAG1: Class 0\n- MUTAG2: Class 0\n- MUTAG3: Class 1\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "classes = [1, 0, 0, 1]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can use GBS feature vectors in a `support vector machine\n`__ (SVM) that finds a separating\nhyperplane between classes in the feature space. We start by defining two-dimensional feature\nvectors:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "events = [8, 10]\nmax_count = 2\n\nf1 = similarity.feature_vector_sampling(m0, events, max_count)\nf2 = similarity.feature_vector_sampling(m1, events, max_count)\nf3 = similarity.feature_vector_sampling(m2, events, max_count)\nf4 = similarity.feature_vector_sampling(m3, events, max_count)\n\nimport numpy as np\n\nR = np.array([f1, f2, f3, f4])\n\nprint(R)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There is freedom in the choice of ``events`` composing the feature vectors and we encourage the\nreader to explore different combinations. Note, however, that odd photon-numbered events have\nzero probability because ideal GBS only generates and outputs pairs of photons.\n\nGiven our points in the feature space and their target labels, we can use\nscikit-learn's Support Vector Machine `LinearSVC `__ as our model to train:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import StandardScaler\n\nR_scaled = StandardScaler().fit_transform(R) # Transform data to zero mean and unit variance\n\nclassifier = LinearSVC()\nclassifier.fit(R_scaled, classes)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here, the term \"linear\" refers to the *kernel* function used to calculate inner products\nbetween vectors in the space. We can use a linear SVM because we have already embedded the\ngraphs in a feature space based on GBS. 
We have also rescaled the feature vectors so that they\nzero mean and unit variance using scikit-learn's ``StandardScaler``, a technique\n`often used `__ in machine learning.\n\nWe can then visualize the trained SVM by plotting the decision boundary with respect to the\npoints:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "w = classifier.coef_[0]\ni = classifier.intercept_[0]\n\nm = -w[0] / w[1] # finding the values for y = mx + b\nb = -i / w[1]\n\nxx = [-1, 1]\nyy = [m * x + b for x in xx]\n\nfig = plot.points(R_scaled, classes)\nfig.add_trace(plotly.graph_objects.Scatter(x=xx, y=yy, mode=\"lines\"))\n\nplotly.offline.plot(fig, filename=\"SVM.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/SVM.html\n\nThis plot shows the two classes (grey points for class 0 and red points for class 1)\nsuccessfully separated by the linear hyperplane using the GBS feature space. Moreover,\nrecall that the two MUTAG1 and MUTAG2 graphs of class 0 are actually isomorphic. Reassuringly,\ntheir corresponding feature vectors are very similar. In fact, the feature vectors of\nisomorphic graphs should always be identical :cite:`bradler2018graph` - the small discrepancy\nin this plot is due to the statistical approximation from sampling.\n\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_similarity.py b/doc/tutorials_apps/run_tutorial_similarity.py deleted file mode 100644 index e4b985a9b..000000000 --- a/doc/tutorials_apps/run_tutorial_similarity.py +++ /dev/null @@ -1,314 +0,0 @@ -# pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports,invalid-name -""" -.. _apps-sim-tutorial: - -Graph similarity -================ - -*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.similarity` - -This page looks at how to use GBS to construct a similarity measure between graphs, -known as a graph kernel :cite:`schuld2019quantum`. Kernels can be applied to graph-based -data for machine learning tasks such as classification using a support vector machine. - -Graph data ----------- - -We begin by fixing a dataset of graphs to consider and loading GBS samples from these graphs, -which will be needed in the following. - -Let's use the MUTAG dataset of graphs :cite:`debnath1991structure,kriege2012subgraph`. This is a -dataset of 188 different graphs that each correspond to the structure of a chemical compound. Our -goal is to use GBS samples from these graphs to measure their similarity. - -The :mod:`~.apps.data` module provides pre-calculated GBS samples for selected graphs in the MUTAG -dataset. Each set of samples is generated by encoding the graph into a GBS device, and collecting -photon click events. We'll start by loading four sets of samples and visualizing the -corresponding graphs. 
-""" - -from strawberryfields.apps import data, plot, similarity - -m0 = data.Mutag0() -m1 = data.Mutag1() -m2 = data.Mutag2() -m3 = data.Mutag3() - -############################################################################## -# These datasets contain both the adjacency matrix of the graph and the samples generated through -# GBS. We can access the adjacency matrix through: - -m0_a = m0.adj -m1_a = m1.adj -m2_a = m2.adj -m3_a = m3.adj - -############################################################################## -# Samples from these graphs can be accessed by indexing: - -print(m0[0]) - -############################################################################## -# We can now plot the four graphs using the :mod:`~.apps.plot` module. To use this module, -# we need to convert the adjacency matrices into NetworkX Graphs: - -import networkx as nx -import plotly - -plot_mutag_0 = plot.graph(nx.Graph(m0_a)) -plot_mutag_1 = plot.graph(nx.Graph(m1_a)) -plot_mutag_2 = plot.graph(nx.Graph(m2_a)) -plot_mutag_3 = plot.graph(nx.Graph(m3_a)) - -plotly.offline.plot(plot_mutag_0, filename="MUTAG_0.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/MUTAG_0.html -# -# .. note:: -# The command ``plotly.offline.plot()`` is used to display plots in the documentation. In -# practice, you can simply use ``plot_mutag_0.show()`` to view your graph. - -plotly.offline.plot(plot_mutag_1, filename="MUTAG_1.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/MUTAG_1.html - -plotly.offline.plot(plot_mutag_2, filename="MUTAG_2.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/MUTAG_2.html - -plotly.offline.plot(plot_mutag_3, filename="MUTAG_3.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/MUTAG_3.html -# -# The graphs of ``m1_a`` and ``m2_a`` look very similar. In fact, -# it turns out that they are *isomorphic* to each other, which means that the graphs can be made -# identical by permuting their node labels. - -############################################################################## -# Creating a feature vector -# ------------------------- -# -# Following :cite:`schuld2019quantum`, we can create a *feature vector* to describe each graph. -# These feature vectors contain information about the graphs and can be viewed as a mapping to a -# high-dimensional feature space, a technique often used in machine learning that allows us to -# employ properties of the feature space to separate and classify the vectors. -# -# The feature vector of a graph can be composed in a variety of ways. One approach is to -# associate features with the relative frequencies of certain types of measurements being -# recorded from a GBS device configured to sample from the graph, as we now discuss. -# -# We begin by defining the concept of an *orbit*, which is the set of all GBS samples that are -# equivalent under permutation of the modes. A sample can be converted to its corresponding orbit -# using the :func:`~.sample_to_orbit` function. 
For example, the first sample of ``m0`` is ``[0, -# 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]`` and has orbit: - -print(similarity.sample_to_orbit(m0[0])) - -############################################################################## -# Here, ``[1, 1]`` means that two photons were detected, each in a separate mode. Other samples -# can be randomly generated from the ``[1, 1]`` orbit using: - -print(similarity.orbit_to_sample([1, 1], modes=m0.modes)) - -############################################################################## -# Orbits provide a useful way to coarse-grain the samples from GBS into outcomes that are -# statistically more likely to be observed. However, we are interested in coarse-graining further -# into *events*, which correspond to a combination of orbits with the same photon number such -# that the number of photons counted in each mode does not exceed a fixed value -# ``max_count_per_mode``. To understand this, let's look at all of the orbits with a photon -# number of 5: - -print(list(similarity.orbits(5))) - -############################################################################## -# All 5-photon samples belong to one of the orbits above. A 5-photon event with -# ``max_count_per_mode = 3`` means that we include the orbits: ``[[1, 1, 1, 1, 1], [2, 1, 1, 1], -# [3, 1, 1], [2, 2, 1], [3, 2]]`` and ignore the orbits ``[[4, 1], [5]]``. For example, -# the sample ``[0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0]`` is a 5-photon event: - -print(similarity.sample_to_event([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0], 3)) - -############################################################################## -# Samples with more than ``max_count_per_mode`` in any mode are not counted as part of the event: - -print(similarity.sample_to_event([0, 4, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)) - -############################################################################## -# Now that we have mastered orbits and events, how can we make a feature vector? It was shown in -# :cite:`schuld2019quantum` that one way of making a feature vector of a graph is through the -# frequencies of events. Specifically, for a :math:`k` photon event :math:`E_{k, n_{\max}}` -# with maximum count per mode :math:`n_{\max}` and corresponding probability :math:`p_{k, -# n_{\max}}:=p_{E_{k, n_{\max}}}(G)` with respect to a graph :math:`G`, a feature vector can be -# written as -# -# .. math:: -# f_{\mathbf{k}, n_{\max}} = (p_{k_{1}, n_{\max}}, p_{k_{2}, n_{\max}}, \ldots , p_{k_{K}, -# n_{\max}}), -# -# where :math:`\mathbf{k} := (k_{1}, k_{2}, \ldots , k_{K})` is a list of different total photon -# numbers. -# -# For example, if :math:`\mathbf{k} := (2, 4, 6)` and :math:`n_{\max} = 2`, we have -# -# .. math:: -# f_{(2, 4, 6), 2} = (p_{2, 2}, p_{4, 2}, p_{6, 2}). -# -# In this case, we are interested in the probabilities of events :math:`E_{2, 2}`, :math:`E_{4, -# 2}`, and :math:`E_{6, 2}`. Suppose we are sampling from a four-mode device and have the samples -# ``[0, 3, 0, 1]`` and ``[1, 2, 0, 1]``. These samples are part of the orbits ``[3, 1]`` and -# ``[2, 1, 1]``, respectively. However, ``[3, 1]`` is not part of the :math:`E_{4, 2}` event while -# ``[2, 1, 1]`` is. -# -# Calculating a feature vector -# ---------------------------- -# -# We provide two methods for calculating a feature vector of GBS event probabilities in -# Strawberry Fields: -# -# 1. Through sampling. -# 2. Using a Monte Carlo estimate of the probability. 
-# -# In the first method, all one needs to do is generate some GBS samples from the graph of -# interest and fix the composition of the feature vector. For example, for a feature vector -# :math:`f_{\mathbf{k} = (2, 4, 6), n_{\max}=2}` we use: - -print(similarity.feature_vector_sampling(m0, event_photon_numbers=[2, 4, 6], max_count_per_mode=2)) - -############################################################################## -# For the second method, suppose we want to calculate the event probabilities exactly rather than -# through sampling. To do this, we consider the event probability :math:`p_{k, n_{\max}}` as the -# sum over all sample probabilities in the event. In GBS, each sample probability is determined by -# the hafnian of a relevant sub-adjacency matrix. While this is tough to calculate, what makes -# calculating :math:`p_{k, n_{\max}}` really challenging is the number of samples the corresponding -# event contains! For example, the 6-photon event over 17 modes :math:`E_{k=6, n_{\max}=2}` -# contains the following number of samples : - -print(similarity.event_cardinality(6, 2, 17)) - -############################################################################## -# To avoid calculating a large number of sample probabilities, an alternative is to perform a -# Monte Carlo approximation. Here, samples within an event are selected uniformly at random and -# their resultant probabilities are calculated. If :math:`N` samples :math:`\{S_{1}, S_{2}, -# \ldots , S_{N}\}` are generated, then the event probability can be approximated as -# -# .. math:: -# p(E_{k, n_{\max}}) \approx \frac{1}{N}\sum_{i=1}^N p(S_i) |E_{k, n_{\max}}|, -# -# with :math:`|E_{k, n_{\max}}|` denoting the cardinality of the event. -# -# This method can be accessed using the :func:`~.prob_event_mc` function. The 4-photon event is -# approximated as: - -print(similarity.prob_event_mc(nx.Graph(m0_a), 4, max_count_per_mode=2, n_mean=6)) - -############################################################################## -# The feature vector can then be calculated through Monte Carlo sampling using -# :func:`~.feature_vector_mc`. -# -# .. note:: -# The results of :func:`~.prob_event_mc` and :func:`~.feature_vector_mc` are probabilistic and -# may vary between runs. Increasing the optional ``samples`` parameter will increase accuracy -# but slow down calculation. -# -# The second method of Monte Carlo approximation is intended for use in scenarios where it is -# computationally intensive to pre-calculate a statistically significant dataset of samples from -# GBS. -# -# Machine learning with GBS graph kernels -# --------------------------------------- -# -# The power of feature vectors that embed graphs in a vector space of real numbers is that we can -# now measure similarities between graphs. This is very useful in machine learning, where similar -# labels are assigned to graphs that are close to each other. GBS feature vectors therefore give -# rise to a similarity measure between graphs! -# -# Let's build this up a bit more. The MUTAG dataset we are considering contains not only graphs -# corresponding to the structure of chemical compounds, but also a *label* of each -# compound based upon its mutagenic effect. 
The four graphs we consider here have labels: -# -# - MUTAG0: Class 1 -# - MUTAG1: Class 0 -# - MUTAG2: Class 0 -# - MUTAG3: Class 1 - -classes = [1, 0, 0, 1] - -############################################################################## -# We can use GBS feature vectors in a `support vector machine -# `__ (SVM) that finds a separating -# hyperplane between classes in the feature space. We start by defining two-dimensional feature -# vectors: - -events = [8, 10] -max_count = 2 - -f1 = similarity.feature_vector_sampling(m0, events, max_count) -f2 = similarity.feature_vector_sampling(m1, events, max_count) -f3 = similarity.feature_vector_sampling(m2, events, max_count) -f4 = similarity.feature_vector_sampling(m3, events, max_count) - -import numpy as np - -R = np.array([f1, f2, f3, f4]) - -print(R) - -############################################################################## -# There is freedom in the choice of ``events`` composing the feature vectors and we encourage the -# reader to explore different combinations. Note, however, that odd photon-numbered events have -# zero probability because ideal GBS only generates and outputs pairs of photons. -# -# Given our points in the feature space and their target labels, we can use -# scikit-learn's Support Vector Machine `LinearSVC `__ as our model to train: - -from sklearn.svm import LinearSVC -from sklearn.preprocessing import StandardScaler - -R_scaled = StandardScaler().fit_transform(R) # Transform data to zero mean and unit variance - -classifier = LinearSVC() -classifier.fit(R_scaled, classes) - -############################################################################## -# Here, the term "linear" refers to the *kernel* function used to calculate inner products -# between vectors in the space. We can use a linear SVM because we have already embedded the -# graphs in a feature space based on GBS. We have also rescaled the feature vectors so that they -# zero mean and unit variance using scikit-learn's ``StandardScaler``, a technique -# `often used `__ in machine learning. -# -# We can then visualize the trained SVM by plotting the decision boundary with respect to the -# points: - -w = classifier.coef_[0] -i = classifier.intercept_[0] - -m = -w[0] / w[1] # finding the values for y = mx + b -b = -i / w[1] - -xx = [-1, 1] -yy = [m * x + b for x in xx] - -fig = plot.points(R_scaled, classes) -fig.add_trace(plotly.graph_objects.Scatter(x=xx, y=yy, mode="lines")) - -plotly.offline.plot(fig, filename="SVM.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/SVM.html -# -# This plot shows the two classes (grey points for class 0 and red points for class 1) -# successfully separated by the linear hyperplane using the GBS feature space. Moreover, -# recall that the two MUTAG1 and MUTAG2 graphs of class 0 are actually isomorphic. Reassuringly, -# their corresponding feature vectors are very similar. In fact, the feature vectors of -# isomorphic graphs should always be identical :cite:`bradler2018graph` - the small discrepancy -# in this plot is due to the statistical approximation from sampling. 
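##############################################################################
# As a quick illustrative check (a minimal sketch, not part of the original
# tutorial), the trained classifier can also be queried directly with
# ``classifier.predict``. Re-predicting the four training points says nothing
# about generalization, but it confirms that the learned hyperplane separates
# the training data, consistent with the plot above.

print(classifier.predict(R_scaled))  # should reproduce ``classes``, i.e. [1 0 0 1]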
diff --git a/doc/tutorials_apps/run_tutorial_similarity.py.md5 b/doc/tutorials_apps/run_tutorial_similarity.py.md5 deleted file mode 100644 index a62980ce8..000000000 --- a/doc/tutorials_apps/run_tutorial_similarity.py.md5 +++ /dev/null @@ -1 +0,0 @@ -d84b65b306f8ed47beabf3b7ac1d4425 \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_similarity.rst b/doc/tutorials_apps/run_tutorial_similarity.rst deleted file mode 100644 index e0d3341bc..000000000 --- a/doc/tutorials_apps/run_tutorial_similarity.rst +++ /dev/null @@ -1,596 +0,0 @@ -.. note:: - :class: sphx-glr-download-link-note - - Click :ref:`here ` to download the full example code -.. rst-class:: sphx-glr-example-title - -.. _sphx_glr_tutorials_apps_run_tutorial_similarity.py: - - -.. _apps-sim-tutorial: - -Graph similarity -================ - -*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.similarity` - -This page looks at how to use GBS to construct a similarity measure between graphs, -known as a graph kernel :cite:`schuld2019quantum`. Kernels can be applied to graph-based -data for machine learning tasks such as classification using a support vector machine. - -Graph data ----------- - -We begin by fixing a dataset of graphs to consider and loading GBS samples from these graphs, -which will be needed in the following. - -Let's use the MUTAG dataset of graphs :cite:`debnath1991structure,kriege2012subgraph`. This is a -dataset of 188 different graphs that each correspond to the structure of a chemical compound. Our -goal is to use GBS samples from these graphs to measure their similarity. - -The :mod:`~.apps.data` module provides pre-calculated GBS samples for selected graphs in the MUTAG -dataset. Each set of samples is generated by encoding the graph into a GBS device, and collecting -photon click events. We'll start by loading four sets of samples and visualizing the -corresponding graphs. - - -.. code-block:: default - - - from strawberryfields.apps import data, plot, similarity - - m0 = data.Mutag0() - m1 = data.Mutag1() - m2 = data.Mutag2() - m3 = data.Mutag3() - - - - - - - -These datasets contain both the adjacency matrix of the graph and the samples generated through -GBS. We can access the adjacency matrix through: - - -.. code-block:: default - - - m0_a = m0.adj - m1_a = m1.adj - m2_a = m2.adj - m3_a = m3.adj - - - - - - - -Samples from these graphs can be accessed by indexing: - - -.. code-block:: default - - - print(m0[0]) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0] - - -We can now plot the four graphs using the :mod:`~.apps.plot` module. To use this module, -we need to convert the adjacency matrices into NetworkX Graphs: - - -.. code-block:: default - - - import networkx as nx - import plotly - - plot_mutag_0 = plot.graph(nx.Graph(m0_a)) - plot_mutag_1 = plot.graph(nx.Graph(m1_a)) - plot_mutag_2 = plot.graph(nx.Graph(m2_a)) - plot_mutag_3 = plot.graph(nx.Graph(m3_a)) - - plotly.offline.plot(plot_mutag_0, filename="MUTAG_0.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/MUTAG_0.html - -.. note:: - The command ``plotly.offline.plot()`` is used to display plots in the documentation. In - practice, you can simply use ``plot_mutag_0.show()`` to view your graph. - - -.. code-block:: default - - - plotly.offline.plot(plot_mutag_1, filename="MUTAG_1.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/MUTAG_1.html - - -.. 
code-block:: default - - - plotly.offline.plot(plot_mutag_2, filename="MUTAG_2.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/MUTAG_2.html - - -.. code-block:: default - - - plotly.offline.plot(plot_mutag_3, filename="MUTAG_3.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/MUTAG_3.html - -The graphs of ``m1_a`` and ``m2_a`` look very similar. In fact, -it turns out that they are *isomorphic* to each other, which means that the graphs can be made -identical by permuting their node labels. - -Creating a feature vector -------------------------- - -Following :cite:`schuld2019quantum`, we can create a *feature vector* to describe each graph. -These feature vectors contain information about the graphs and can be viewed as a mapping to a -high-dimensional feature space, a technique often used in machine learning that allows us to -employ properties of the feature space to separate and classify the vectors. - -The feature vector of a graph can be composed in a variety of ways. One approach is to -associate features with the relative frequencies of certain types of measurements being -recorded from a GBS device configured to sample from the graph, as we now discuss. - -We begin by defining the concept of an *orbit*, which is the set of all GBS samples that are -equivalent under permutation of the modes. A sample can be converted to its corresponding orbit -using the :func:`~.sample_to_orbit` function. For example, the first sample of ``m0`` is ``[0, -0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]`` and has orbit: - - -.. code-block:: default - - - print(similarity.sample_to_orbit(m0[0])) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - [1, 1] - - -Here, ``[1, 1]`` means that two photons were detected, each in a separate mode. Other samples -can be randomly generated from the ``[1, 1]`` orbit using: - - -.. code-block:: default - - - print(similarity.orbit_to_sample([1, 1], modes=m0.modes)) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0] - - -Orbits provide a useful way to coarse-grain the samples from GBS into outcomes that are -statistically more likely to be observed. However, we are interested in coarse-graining further -into *events*, which correspond to a combination of orbits with the same photon number such -that the number of photons counted in each mode does not exceed a fixed value -``max_count_per_mode``. To understand this, let's look at all of the orbits with a photon -number of 5: - - -.. code-block:: default - - - print(list(similarity.orbits(5))) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - [[1, 1, 1, 1, 1], [2, 1, 1, 1], [3, 1, 1], [2, 2, 1], [4, 1], [3, 2], [5]] - - -All 5-photon samples belong to one of the orbits above. A 5-photon event with -``max_count_per_mode = 3`` means that we include the orbits: ``[[1, 1, 1, 1, 1], [2, 1, 1, 1], -[3, 1, 1], [2, 2, 1], [3, 2]]`` and ignore the orbits ``[[4, 1], [5]]``. For example, -the sample ``[0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0]`` is a 5-photon event: - - -.. code-block:: default - - - print(similarity.sample_to_event([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0], 3)) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - 5 - - -Samples with more than ``max_count_per_mode`` in any mode are not counted as part of the event: - - -.. 
code-block:: default - - - print(similarity.sample_to_event([0, 4, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - None - - -Now that we have mastered orbits and events, how can we make a feature vector? It was shown in -:cite:`schuld2019quantum` that one way of making a feature vector of a graph is through the -frequencies of events. Specifically, for a :math:`k` photon event :math:`E_{k, n_{\max}}` -with maximum count per mode :math:`n_{\max}` and corresponding probability :math:`p_{k, -n_{\max}}:=p_{E_{k, n_{\max}}}(G)` with respect to a graph :math:`G`, a feature vector can be -written as - -.. math:: - f_{\mathbf{k}, n_{\max}} = (p_{k_{1}, n_{\max}}, p_{k_{2}, n_{\max}}, \ldots , p_{k_{K}, - n_{\max}}), - -where :math:`\mathbf{k} := (k_{1}, k_{2}, \ldots , k_{K})` is a list of different total photon -numbers. - -For example, if :math:`\mathbf{k} := (2, 4, 6)` and :math:`n_{\max} = 2`, we have - -.. math:: - f_{(2, 4, 6), 2} = (p_{2, 2}, p_{4, 2}, p_{6, 2}). - -In this case, we are interested in the probabilities of events :math:`E_{2, 2}`, :math:`E_{4, -2}`, and :math:`E_{6, 2}`. Suppose we are sampling from a four-mode device and have the samples -``[0, 3, 0, 1]`` and ``[1, 2, 0, 1]``. These samples are part of the orbits ``[3, 1]`` and -``[2, 1, 1]``, respectively. However, ``[3, 1]`` is not part of the :math:`E_{4, 2}` event while -``[2, 1, 1]`` is. - -Calculating a feature vector ----------------------------- - -We provide two methods for calculating a feature vector of GBS event probabilities in -Strawberry Fields: - -1. Through sampling. -2. Using a Monte Carlo estimate of the probability. - -In the first method, all one needs to do is generate some GBS samples from the graph of -interest and fix the composition of the feature vector. For example, for a feature vector -:math:`f_{\mathbf{k} = (2, 4, 6), n_{\max}=2}` we use: - - -.. code-block:: default - - - print(similarity.feature_vector_sampling(m0, event_photon_numbers=[2, 4, 6], max_count_per_mode=2)) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - [0.19035, 0.2047, 0.1539] - - -For the second method, suppose we want to calculate the event probabilities exactly rather than -through sampling. To do this, we consider the event probability :math:`p_{k, n_{\max}}` as the -sum over all sample probabilities in the event. In GBS, each sample probability is determined by -the hafnian of a relevant sub-adjacency matrix. While this is tough to calculate, what makes -calculating :math:`p_{k, n_{\max}}` really challenging is the number of samples the corresponding -event contains! For example, the 6-photon event over 17 modes :math:`E_{k=6, n_{\max}=2}` -contains the following number of samples : - - -.. code-block:: default - - - print(similarity.event_cardinality(6, 2, 17)) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - 58276 - - -To avoid calculating a large number of sample probabilities, an alternative is to perform a -Monte Carlo approximation. Here, samples within an event are selected uniformly at random and -their resultant probabilities are calculated. If :math:`N` samples :math:`\{S_{1}, S_{2}, -\ldots , S_{N}\}` are generated, then the event probability can be approximated as - -.. math:: - p(E_{k, n_{\max}}) \approx \frac{1}{N}\sum_{i=1}^N p(S_i) |E_{k, n_{\max}}|, - -with :math:`|E_{k, n_{\max}}|` denoting the cardinality of the event. 
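As an illustrative aside (a sketch that is not part of the original tutorial), the
sampling-based estimate of an event probability is itself just a count over the
pre-generated samples: the fraction of samples whose event photon number equals
:math:`k`. The sketch below estimates :math:`p_{4, 2}` for MUTAG0 in this way, assuming
the dataset object supports ``len()``; the result can be compared with the Monte Carlo
approximation reported next.


.. code-block:: default


    # Fraction of pre-generated MUTAG0 samples falling into the 4-photon event
    # with max_count_per_mode = 2 (illustrative sketch only; assumes len(m0) works).
    n_in_event = sum(similarity.sample_to_event(m0[i], 2) == 4 for i in range(len(m0)))
    print(n_in_event / len(m0))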
- -This method can be accessed using the :func:`~.prob_event_mc` function. The 4-photon event is -approximated as: - - -.. code-block:: default - - - print(similarity.prob_event_mc(nx.Graph(m0_a), 4, max_count_per_mode=2, n_mean=6)) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - 0.20519880474018276 - - -The feature vector can then be calculated through Monte Carlo sampling using -:func:`~.feature_vector_mc`. - -.. note:: - The results of :func:`~.prob_event_mc` and :func:`~.feature_vector_mc` are probabilistic and - may vary between runs. Increasing the optional ``samples`` parameter will increase accuracy - but slow down calculation. - -The second method of Monte Carlo approximation is intended for use in scenarios where it is -computationally intensive to pre-calculate a statistically significant dataset of samples from -GBS. - -Machine learning with GBS graph kernels ---------------------------------------- - -The power of feature vectors that embed graphs in a vector space of real numbers is that we can -now measure similarities between graphs. This is very useful in machine learning, where similar -labels are assigned to graphs that are close to each other. GBS feature vectors therefore give -rise to a similarity measure between graphs! - -Let's build this up a bit more. The MUTAG dataset we are considering contains not only graphs -corresponding to the structure of chemical compounds, but also a *label* of each -compound based upon its mutagenic effect. The four graphs we consider here have labels: - -- MUTAG0: Class 1 -- MUTAG1: Class 0 -- MUTAG2: Class 0 -- MUTAG3: Class 1 - - -.. code-block:: default - - - classes = [1, 0, 0, 1] - - - - - - - -We can use GBS feature vectors in a `support vector machine -`__ (SVM) that finds a separating -hyperplane between classes in the feature space. We start by defining two-dimensional feature -vectors: - - -.. code-block:: default - - - events = [8, 10] - max_count = 2 - - f1 = similarity.feature_vector_sampling(m0, events, max_count) - f2 = similarity.feature_vector_sampling(m1, events, max_count) - f3 = similarity.feature_vector_sampling(m2, events, max_count) - f4 = similarity.feature_vector_sampling(m3, events, max_count) - - import numpy as np - - R = np.array([f1, f2, f3, f4]) - - print(R) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - [[0.0884 0.042 ] - [0.0704 0.02855] - [0.06995 0.02935] - [0.0962 0.04585]] - - -There is freedom in the choice of ``events`` composing the feature vectors and we encourage the -reader to explore different combinations. Note, however, that odd photon-numbered events have -zero probability because ideal GBS only generates and outputs pairs of photons. - -Given our points in the feature space and their target labels, we can use -scikit-learn's Support Vector Machine `LinearSVC `__ as our model to train: - - -.. code-block:: default - - - from sklearn.svm import LinearSVC - from sklearn.preprocessing import StandardScaler - - R_scaled = StandardScaler().fit_transform(R) # Transform data to zero mean and unit variance - - classifier = LinearSVC() - classifier.fit(R_scaled, classes) - - - - - - - -Here, the term "linear" refers to the *kernel* function used to calculate inner products -between vectors in the space. We can use a linear SVM because we have already embedded the -graphs in a feature space based on GBS. 
We have also rescaled the feature vectors so that they -zero mean and unit variance using scikit-learn's ``StandardScaler``, a technique -`often used `__ in machine learning. - -We can then visualize the trained SVM by plotting the decision boundary with respect to the -points: - - -.. code-block:: default - - - w = classifier.coef_[0] - i = classifier.intercept_[0] - - m = -w[0] / w[1] # finding the values for y = mx + b - b = -i / w[1] - - xx = [-1, 1] - yy = [m * x + b for x in xx] - - fig = plot.points(R_scaled, classes) - fig.add_trace(plotly.graph_objects.Scatter(x=xx, y=yy, mode="lines")) - - plotly.offline.plot(fig, filename="SVM.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/SVM.html - -This plot shows the two classes (grey points for class 0 and red points for class 1) -successfully separated by the linear hyperplane using the GBS feature space. Moreover, -recall that the two MUTAG1 and MUTAG2 graphs of class 0 are actually isomorphic. Reassuringly, -their corresponding feature vectors are very similar. In fact, the feature vectors of -isomorphic graphs should always be identical :cite:`bradler2018graph` - the small discrepancy -in this plot is due to the statistical approximation from sampling. - - -.. rst-class:: sphx-glr-timing - - **Total running time of the script:** ( 0 minutes 27.124 seconds) - - -.. _sphx_glr_download_tutorials_apps_run_tutorial_similarity.py: - - -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - - .. container:: sphx-glr-download - - :download:`Download Python source code: run_tutorial_similarity.py ` - - - - .. container:: sphx-glr-download - - :download:`Download Jupyter notebook: run_tutorial_similarity.ipynb ` - - -.. only:: html - - .. rst-class:: sphx-glr-signature - - `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/run_tutorial_vibronic.ipynb b/doc/tutorials_apps/run_tutorial_vibronic.ipynb deleted file mode 100644 index e187dae29..000000000 --- a/doc/tutorials_apps/run_tutorial_vibronic.ipynb +++ /dev/null @@ -1,161 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# This cell is added by sphinx-gallery\n# It can be customized to whatever you like\n%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\nVibronic spectra\n================\n\n*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.vibronic`\n\nHere we study how GBS can be used to compute vibronic spectra. So let's start from\nthe beginning: what is a vibronic spectrum? Molecules absorb light at frequencies that depend on\nthe allowed transitions between different electronic states. These electronic transitions\ncan be accompanied by changes in the vibrational energy of the molecules. In this case, the\nabsorption lines that represent the frequencies at which light is more strongly absorbed are\nreferred to as the *vibronic* spectrum. The term *vibronic* refers to the simultaneous vibrational\nand electronic transitions of a molecule upon absorption of light.\n\nIt is possible to determine vibronic spectra by running clever and careful spectroscopy experiments.\nHowever, this can be slow and expensive, in which case it is valuable to predict vibronic spectra\nusing theoretical calculations. To model molecular vibronic transitions with GBS, we need only a few\nrelevant molecular parameters:\n\n#. 
$\\Omega$: diagonal matrix whose entries are the square-roots of the frequencies of the\n normal modes of the electronic *initial* state.\n#. $\\Omega'$: diagonal matrix whose entries are the square-roots of the frequencies of the\n normal modes of the electronic *final* state.\n#. $U_\\text{D}$: Duschinsky matrix.\n#. $\\delta$: displacement vector.\n#. $T$: temperature.\n\nThe Duschinsky matrix and displacement vector encode information regarding how\nvibrational modes are transformed when the molecule changes from the initial to final electronic\nstate. At zero temperature, all initial modes are in the vibrational ground state. At finite\ntemperature, other vibrational states are also populated.\n\nIn the GBS algorithm for computing vibronic spectra :cite:`huh2015boson`, these chemical parameters\nare sufficient to determine the configuration of a GBS device. As opposed to other applications\nthat involve only single-mode squeezing and linear interferometry, in vibronic spectra we\nprepare a Gaussian state using two-mode squeezing, linear interferometry, single-mode squeezing,\nand displacements.\n\nThe function :func:`~.gbs_params` of the :mod:`~.apps.vibronic` module can be\nused to obtain the squeezing, interferometer, and displacement parameters from the input\nchemical parameters listed above. In this page, we study the vibronic spectrum of\n`formic acid `_ \ud83d\udc1c. Its chemical parameters, obtained\nfrom :cite:`huh2015boson`, can be found in the :mod:`~.apps.data` module:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from strawberryfields.apps import vibronic, data\nimport numpy as np\nformic = data.Formic()\nw = formic.w # ground state frequencies\nwp = formic.wp # excited state frequencies\nUd = formic.Ud # Duschinsky matrix\ndelta = formic.delta # displacement vector\nT = 0 # temperature" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now map this chemical information to GBS parameters using the function\n:func:`~.gbs_params`:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "t, U1, r, U2, alpha = vibronic.gbs_params(w, wp, Ud, delta, T)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that since two-mode squeezing operators are involved, if we have $N$ vibrational\nmodes, the Gaussian state prepared is a $2N$-mode Gaussian state and the samples\nare vectors of length $2N$. The first $N$ modes are those of the final electronic\nstate; the remaining $N$ modes are those of the ground state. From above, $t$ is a\nvector of two-mode squeezing parameters, $U_1$ and $U_2$ are the interferometer\nunitaries (we need two interferometers), $r$ is a vector of single-mode squeezing\nparameters, and `alpha` is a vector of displacements.\n\nPhotons detected at the output of the GBS device correspond to a specific transition energy.\nThe GBS algorithm for vibronic spectra works because the programmed device provides samples\nin such a way that the energies that are sampled with high probability are the peaks of the\nvibronic spectrum. The function :func:`~.energies` can be used to compute the energies for\na set of samples. 
In this case we show the energy of the first five samples:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "e = vibronic.energies(formic, w, wp)\nprint(np.around(e[:5], 4)) # 4 decimal precision" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once the GBS parameters have been obtained, it is straightforward to run the GBS algorithm: we\ngenerate many samples, compute their energies, and make a histogram of the observed energies.\nThe :mod:`~.apps.sample` module contains the function :func:`~.vibronic`, which is tailored for\nuse in vibronic spectra applications. Similarly, the :mod:`~.apps.plot` module includes a\n:func:`~.spectrum` function that generates the vibronic spectrum from the GBS samples. Let's see\nhow this is done for just a few samples:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from strawberryfields.apps import sample, plot\nimport plotly\nnr_samples = 10\ns = sample.vibronic(t, U1, r, U2, alpha, nr_samples)\ne = vibronic.energies(s, w, wp)\nspectrum = plot.spectrum(e, xmin=-1000, xmax=8000)\nplotly.offline.plot(spectrum, filename=\"spectrum.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/spectrum.html\n\n

<div class=\"alert alert-info\"><h4>Note</h4><p>The command ``plotly.offline.plot()`` is used to display plots in the documentation. In\n    practice, you can simply use ``spectrum.show()`` to generate the figure.</p></div>

\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The bars in the plot are the histogram of energies. The curve surrounding them is a Lorentzian\nbroadening of the spectrum, which better represents the observations from an actual experiment.\nOf course, 10 samples are not enough to accurately reconstruct the vibronic spectrum. Let's\ninstead use the 20,000 pre-generated samples from the :mod:`~.apps.data` module.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "e = vibronic.energies(formic, w, wp)\nfull_spectrum = plot.spectrum(e, xmin=-1000, xmax=8000)\nplotly.offline.plot(full_spectrum, filename=\"full_spectrum.html\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ".. raw:: html\n :file: ../../examples_apps/full_spectrum.html\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can compare this prediction with an actual experimental spectrum, obtained from Fig. 3 in\nRef. :cite:`huh2015boson`, shown below:\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![](../_static/formic_spec.png)\n\n :width: 740px\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The agreement is remarkable! Formic acid is a small molecule, which means that its vibronic\nspectrum can be computed using classical computers. However, for larger molecules, this task\nquickly becomes intractable, for much the same reason that simulating GBS cannot be done\nefficiently with classical devices. Photonic quantum computing therefore holds the potential to\nenable new computational capabilities in this area of quantum chemistry \u269b\ufe0f.\n\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_vibronic.py b/doc/tutorials_apps/run_tutorial_vibronic.py deleted file mode 100644 index bee8bd0bf..000000000 --- a/doc/tutorials_apps/run_tutorial_vibronic.py +++ /dev/null @@ -1,134 +0,0 @@ -# pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports -r""" -.. _apps-vibronic-tutorial: - -Vibronic spectra -================ - -*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.vibronic` - -Here we study how GBS can be used to compute vibronic spectra. So let's start from -the beginning: what is a vibronic spectrum? Molecules absorb light at frequencies that depend on -the allowed transitions between different electronic states. These electronic transitions -can be accompanied by changes in the vibrational energy of the molecules. In this case, the -absorption lines that represent the frequencies at which light is more strongly absorbed are -referred to as the *vibronic* spectrum. The term *vibronic* refers to the simultaneous vibrational -and electronic transitions of a molecule upon absorption of light. - -It is possible to determine vibronic spectra by running clever and careful spectroscopy experiments. 
-However, this can be slow and expensive, in which case it is valuable to predict vibronic spectra -using theoretical calculations. To model molecular vibronic transitions with GBS, we need only a few -relevant molecular parameters: - -#. :math:`\Omega`: diagonal matrix whose entries are the square-roots of the frequencies of the - normal modes of the electronic *initial* state. -#. :math:`\Omega'`: diagonal matrix whose entries are the square-roots of the frequencies of the - normal modes of the electronic *final* state. -#. :math:`U_\text{D}`: Duschinsky matrix. -#. :math:`\delta`: displacement vector. -#. :math:`T`: temperature. - -The Duschinsky matrix and displacement vector encode information regarding how -vibrational modes are transformed when the molecule changes from the initial to final electronic -state. At zero temperature, all initial modes are in the vibrational ground state. At finite -temperature, other vibrational states are also populated. - -In the GBS algorithm for computing vibronic spectra :cite:`huh2015boson`, these chemical parameters -are sufficient to determine the configuration of a GBS device. As opposed to other applications -that involve only single-mode squeezing and linear interferometry, in vibronic spectra we -prepare a Gaussian state using two-mode squeezing, linear interferometry, single-mode squeezing, -and displacements. - -The function :func:`~.gbs_params` of the :mod:`~.apps.vibronic` module can be -used to obtain the squeezing, interferometer, and displacement parameters from the input -chemical parameters listed above. In this page, we study the vibronic spectrum of -`formic acid `_ 🐜. Its chemical parameters, obtained -from :cite:`huh2015boson`, can be found in the :mod:`~.apps.data` module: -""" -from strawberryfields.apps import vibronic, data -import numpy as np -formic = data.Formic() -w = formic.w # ground state frequencies -wp = formic.wp # excited state frequencies -Ud = formic.Ud # Duschinsky matrix -delta = formic.delta # displacement vector -T = 0 # temperature - -############################################################################## -# We can now map this chemical information to GBS parameters using the function -# :func:`~.gbs_params`: - -t, U1, r, U2, alpha = vibronic.gbs_params(w, wp, Ud, delta, T) - -############################################################################## -# Note that since two-mode squeezing operators are involved, if we have :math:`N` vibrational -# modes, the Gaussian state prepared is a :math:`2N`-mode Gaussian state and the samples -# are vectors of length :math:`2N`. The first :math:`N` modes are those of the final electronic -# state; the remaining :math:`N` modes are those of the ground state. From above, :math:`t` is a -# vector of two-mode squeezing parameters, :math:`U_1` and :math:`U_2` are the interferometer -# unitaries (we need two interferometers), :math:`r` is a vector of single-mode squeezing -# parameters, and `alpha` is a vector of displacements. -# -# Photons detected at the output of the GBS device correspond to a specific transition energy. -# The GBS algorithm for vibronic spectra works because the programmed device provides samples -# in such a way that the energies that are sampled with high probability are the peaks of the -# vibronic spectrum. The function :func:`~.energies` can be used to compute the energies for -# a set of samples. 
In this case we show the energy of the first five samples: - -e = vibronic.energies(formic, w, wp) -print(np.around(e[:5], 4)) # 4 decimal precision - -############################################################################## -# Once the GBS parameters have been obtained, it is straightforward to run the GBS algorithm: we -# generate many samples, compute their energies, and make a histogram of the observed energies. -# The :mod:`~.apps.sample` module contains the function :func:`~.vibronic`, which is tailored for -# use in vibronic spectra applications. Similarly, the :mod:`~.apps.plot` module includes a -# :func:`~.spectrum` function that generates the vibronic spectrum from the GBS samples. Let's see -# how this is done for just a few samples: - -from strawberryfields.apps import sample, plot -import plotly -nr_samples = 10 -s = sample.vibronic(t, U1, r, U2, alpha, nr_samples) -e = vibronic.energies(s, w, wp) -spectrum = plot.spectrum(e, xmin=-1000, xmax=8000) -plotly.offline.plot(spectrum, filename="spectrum.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/spectrum.html -# -# .. note:: -# The command ``plotly.offline.plot()`` is used to display plots in the documentation. In -# practice, you can simply use ``spectrum.show()`` to generate the figure. - -############################################################################## -# The bars in the plot are the histogram of energies. The curve surrounding them is a Lorentzian -# broadening of the spectrum, which better represents the observations from an actual experiment. -# Of course, 10 samples are not enough to accurately reconstruct the vibronic spectrum. Let's -# instead use the 20,000 pre-generated samples from the :mod:`~.apps.data` module. - -e = vibronic.energies(formic, w, wp) -full_spectrum = plot.spectrum(e, xmin=-1000, xmax=8000) -plotly.offline.plot(full_spectrum, filename="full_spectrum.html") - -############################################################################## -# .. raw:: html -# :file: ../../examples_apps/full_spectrum.html - -############################################################################## -# -# We can compare this prediction with an actual experimental spectrum, obtained from Fig. 3 in -# Ref. :cite:`huh2015boson`, shown below: - -############################################################################## -# .. image:: ../_static/formic_spec.png -# :width: 740px - -############################################################################## -# -# The agreement is remarkable! Formic acid is a small molecule, which means that its vibronic -# spectrum can be computed using classical computers. However, for larger molecules, this task -# quickly becomes intractable, for much the same reason that simulating GBS cannot be done -# efficiently with classical devices. Photonic quantum computing therefore holds the potential to -# enable new computational capabilities in this area of quantum chemistry ⚛️. 
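##############################################################################
# As a final illustrative sketch (not part of the tutorial itself), the same
# energies can be binned without plotly, using only NumPy. Here ``e`` is the
# list of energies computed above from the pre-generated samples, and the bin
# width of 100 (in the same units as ``e``) is an arbitrary choice made only
# for this sketch.

counts, bin_edges = np.histogram(e, bins=np.arange(-1000, 8100, 100))
print("Most frequently sampled energy bin starts at", bin_edges[np.argmax(counts)])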
diff --git a/doc/tutorials_apps/run_tutorial_vibronic.py.md5 b/doc/tutorials_apps/run_tutorial_vibronic.py.md5 deleted file mode 100644 index d92eaf695..000000000 --- a/doc/tutorials_apps/run_tutorial_vibronic.py.md5 +++ /dev/null @@ -1 +0,0 @@ -5512f615815ad19b173f76f0a0f19618 \ No newline at end of file diff --git a/doc/tutorials_apps/run_tutorial_vibronic.rst b/doc/tutorials_apps/run_tutorial_vibronic.rst deleted file mode 100644 index 90590ac54..000000000 --- a/doc/tutorials_apps/run_tutorial_vibronic.rst +++ /dev/null @@ -1,219 +0,0 @@ -.. note:: - :class: sphx-glr-download-link-note - - Click :ref:`here ` to download the full example code -.. rst-class:: sphx-glr-example-title - -.. _sphx_glr_tutorials_apps_run_tutorial_vibronic.py: - - -.. _apps-vibronic-tutorial: - -Vibronic spectra -================ - -*Technical details are available in the API documentation:* :doc:`/code/api/strawberryfields.apps.vibronic` - -Here we study how GBS can be used to compute vibronic spectra. So let's start from -the beginning: what is a vibronic spectrum? Molecules absorb light at frequencies that depend on -the allowed transitions between different electronic states. These electronic transitions -can be accompanied by changes in the vibrational energy of the molecules. In this case, the -absorption lines that represent the frequencies at which light is more strongly absorbed are -referred to as the *vibronic* spectrum. The term *vibronic* refers to the simultaneous vibrational -and electronic transitions of a molecule upon absorption of light. - -It is possible to determine vibronic spectra by running clever and careful spectroscopy experiments. -However, this can be slow and expensive, in which case it is valuable to predict vibronic spectra -using theoretical calculations. To model molecular vibronic transitions with GBS, we need only a few -relevant molecular parameters: - -#. :math:`\Omega`: diagonal matrix whose entries are the square-roots of the frequencies of the - normal modes of the electronic *initial* state. -#. :math:`\Omega'`: diagonal matrix whose entries are the square-roots of the frequencies of the - normal modes of the electronic *final* state. -#. :math:`U_\text{D}`: Duschinsky matrix. -#. :math:`\delta`: displacement vector. -#. :math:`T`: temperature. - -The Duschinsky matrix and displacement vector encode information regarding how -vibrational modes are transformed when the molecule changes from the initial to final electronic -state. At zero temperature, all initial modes are in the vibrational ground state. At finite -temperature, other vibrational states are also populated. - -In the GBS algorithm for computing vibronic spectra :cite:`huh2015boson`, these chemical parameters -are sufficient to determine the configuration of a GBS device. As opposed to other applications -that involve only single-mode squeezing and linear interferometry, in vibronic spectra we -prepare a Gaussian state using two-mode squeezing, linear interferometry, single-mode squeezing, -and displacements. - -The function :func:`~.gbs_params` of the :mod:`~.apps.vibronic` module can be -used to obtain the squeezing, interferometer, and displacement parameters from the input -chemical parameters listed above. In this page, we study the vibronic spectrum of -`formic acid `_ 🐜. Its chemical parameters, obtained -from :cite:`huh2015boson`, can be found in the :mod:`~.apps.data` module: - - -.. 
code-block:: default - - from strawberryfields.apps import vibronic, data - import numpy as np - formic = data.Formic() - w = formic.w # ground state frequencies - wp = formic.wp # excited state frequencies - Ud = formic.Ud # Duschinsky matrix - delta = formic.delta # displacement vector - T = 0 # temperature - - - - - - - -We can now map this chemical information to GBS parameters using the function -:func:`~.gbs_params`: - - -.. code-block:: default - - - t, U1, r, U2, alpha = vibronic.gbs_params(w, wp, Ud, delta, T) - - - - - - - -Note that since two-mode squeezing operators are involved, if we have :math:`N` vibrational -modes, the Gaussian state prepared is a :math:`2N`-mode Gaussian state and the samples -are vectors of length :math:`2N`. The first :math:`N` modes are those of the final electronic -state; the remaining :math:`N` modes are those of the ground state. From above, :math:`t` is a -vector of two-mode squeezing parameters, :math:`U_1` and :math:`U_2` are the interferometer -unitaries (we need two interferometers), :math:`r` is a vector of single-mode squeezing -parameters, and `alpha` is a vector of displacements. - -Photons detected at the output of the GBS device correspond to a specific transition energy. -The GBS algorithm for vibronic spectra works because the programmed device provides samples -in such a way that the energies that are sampled with high probability are the peaks of the -vibronic spectrum. The function :func:`~.energies` can be used to compute the energies for -a set of samples. In this case we show the energy of the first five samples: - - -.. code-block:: default - - - e = vibronic.energies(formic, w, wp) - print(np.around(e[:5], 4)) # 4 decimal precision - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - [1566.4602 4699.3806 1566.4602 4699.3806 4699.3806] - - -Once the GBS parameters have been obtained, it is straightforward to run the GBS algorithm: we -generate many samples, compute their energies, and make a histogram of the observed energies. -The :mod:`~.apps.sample` module contains the function :func:`~.vibronic`, which is tailored for -use in vibronic spectra applications. Similarly, the :mod:`~.apps.plot` module includes a -:func:`~.spectrum` function that generates the vibronic spectrum from the GBS samples. Let's see -how this is done for just a few samples: - - -.. code-block:: default - - - from strawberryfields.apps import sample, plot - import plotly - nr_samples = 10 - s = sample.vibronic(t, U1, r, U2, alpha, nr_samples) - e = vibronic.energies(s, w, wp) - spectrum = plot.spectrum(e, xmin=-1000, xmax=8000) - plotly.offline.plot(spectrum, filename="spectrum.html") - - - - - - - -.. raw:: html - :file: ../../examples_apps/spectrum.html - -.. note:: - The command ``plotly.offline.plot()`` is used to display plots in the documentation. In - practice, you can simply use ``spectrum.show()`` to generate the figure. - -The bars in the plot are the histogram of energies. The curve surrounding them is a Lorentzian -broadening of the spectrum, which better represents the observations from an actual experiment. -Of course, 10 samples are not enough to accurately reconstruct the vibronic spectrum. Let's -instead use the 20,000 pre-generated samples from the :mod:`~.apps.data` module. - - -.. code-block:: default - - - e = vibronic.energies(formic, w, wp) - full_spectrum = plot.spectrum(e, xmin=-1000, xmax=8000) - plotly.offline.plot(full_spectrum, filename="full_spectrum.html") - - - - - - - -.. 
raw:: html - :file: ../../examples_apps/full_spectrum.html - -We can compare this prediction with an actual experimental spectrum, obtained from Fig. 3 in -Ref. :cite:`huh2015boson`, shown below: - -.. image:: ../_static/formic_spec.png - :width: 740px - -The agreement is remarkable! Formic acid is a small molecule, which means that its vibronic -spectrum can be computed using classical computers. However, for larger molecules, this task -quickly becomes intractable, for much the same reason that simulating GBS cannot be done -efficiently with classical devices. Photonic quantum computing therefore holds the potential to -enable new computational capabilities in this area of quantum chemistry ⚛️. - - -.. rst-class:: sphx-glr-timing - - **Total running time of the script:** ( 0 minutes 15.221 seconds) - - -.. _sphx_glr_download_tutorials_apps_run_tutorial_vibronic.py: - - -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - - .. container:: sphx-glr-download - - :download:`Download Python source code: run_tutorial_vibronic.py ` - - - - .. container:: sphx-glr-download - - :download:`Download Jupyter notebook: run_tutorial_vibronic.ipynb ` - - -.. only:: html - - .. rst-class:: sphx-glr-signature - - `Gallery generated by Sphinx-Gallery `_ diff --git a/doc/tutorials_apps/sg_execution_times.rst b/doc/tutorials_apps/sg_execution_times.rst deleted file mode 100644 index c61b1b9a3..000000000 --- a/doc/tutorials_apps/sg_execution_times.rst +++ /dev/null @@ -1,15 +0,0 @@ - -:orphan: - -.. _sphx_glr_tutorials_apps_sg_execution_times: - -Computation times -================= -**00:08.272** total execution time for **tutorials_apps** files: - -- **00:08.272**: :ref:`sphx_glr_tutorials_apps_run_tutorial_sample.py` (``run_tutorial_sample.py``) -- **00:00.000**: :ref:`sphx_glr_tutorials_apps_run_tutorial_dense.py` (``run_tutorial_dense.py``) -- **00:00.000**: :ref:`sphx_glr_tutorials_apps_run_tutorial_max_clique.py` (``run_tutorial_max_clique.py``) -- **00:00.000**: :ref:`sphx_glr_tutorials_apps_run_tutorial_points.py` (``run_tutorial_points.py``) -- **00:00.000**: :ref:`sphx_glr_tutorials_apps_run_tutorial_similarity.py` (``run_tutorial_similarity.py``) -- **00:00.000**: :ref:`sphx_glr_tutorials_apps_run_tutorial_vibronic.py` (``run_tutorial_vibronic.py``) diff --git a/doc/tutorials_apps/tutorials_apps_jupyter.zip b/doc/tutorials_apps/tutorials_apps_jupyter.zip deleted file mode 100644 index b28507ea598c8f25e909f1a757358d369c98c121..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 68075 zcmeI5%X3^;e&6XhNx8i6$_u;V*jF5+rpW+J03pcAUm;e2*U(=tp-}&i--^%X()!!%AuIcY@@Y!@e>dxk~@ib2d z-F`MIvUWZ>A00gV)g-yz%Laqu`cCqgUy?*0>5qNfot;lIzPjEU_p|Hk@vH1C>&<8R zc+~BU=c5@9jOK$u`SmcHrTui4@_8Ih4p&zAPBWbZgH5-mL8-?ZP)`pLn0QcRBX(b>jPIv8Zr^Umm7$w#xKmyVKy zEa}aQ*?5?L$&<5ja(bN3vZvWJIUmoHL4J~TM*n4)&L)HLY>*!$`Dl=jvg=`mPwd~Y z&bXr{e1_@tq(43#RWPapq1xiA-E>Bs(fc}+M~ufd zrbRos&#=Scc$7@0V>UR{MEPizO|xRAgJ(Sq-cRDX`Gj-rj93it8L`YkIz7t3&LMjZ z%N%7r0i^&bW~W(ZFw3XOh>bDj-f_l3R09sva~3gW#>tqOW@C*$9L`7iEYAvod}+V- z_yGLsCuvczUp0tyB3V1vX%+ zKsPvFXTyR+TATP8PZy(QQlVa!At!&gj%}?@4*3Z-SczSe8 zzuhua>_!N@v)}C+RKU4Wk^5n%$=b=ho{x@fRNEA%Z*-y)O_QViDW_OK+UrdS zgl9KcY_-mSO5h+X-v9LB#`n`{KlwQ@;jv>L8)Zdd0MSWwM)!-PI2Ob?u$^8$%XaqD z=`{UvJV>`Ux9-gg7QMgD3i}yosvR6=>1+z%5|PmPC6( z1Ynukqy6k@-eZILEGg3AWFTM~&xi+AK$5fL5QO7=zGd(7qe^<2tFT9#?x5>_9s&iE}w!3luL}f4Hh`PN6S3Di zqsFZ9cu?1z>Qy0)P!mmM+orwdF&RzjHUq^fRKGG20A-peU^Zq#`cYD)$v!9u< zg_MP-NX~w;RsRxvLybZh!j(lxIfwc}PUd332L+ufs)1d6F?I%r%ul{I?RVyzTX(mL 
z&Ky2?`!jyq%-$NE=gAbr0`d`I_m0uKoWcWs9VXkGE{cV*y(#>XW!V-a5$c>xOmwq- z*H=|!>;h``O?E~rR{4@a?tLc=6Mgkk&unpIglwsiIzPIBb=8I95o}5< zYO6(#=LiGsAnq&Tu}dEIVEl*ONwGpoyAah+zSZOOsq^qt)F*zPrkp`PJDO(VJSf&0 zybcwZjE`1>kT}KOB1a_+r5P zB^_{(_oSDK)q*wbiqmBrG4%+bV&_Wpw|L&WiQwFR=w#8lQHK-=x*I^%uHe@SudXLs zZ?3~h^va;?D7On&YtP2r$mjgy_(nVJf5Foc_~qEVP(rm;O_cS;OnnGVGDX>G4YE-y z3~=Md1u@W1$Ft-h?Z1KYkQlxX$8c1`?(NOZ%@Ug=@&{N0hE<(;|K4rbTtC~)oCQRX z$GwWg(q7Q>v$Qxj+W|gn>g?}IkuxgA@IoDc*A4gwaUHN7&1a0=yU|C%>4^t^oDMOn zH$F~J(;V&R2wQ%349<$&nJVaWIxY4ScoCfHQEBKUADE?gwGkWWg0+LZn6==dM_Fro z)3DUgbUZ5&RZwUf6hKe^)TDTLZgAcnA0EoaZ|g~(iSNo>1q;(*w%fTL&a{0z8xA_x zFA5>sZ6w6$&Q4;}j^y#!(V}*ecKa6p&(7Qc=*nil)mS5(_HAq|kO|9^4W#%%q$~5^ z4Q$PE=lVNuAK!Tgl+WJ2#Sd>!-VtY(MJsN&zb`;60_Vp5ehtI0;R>SzJ!+;cytC#1wB~jxLE@tt7Y-XJCRs1Xjn(&=eZr?HjuoM& zgQGTsd}iABY&mdtW;}`Mi^e9Y7NnJl}-JMczC&NA3rq2bFgUwVnn%HN#*tA zo@)WPY24Afd;3O`d`q4RS@j9v5)f}Y9)O~8NPJjA1$X0PylConDJiaILdc5@ny(hl0_2$c8+1jq& zadmQ0A&4VE)HP+P{Pa?*OAD(+JiG!gxm=8Vm97@IC*y6!*=)UeB@QPp^VLr0>!^Dl zDaMo&rNXl=HUcLVH?0^m;k@H#I_wvva8UB+A+c2*jYdBM~f; zC(dZjRXO3MUqoKmi(Hcx4X`ognA55je9Qh=RMlYp^Fcp3&IS{CgA{1tHNsTHmMRWv zN={(TNHyceEL@l|w24x@0YEeLWaET^WRYe*NJ&XfR4{S8-lkD@SwOV)}Ms z-&WtnxBTc(jxlGV=y6EG_gYCg7heIw0_B0{ zZ->Bj6?Yad=w@f=3gMCK?a6uh^7Ua3Ktia0#$&h6HWtP&CtmveAo$Q2UXht>x(X7L z^CN|$i|&Bj0ed2Bpa#im{nu}|-)!Ft;9@VfkL#lYluAOMjDJVz0p$nXVLlpb(9K_c z{;0kcd>BhBd`um?3^B_#EHA?#?8RAkULwpic!uRZO7IPs)*8lpc z5P`R9vRZ@%6d%YffU`pE-kDxzJTCe@jiC+3%F+mCLdl*?dRV~KUDAQgE6!VnMBKN1 zlwnNGQmCnKu~QNu3>M~8B+Uhbm+?ihVu+EuyHanVQG5X&QHMB}5+17P22S?^viw~$ zk_jD2pJk_n z0Dg#iCu<+BN&k1ZrXGwsd<21sLI>EpN{A?@!4>k)3+Uq-bP*^atSJxjBhnSf9SRtD zKzM10u#PSuJtqq{_*w1DmATRx8Eh-p%;_;M4MHS}NCzp`M2AviXrP20`W`W*3J-vZ z1^H6M-FXQ7Zr|}h-`@TAzPEAzkuS$x!Jt$KJk3C8n37+WOi}{&Qde;6fDOuM*$DHR zpAr4)44H0l4cQlmk7xXy$@?nh3<`!O31NwVBP3f7DKDc4Z8w#nqTW zk%^v(xrTHl^bt?x6n(mTG6fPd)J(qGT7T=_ovTf*f}#Tv)v8P%sy=P)-M5hBJ%}&@ zs(kDvSw@kTMZ3SSEF-x=F*N#fQl_hvB1wn^t0zR(%-XhOZW-j3$o&BZCxP>IVt85?vHHfHNZ^W6-41=s>g*E8&(Ckh%Qix6H_US|OE)y69JX%$am z|8V?y{TB~Y8{??-#gcJ*uxBN z7-nN*&vx2(4xe|fw{`64ti>MnL@O@QMsRxRT>m`mc)IT!(F za7o%r!L=6IaP8f9lkcPFqzA+eZP@7Fl&hf3UZ_96B9P_fkufgRtVO(B7~~u2D$-a8 zk1WNvjx&Y;CCYTJe_$0qf@lo}dbM*sI+T30$2(h~O~kd%_4}Ah%1^Jo=6vqZ zO7S1SF{vF1-e~308CF)hwWk_E1i|I(nJ%WSM$XNTz6uvvDqNr9HB+PnJAAob^Q46|BRis472Vjg z04L;$!Sg@G^-c1mhEoY?D26l8TdB$wFQD?!mg(uGWU!>Tx@5q}-dsiLbpl$+5^SWb zzswXUvx#`({2;F5@y__c5Fq(P>i{cLd9s69kKmZI|+3(wK`49=wxUYYCfYhBU{1iAcbDy z=P5DCWeV>1#k9A8wkv%+^I=eNF29dwVW}7ALx8BQ*zIR66~>Ma(W#;(7S7bOG(Z z|IsJ+-+L4)gYorCKXw91yGShkc^&DZ{&`z_msjOB(h4&solV5!0JMs55BRYVb&nau=hWc0WfuU*_Ed5htkqxt=so8X3Z4;f$F;jZB#TKSdKF3HLF@vQu_lp z2n`y%@bi38dKdXdn&mb!j`trf`v70fO;xSY#(OVRCPq_~z;&%WQRx&fw9M)wNdaou zW?Jhbw{)Xx$nsKxd|}`qeI=Rw!?Hqv+JF*}tA)Ek!owO3Rmt3#WPH5D4yc5FFo*(C zv5vT;$7tX*^jK2DpJ>{T@NKA5#A3ck5MI%h@dI@PDN{RKw28I2KGB@|qWl+bk;+pI z-7E4d4|kBuy#WO=D6z5N0&W%-84ZosLO@5rFG=V+#>IymEoJ(x5YzFYML8Wm4>*09 zYx-6#_0{C`kUO#sLZy6U9s!#2Wh^)D{9LbklnS3Bmf z;~0=SwxABV9;9mgLJ8SKmZFgnq=PJAvp3AxpzvNkEBKGsO_Q2-gh>eRvgjN}%8;sR zB!)M1a!mtEKO}U=Og0a*<9Eh&wiIwA&-a^*ZAd^+fly8dlRSm`z1dJ|9Gf5_;!xO&xL9T2ZpGU?MZN0E9;FOqh3ox)mYSnJg zqE8BVm{(rHU~z`Y)0q-QscU3b&kiaJ#F=48xhpLSyvUT7QNOLON8>QDHAXb&D6fx) z^7;e|kQ4{2K<}x_`IN?G_%zjLh8lX+h!6x0xxpuDoAsr-m2JnYS-J$JgMC{OE?YVa zVvbtSSi5Ztaw;WMGcxDov{no@>d<+=8ja!DRhnl#QQZRC4VW{4rAU>hLah^xzUV_l ztj)GW61w451XEb`7b$hGM;PTwYPLuA+rlzQG}dxZt3IkhzLtwY-m>5>*xBo5ASp(= zxIWL{W$BC_(n4eDy6q>WQe9<39F^sau6?}bd!mDU?TI6WNY zDR^17#pAJ!Q=$a(qOC{)zmk^?Cmz#8@D?5%EUH`KEmOF>ZD7c75kYwwez{0giw3O8 zPNM^!bt+@*#Zw#~U;R5Ryax_2j{N5rsl!vMhORU41- zM(?E98I>p=i>0F>2|1`02TcdPorstn$O&DLxj;8C%1>ZQ4OlK78koDJue~fV`E|5a 
From a166e51d938e859bc2a0a1c1b0347c8d0e7962b3 Mon Sep 17 00:00:00 2001
From: Josh Izaac
Date: Tue, 10 Mar 2020 15:10:33 +1030
Subject: [PATCH 316/335] docs styling

---
 doc/xanadu_theme/layout.html         | 2 +-
 doc/xanadu_theme/static/xanadu.css_t | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/doc/xanadu_theme/layout.html b/doc/xanadu_theme/layout.html
index d1aac538f..a14ea4ca6 100644
--- a/doc/xanadu_theme/layout.html
+++ b/doc/xanadu_theme/layout.html
@@ -177,7 +177,7 @@
 {%- block document_wrapper %}
 {%- block document %}
-
+
{% block breadcrumbs %}