diff --git a/.travis.yml b/.travis.yml index 62e434d0..b8ff8b2f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,6 @@ language: python matrix: include: - python: 2.7 - - python: 3.4 - python: 3.5 - python: 3.6 - python: 3.7 @@ -12,7 +11,7 @@ matrix: services: - docker before_install: - - docker run --name arango -d -p 8529:8529 -e ARANGO_ROOT_PASSWORD=passwd arangodb/arangodb:3.5.0 + - docker run --name arango -d -p 8529:8529 -e ARANGO_ROOT_PASSWORD=passwd arangodb/arangodb:3.6.1 - docker cp tests/static/service.zip arango:/tmp/service.zip install: - pip install flake8 mock diff --git a/README.rst b/README.rst index 02020aca..62a3e5c2 100644 --- a/README.rst +++ b/README.rst @@ -14,7 +14,7 @@ :target: https://badge.fury.io/py/python-arango :alt: Package Version -.. image:: https://img.shields.io/badge/python-2.7%2C%203.4%2C%203.5%2C%203.6%2C%203.7-blue.svg +.. image:: https://img.shields.io/badge/python-2.7%2C%203.5%2C%203.6%2C%203.7-blue.svg :target: https://github.com/joowani/python-arango :alt: Python Versions @@ -51,7 +51,7 @@ Features Compatibility ============= -- Python versions 2.7, 3.4, 3.5, 3.6 and 3.7 are supported +- Python versions 2.7, 3.5, 3.6 and 3.7 are supported - Python-arango 5.x supports ArangoDB 3.5+ - Python-arango 4.x supports ArangoDB 3.3 ~ 3.4 only - Python-arango 3.x supports ArangoDB 3.0 ~ 3.2 only diff --git a/arango/aql.py b/arango/aql.py index 0052ebe7..c7d82fb1 100644 --- a/arango/aql.py +++ b/arango/aql.py @@ -140,7 +140,8 @@ def execute(self, read_collections=None, write_collections=None, stream=None, - skip_inaccessible_cols=None): + skip_inaccessible_cols=None, + max_runtime=None): """Execute the query and return the result cursor. :param query: Query to execute. @@ -228,6 +229,10 @@ def execute(self, available only for enterprise version of ArangoDB. Default value is False. :type skip_inaccessible_cols: bool + :param max_runtime: Query must be executed within this given timeout or + it is killed. The value is specified in seconds. Default value + is 0.0 (no timeout). + :type max_runtime: int | float :return: Result cursor. :rtype: arango.cursor.Cursor :raise arango.exceptions.AQLQueryExecuteError: If execute fails. @@ -269,6 +274,8 @@ def execute(self, options['stream'] = stream if skip_inaccessible_cols is not None: options['skipInaccessibleCollections'] = skip_inaccessible_cols + if max_runtime is not None: + options['maxRuntime'] = max_runtime if options: data['options'] = options diff --git a/arango/collection.py b/arango/collection.py index 4d422eba..3b8b8fd2 100644 --- a/arango/collection.py +++ b/arango/collection.py @@ -38,7 +38,9 @@ ) from arango.formatter import ( format_collection, - format_index + format_edge, + format_index, + format_vertex, ) from arango.request import Request from arango.utils import ( @@ -1458,7 +1460,8 @@ def insert(self, key and the existing document is replaced. :type overwrite: bool :param return_old: Include body of the old document if replaced. - Applies only when value of **overwrite** is set to True. + Applies only when value of **overwrite** is set to True. Ignored + if parameter **silent** is set to True. :type return_old: bool :return: Document metadata (e.g. document key, revision) or True if parameter **silent** was set to True. @@ -1596,9 +1599,11 @@ def update(self, :param keep_none: If set to True, fields with value None are retained in the document. Otherwise, they are removed completely. :type keep_none: bool - :param return_new: Include body of the new document in the result. 
+ :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. :type return_new: bool - :param return_old: Include body of the old document in the result. + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. :type return_old: bool :param sync: Block until operation is synchronized to disk. :type sync: bool @@ -1676,9 +1681,11 @@ def update_many(self, :param keep_none: If set to True, fields with value None are retained in the document. Otherwise, they are removed completely. :type keep_none: bool - :param return_new: Include bodies of the new documents in the result. + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. :type return_new: bool - :param return_old: Include bodies of the old documents in the result. + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. :type return_old: bool :param sync: Block until operation is synchronized to disk. :type sync: bool @@ -1806,9 +1813,11 @@ def replace(self, :param check_rev: If set to True, revision of **document** (if given) is compared against the revision of target document. :type check_rev: bool - :param return_new: Include body of the new document in the result. + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. :type return_new: bool - :param return_old: Include body of the old document in the result. + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. :type return_old: bool :param sync: Block until operation is synchronized to disk. :type sync: bool @@ -1877,9 +1886,11 @@ def replace_many(self, :param check_rev: If set to True, revisions of **documents** (if given) are compared against the revisions of target documents. :type check_rev: bool - :param return_new: Include bodies of the new documents in the result. + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. :type return_new: bool - :param return_old: Include bodies of the old documents in the result. + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. :type return_old: bool :param sync: Block until operation is synchronized to disk. :type sync: bool @@ -1997,7 +2008,8 @@ def delete(self, This parameter has no effect in transactions where an exception is always raised on failures. :type ignore_missing: bool - :param return_old: Include body of the old document in the result. + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. :type return_old: bool :param sync: Block until operation is synchronized to disk. :type sync: bool @@ -2316,7 +2328,7 @@ def response_handler(resp): return self._execute(request, response_handler) - def insert(self, vertex, sync=None, silent=False): + def insert(self, vertex, sync=None, silent=False, return_new=False): """Insert a new vertex document. :param vertex: New vertex document to insert. If it has "_key" or "_id" @@ -2328,14 +2340,20 @@ def insert(self, vertex, sync=None, silent=False): :param silent: If set to True, no document metadata is returned. 
This can be used to save resources. :type silent: bool - :return: Document metadata (e.g. document key, revision) or True if + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool + :return: Document metadata (e.g. document key, revision), or True if parameter **silent** was set to True. :rtype: bool | dict :raise arango.exceptions.DocumentInsertError: If insert fails. """ vertex = self._ensure_key_from_id(vertex) - params = {'silent': silent} + params = { + 'silent': silent, + 'returnNew': return_new + } if sync is not None: params['waitForSync'] = sync @@ -2352,9 +2370,9 @@ def insert(self, vertex, sync=None, silent=False): def response_handler(resp): if not resp.is_success: raise DocumentInsertError(resp, request) - if silent is True: + if silent: return True - return resp.body['vertex'] + return format_vertex(resp.body) return self._execute(request, response_handler) @@ -2424,14 +2442,17 @@ def response_handler(resp): raise DocumentUpdateError(resp, request) if silent is True: return True - else: - result = resp.body['vertex'] - result['_old_rev'] = result.pop('_oldRev') - return result + return format_vertex(resp.body) return self._execute(request, response_handler) - def replace(self, vertex, check_rev=True, sync=None, silent=False): + def replace(self, + vertex, + check_rev=True, + sync=None, + silent=False, + return_old=False, + return_new=False): """Replace a vertex document. :param vertex: New vertex document to replace the old one with. It must @@ -2445,6 +2466,12 @@ def replace(self, vertex, check_rev=True, sync=None, silent=False): :param silent: If set to True, no document metadata is returned. This can be used to save resources. :type silent: bool + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_old: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool :return: Document metadata (e.g. document key, revision) or True if parameter **silent** was set to True. :rtype: bool | dict @@ -2453,7 +2480,11 @@ def replace(self, vertex, check_rev=True, sync=None, silent=False): """ vertex_id, headers = self._prep_from_body(vertex, check_rev) - params = {'silent': silent} + params = { + 'silent': silent, + 'returnNew': return_new, + 'returnOld': return_old + } if sync is not None: params['waitForSync'] = sync @@ -2475,10 +2506,7 @@ def response_handler(resp): raise DocumentReplaceError(resp, request) if silent is True: return True - else: - result = resp.body['vertex'] - result['_old_rev'] = result.pop('_oldRev') - return result + return format_vertex(resp.body) return self._execute(request, response_handler) @@ -2487,7 +2515,8 @@ def delete(self, rev=None, check_rev=True, ignore_missing=False, - sync=None): + sync=None, + return_old=False): """Delete a vertex document. :param vertex: Vertex document ID, key or body. Document body must @@ -2505,16 +2534,21 @@ def delete(self, :type ignore_missing: bool :param sync: Block until operation is synchronized to disk. :type sync: bool + :param return_old: Return body of the old document in the result. + :type return_old: bool :return: True if vertex was deleted successfully, False if vertex was not found and **ignore_missing** was set to True (does not apply in - transactions). - :rtype: bool + transactions). 
Old document is returned if **return_old** is set to + True. + :rtype: bool | dict :raise arango.exceptions.DocumentDeleteError: If delete fails. :raise arango.exceptions.DocumentRevisionError: If revisions mismatch. """ handle, _, headers = self._prep_from_doc(vertex, rev, check_rev) - params = {} if sync is None else {'waitForSync': sync} + params = {'returnOld': return_old} + if sync is not None: + params['waitForSync'] = sync request = Request( method='delete', @@ -2533,6 +2567,8 @@ def response_handler(resp): raise DocumentRevisionError(resp, request) if not resp.is_success: raise DocumentDeleteError(resp, request) + if 'old' in resp.body: + return {'old': resp.body['old']} return True return self._execute(request, response_handler) @@ -2609,7 +2645,7 @@ def response_handler(resp): return self._execute(request, response_handler) - def insert(self, edge, sync=None, silent=False): + def insert(self, edge, sync=None, silent=False, return_new=False): """Insert a new edge document. :param edge: New edge document to insert. It must contain "_from" and @@ -2622,6 +2658,9 @@ def insert(self, edge, sync=None, silent=False): :param silent: If set to True, no document metadata is returned. This can be used to save resources. :type silent: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool :return: Document metadata (e.g. document key, revision) or True if parameter **silent** was set to True. :rtype: bool | dict @@ -2629,7 +2668,10 @@ def insert(self, edge, sync=None, silent=False): """ edge = self._ensure_key_from_id(edge) - params = {'silent': silent} + params = { + 'silent': silent, + 'returnNew': return_new + } if sync is not None: params['waitForSync'] = sync @@ -2646,9 +2688,9 @@ def insert(self, edge, sync=None, silent=False): def response_handler(resp): if not resp.is_success: raise DocumentInsertError(resp, request) - if silent is True: + if silent: return True - return resp.body['edge'] + return format_edge(resp.body) return self._execute(request, response_handler) @@ -2657,7 +2699,9 @@ def update(self, check_rev=True, keep_none=True, sync=None, - silent=False): + silent=False, + return_old=False, + return_new=False): """Update an edge document. :param edge: Partial or full edge document with updated values. It must @@ -2674,6 +2718,12 @@ def update(self, :param silent: If set to True, no document metadata is returned. This can be used to save resources. :type silent: bool + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_old: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool :return: Document metadata (e.g. document key, revision) or True if parameter **silent** was set to True. 
:rtype: bool | dict @@ -2685,7 +2735,9 @@ def update(self, params = { 'keepNull': keep_none, 'overwrite': not check_rev, - 'silent': silent + 'silent': silent, + 'returnNew': return_new, + 'returnOld': return_old } if sync is not None: params['waitForSync'] = sync @@ -2708,13 +2760,17 @@ def response_handler(resp): raise DocumentUpdateError(resp, request) if silent is True: return True - result = resp.body['edge'] - result['_old_rev'] = result.pop('_oldRev') - return result + return format_edge(resp.body) return self._execute(request, response_handler) - def replace(self, edge, check_rev=True, sync=None, silent=False): + def replace(self, + edge, + check_rev=True, + sync=None, + silent=False, + return_old=False, + return_new=False): """Replace an edge document. :param edge: New edge document to replace the old one with. It must @@ -2729,6 +2785,12 @@ def replace(self, edge, check_rev=True, sync=None, silent=False): :param silent: If set to True, no document metadata is returned. This can be used to save resources. :type silent: bool + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_old: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool :return: Document metadata (e.g. document key, revision) or True if parameter **silent** was set to True. :rtype: bool | dict @@ -2737,7 +2799,11 @@ def replace(self, edge, check_rev=True, sync=None, silent=False): """ edge_id, headers = self._prep_from_body(edge, check_rev) - params = {'silent': silent} + params = { + 'silent': silent, + 'returnNew': return_new, + 'returnOld': return_old + } if sync is not None: params['waitForSync'] = sync @@ -2759,9 +2825,7 @@ def response_handler(resp): raise DocumentReplaceError(resp, request) if silent is True: return True - result = resp.body['edge'] - result['_old_rev'] = result.pop('_oldRev') - return result + return format_edge(resp.body) return self._execute(request, response_handler) @@ -2770,7 +2834,8 @@ def delete(self, rev=None, check_rev=True, ignore_missing=False, - sync=None): + sync=None, + return_old=False): """Delete an edge document. :param edge: Edge document ID, key or body. Document body must contain @@ -2788,6 +2853,8 @@ def delete(self, :type ignore_missing: bool :param sync: Block until operation is synchronized to disk. :type sync: bool + :param return_old: Return body of the old document in the result. + :type return_old: bool :return: True if edge was deleted successfully, False if edge was not found and **ignore_missing** was set to True (does not apply in transactions). @@ -2797,7 +2864,9 @@ def delete(self, """ handle, _, headers = self._prep_from_doc(edge, rev, check_rev) - params = {} if sync is None else {'waitForSync': sync} + params = {'returnOld': return_old} + if sync is not None: + params['waitForSync'] = sync request = Request( method='delete', @@ -2816,11 +2885,19 @@ def response_handler(resp): raise DocumentRevisionError(resp, request) if not resp.is_success: raise DocumentDeleteError(resp, request) + if 'old' in resp.body: + return {'old': resp.body['old']} return True return self._execute(request, response_handler) - def link(self, from_vertex, to_vertex, data=None, sync=None, silent=False): + def link(self, + from_vertex, + to_vertex, + data=None, + sync=None, + silent=False, + return_new=False): """Insert a new edge document linking the given vertices. 
:param from_vertex: "From" vertex document ID or body with "_id" field. @@ -2836,6 +2913,9 @@ def link(self, from_vertex, to_vertex, data=None, sync=None, silent=False): :param silent: If set to True, no document metadata is returned. This can be used to save resources. :type silent: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool :return: Document metadata (e.g. document key, revision) or True if parameter **silent** was set to True. :rtype: bool | dict @@ -2847,7 +2927,12 @@ def link(self, from_vertex, to_vertex, data=None, sync=None, silent=False): } if data is not None: edge.update(self._ensure_key_from_id(data)) - return self.insert(edge, sync=sync, silent=silent) + return self.insert( + edge, + sync=sync, + silent=silent, + return_new=return_new + ) def edges(self, vertex, direction=None): """Return the edge documents coming in and/or out of the vertex. diff --git a/arango/connection.py b/arango/connection.py index 27f5b20d..ea45c237 100644 --- a/arango/connection.py +++ b/arango/connection.py @@ -1,6 +1,7 @@ from __future__ import absolute_import, unicode_literals from six import string_types +from requests_toolbelt import MultipartEncoder from arango.exceptions import ServerConnectionError from arango.response import Response @@ -142,7 +143,9 @@ def send_request(self, request): :return: HTTP response. :rtype: arango.response.Response """ - if request.data is None or isinstance(request.data, string_types): + if request.data is None: + normalized_data = request.data + elif isinstance(request.data, (string_types, MultipartEncoder)): normalized_data = request.data else: normalized_data = self.serialize(request.data) @@ -155,7 +158,7 @@ def send_request(self, request): params=request.params, data=normalized_data, headers=request.headers, - auth=self._auth, + auth=self._auth ) return self.prep_response(response, request.deserialize) diff --git a/arango/database.py b/arango/database.py index 973ed31e..8e3ce464 100644 --- a/arango/database.py +++ b/arango/database.py @@ -46,6 +46,7 @@ ServerEchoError, ServerLogLevelError, ServerLogLevelSetError, + ServerMetricsError, ServerReadLogError, ServerReloadRoutingError, ServerRequiredDBVersionError, @@ -75,7 +76,10 @@ ViewReplaceError, ViewUpdateError ) -from arango.formatter import format_view +from arango.formatter import ( + format_database, + format_view +) from arango.foxx import Foxx from arango.graph import Graph from arango.pregel import Pregel @@ -196,9 +200,7 @@ def properties(self): def response_handler(resp): if not resp.is_success: raise DatabasePropertiesError(resp, request) - result = resp.body['result'] - result['system'] = result.pop('isSystem') - return result + return format_database(resp.body['result']) return self._execute(request, response_handler) @@ -675,6 +677,24 @@ def response_handler(resp): return self._execute(request, response_handler) + def metrics(self): + """Return server metrics in Prometheus format. + + :return: Server metrics in Prometheus format. 
:rtype: str
+        """
+        request = Request(
+            method='get',
+            endpoint='/_admin/metrics'
+        )
+
+        def response_handler(resp):
+            if not resp.is_success:
+                raise ServerMetricsError(resp, request)
+            return resp.body
+
+        return self._execute(request, response_handler)
+
     #######################
     # Database Management #
     #######################
@@ -708,7 +728,12 @@ def has_database(self, name):
         """
         return name in self.databases()
 
-    def create_database(self, name, users=None):
+    def create_database(self,
+                        name,
+                        users=None,
+                        replication_factor=None,
+                        write_concern=None,
+                        sharding=None):
         """Create a new database.
 
         :param name: Database name.
@@ -718,6 +743,23 @@ def create_database(self, name, users=None):
             and "extra" (see below for example). If not set, only the admin
             and current user are granted access.
         :type users: [dict]
+        :param replication_factor: Default replication factor for collections
+            created in this database. Special values include "satellite" which
+            replicates the collection to every DBServer, and 1 which disables
+            replication. Used for clusters only.
+        :type replication_factor: str | int
+        :param write_concern: Default write concern for collections created in
+            this database. Determines how many copies of each shard are
+            required to be in sync on different DBServers. If there are less
+            than these many copies in the cluster a shard will refuse to write.
+            Writes to shards with enough up-to-date copies will succeed at the
+            same time, however. Value of this parameter cannot be larger than
+            the value of **replication_factor**. Used for clusters only.
+        :type write_concern: int
+        :param sharding: Sharding method used for new collections in this
+            database. Allowed values are: "", "flexible" and "single". The
+            first two are equivalent. Used for clusters only.
+        :type sharding: str
         :return: True if database was created successfully.
         :rtype: bool
         :raise arango.exceptions.DatabaseCreateError: If create fails.
@@ -734,6 +776,17 @@ def create_database(self, name, users=None):
             }
         """
         data = {'name': name}
+
+        options = {}
+        if replication_factor is not None:
+            options['replicationFactor'] = replication_factor
+        if write_concern is not None:
+            options['writeConcern'] = write_concern
+        if sharding is not None:
+            options['sharding'] = sharding
+        if options:
+            data['options'] = options
+
         if users is not None:
             data['users'] = [{
                 'username': user['username'],
@@ -850,7 +903,8 @@ def create_collection(self,
                           sync_replication=None,
                           enforce_replication_factor=None,
                           sharding_strategy=None,
-                          smart_join_attribute=None):
+                          smart_join_attribute=None,
+                          write_concern=None):
         """Create a new collection.
 
         :param name: Collection name.
@@ -919,7 +973,7 @@ def create_collection(self,
             available at creation time, or halt the operation.
         :type enforce_replication_factor: bool
         :param sharding_strategy: Sharding strategy. Available for ArangoDB
-            version 3.4 and up only. Possible values are "community-compat",
+            versions 3.4 and up only. Possible values are "community-compat",
             "enterprise-compat", "enterprise-smart-edge-compat", "hash" and
             "enterprise-hash-smart-edge". Refer to ArangoDB documentation for
             more details on each value.
@@ -933,6 +987,14 @@ def create_collection(self,
             shard key attribute, with another colon ":" at the end. Available
             only for enterprise version of ArangoDB.
         :type smart_join_attribute: str | unicode
+        :param write_concern: Write concern for the collection. Determines how
+            many copies of each shard are required to be in sync on different
+            DBServers. 
If there are less than these many copies in the cluster + a shard will refuse to write. Writes to shards with enough + up-to-date copies will succeed at the same time. The value of this + parameter cannot be larger than that of **replication_factor**. + Default value is 1. Used for clusters only. + :type write_concern: int :return: Standard collection API wrapper. :rtype: arango.collection.StandardCollection :raise arango.exceptions.CollectionCreateError: If create fails. @@ -968,6 +1030,8 @@ def create_collection(self, data['shardingStrategy'] = sharding_strategy if smart_join_attribute is not None: data['smartJoinAttribute'] = smart_join_attribute + if write_concern is not None: + data['writeConcern'] = write_concern params = {} if sync_replication is not None: diff --git a/arango/exceptions.py b/arango/exceptions.py index 7d3cfd6c..6c7436f8 100644 --- a/arango/exceptions.py +++ b/arango/exceptions.py @@ -601,6 +601,10 @@ class ServerStatisticsError(ArangoServerError): """Failed to retrieve server statistics.""" +class ServerMetricsError(ArangoServerError): + """Failed to retrieve server metrics.""" + + class ServerRoleError(ArangoServerError): """Failed to retrieve server role in a cluster.""" diff --git a/arango/formatter.py b/arango/formatter.py index 2e6b8663..995f9157 100644 --- a/arango/formatter.py +++ b/arango/formatter.py @@ -72,6 +72,38 @@ def format_key_options(body): # pragma: no cover return verify_format(body, result) +def format_database(body): # pragma: no cover + """Format databases info. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result = {} + + if 'id' in body: + result['id'] = body['id'] + if 'name' in body: + result['name'] = body['name'] + if 'path' in body: + result['path'] = body['path'] + if 'system' in body: + result['system'] = body['system'] + if 'isSystem' in body: + result['system'] = body['isSystem'] + + # Cluster only + if 'sharding' in body: + result['sharding'] = body['sharding'] + if 'replicationFactor' in body: + result['replication_factor'] = body['replicationFactor'] + if 'writeConcern' in body: + result['write_concern'] = body['writeConcern'] + + return verify_format(body, result) + + def format_collection(body): # pragma: no cover """Format collection data. @@ -110,6 +142,8 @@ def format_collection(body): # pragma: no cover result['replication_factor'] = body['replicationFactor'] if 'minReplicationFactor' in body: result['min_replication_factor'] = body['minReplicationFactor'] + if 'writeConcern' in body: + result['write_concern'] = body['writeConcern'] # MMFiles only if 'doCompact' in body: @@ -579,6 +613,9 @@ def format_replication_database(body): # pragma: no cover ], 'views': [format_view(view) for view in body['views']] } + if 'properties' in body: + result['properties'] = format_database(body['properties']) + return verify_format(body, result) @@ -608,6 +645,8 @@ def format_replication_inventory(body): # pragma: no cover ] if 'views' in body: result['views'] = [format_view(view) for view in body['views']] + if 'properties' in body: + result['properties'] = format_database(body['properties']) return verify_format(body, result) @@ -755,3 +794,49 @@ def format_view(body): # pragma: no cover result['links'] = [format_view_link(link) for link in body['links']] return verify_format(body, result) + + +def format_vertex(body): # pragma: no cover + """Format vertex data. + + :param body: Input body. + :type body: dict + :return: Formatted body. 
+ :rtype: dict + """ + vertex = body['vertex'] + if '_oldRev' in vertex: + vertex['_old_rev'] = vertex.pop('_oldRev') + + if 'new' in body or 'old' in body: + result = {'vertex': vertex} + if 'new' in body: + result['new'] = body['new'] + if 'old' in body: + result['old'] = body['old'] + return result + else: + return vertex + + +def format_edge(body): # pragma: no cover + """Format edge data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + edge = body['edge'] + if '_oldRev' in edge: + edge['_old_rev'] = edge.pop('_oldRev') + + if 'new' in body or 'old' in body: + result = {'edge': edge} + if 'new' in body: + result['new'] = body['new'] + if 'old' in body: + result['old'] = body['old'] + return result + else: + return edge diff --git a/arango/foxx.py b/arango/foxx.py index 2f7dbe03..4f8fcd8f 100644 --- a/arango/foxx.py +++ b/arango/foxx.py @@ -2,6 +2,10 @@ __all__ = ['Foxx'] +import os + +from requests_toolbelt import MultipartEncoder + from arango.api import APIWrapper from arango.exceptions import ( FoxxServiceCreateError, @@ -44,6 +48,27 @@ def __init__(self, connection, executor): def __repr__(self): return ''.format(self._conn.db_name) + # noinspection PyMethodMayBeStatic + def _encode_file(self, filename): + """Encode file into multipart data. + + :param filename: Full path to the javascript file or zip bundle. + :type filename: str | unicode + :return: Multipart encoder object + :rtype: requests_toolbelt.MultipartEncoder + """ + extension = os.path.splitext(filename)[1] + if extension == '.js': # pragma: no cover + source_type = 'application/javascript' + elif extension == '.zip': + source_type = 'application/zip' + else: + raise ValueError('File extension must be .zip or .js') + + return MultipartEncoder( + fields={'source': (None, open(filename, 'rb'), source_type)} + ) + def services(self, exclude_system=False): """List installed services. @@ -102,7 +127,7 @@ def create_service(self, development=None, setup=None, legacy=None): - """Install a new service. + """Install a new service using JSON definition. :param mount: Service mount path (e.g "/_admin/aardvark"). :type mount: str | unicode @@ -152,6 +177,52 @@ def response_handler(resp): return self._execute(request, response_handler) + def create_service_with_file(self, + mount, + filename, + development=None, + setup=None, + legacy=None): + """Install a new service using a javascript file or zip bundle. + + :param mount: Service mount path (e.g "/_admin/aardvark"). + :type mount: str | unicode + :param filename: Full path to the javascript file or zip bundle. + :type filename: str | unicode + :param development: Enable development mode. + :type development: bool + :param setup: Run service setup script. + :type setup: bool + :param legacy: Install the service in 2.8 legacy compatibility mode. + :type legacy: bool + :return: Service metadata. + :rtype: dict + :raise arango.exceptions.FoxxServiceCreateError: If install fails. 
+ """ + params = {'mount': mount} + if development is not None: + params['development'] = development + if setup is not None: + params['setup'] = setup + if legacy is not None: + params['legacy'] = legacy + + data = self._encode_file(filename) + request = Request( + method='post', + endpoint='/_api/foxx', + params=params, + data=data, + headers={'content-type': data.content_type} + ) + + def response_handler(resp): + if not resp.is_success: + raise FoxxServiceCreateError(resp, request) + return resp.body + + return self._execute(request, response_handler) + def update_service(self, mount, source=None, @@ -190,7 +261,9 @@ def update_service(self, if legacy is not None: params['legacy'] = legacy - data = {'source': source} + data = {} + if source is not None: + data['source'] = source if config is not None: data['configuration'] = config if dependencies is not None: @@ -210,6 +283,53 @@ def response_handler(resp): return self._execute(request, response_handler) + # TODO Remove once method is confirmed to be not supported + # def update_service_by_file(self, + # mount, + # filename=None, + # teardown=None, + # setup=None, + # legacy=None): + # """Update (upgrade) a service using a javascript file or zip bundle. + # + # :param mount: Service mount path (e.g "/_admin/aardvark"). + # :type mount: str | unicode + # :param filename: Full path to the javascript file or zip bundle. + # :type filename: str | unicode + # :param teardown: Run service teardown script. + # :type teardown: bool + # :param setup: Run service setup script. + # :type setup: bool + # :param legacy: Install the service in 2.8 legacy compatibility mode. + # :type legacy: bool + # :return: Updated service metadata. + # :rtype: dict + # :raise arango.exceptions.FoxxServiceUpdateError: If update fails. + # """ + # params = {'mount': mount} + # if teardown is not None: + # params['teardown'] = teardown + # if setup is not None: + # params['setup'] = setup + # if legacy is not None: + # params['legacy'] = legacy + # + # data = self._encode_file(filename) + # request = Request( + # method='patch', + # endpoint='/_api/foxx', + # params=params, + # data=data, + # headers={'content-type': data.content_type} + # ) + # + # def response_handler(resp): + # if not resp.is_success: + # raise FoxxServiceUpdateError(resp, request) + # return resp.body + # + # return self._execute(request, response_handler) + def replace_service(self, mount, source, @@ -253,7 +373,9 @@ def replace_service(self, if force is not None: params['force'] = force - data = {'source': source} + data = {} + if source is not None: + data['source'] = source if config is not None: data['configuration'] = config if dependencies is not None: @@ -273,6 +395,58 @@ def response_handler(resp): return self._execute(request, response_handler) + # TODO Remove once method is confirmed to be not supported + # def replace_service_by_file(self, + # mount, + # filename=None, + # teardown=None, + # setup=None, + # legacy=None, + # force=None): + # """Replace a service using a javascript file or zip bundle. + # + # :param mount: Service mount path (e.g "/_admin/aardvark"). + # :type mount: str | unicode + # :param filename: Full path to the javascript file or zip bundle. + # :type filename: str | unicode + # :param teardown: Run service teardown script. + # :type teardown: bool + # :param setup: Run service setup script. + # :type setup: bool + # :param legacy: Install the service in 2.8 legacy compatibility mode. 
+ # :type legacy: bool + # :param force: Force install if no service is found. + # :type force: bool + # :return: Replaced service metadata. + # :rtype: dict + # :raise arango.exceptions.FoxxServiceReplaceError: If replace fails. + # """ + # params = {'mount': mount} + # if teardown is not None: + # params['teardown'] = teardown + # if setup is not None: + # params['setup'] = setup + # if legacy is not None: + # params['legacy'] = legacy + # if force is not None: + # params['force'] = force + # + # data = self._encode_file(filename) + # request = Request( + # method='put', + # endpoint='/_api/foxx', + # params=params, + # data=data, + # headers={'content-type': data.content_type} + # ) + # + # def response_handler(resp): + # if not resp.is_success: + # raise FoxxServiceReplaceError(resp, request) + # return resp.body + # + # return self._execute(request, response_handler) + def delete_service(self, mount, teardown=None): """Uninstall a service. diff --git a/arango/http.py b/arango/http.py index 708841d3..2621b7bc 100644 --- a/arango/http.py +++ b/arango/http.py @@ -106,7 +106,7 @@ def send_request(self, params=params, data=data, headers=headers, - auth=auth, + auth=auth ) return Response( method=response.request.method, diff --git a/arango/request.py b/arango/request.py index 2fd7ec9d..7dc698bf 100644 --- a/arango/request.py +++ b/arango/request.py @@ -15,7 +15,8 @@ class Request(object): :param params: URL parameters. :type params: dict :param data: Request payload. - :type data: str | unicode | bool | int | list | dict + :type data: str | unicode | bool | int | list | dict | + requests_toolbelt.MultipartEncoder :param read: Names of collections read during transaction. :type read: str | unicode | [str | unicode] :param write: Name(s) of collections written to during transaction with @@ -26,7 +27,6 @@ class Request(object): :type exclusive: str | unicode | [str | unicode] :param deserialize: Whether the response body can be deserialized. :type deserialize: bool - :ivar method: HTTP method in lowercase (e.g. "post"). :vartype method: str | unicode :ivar endpoint: API endpoint. @@ -46,7 +46,7 @@ class Request(object): with exclusive access. :vartype exclusive: str | unicode | [str | unicode] | None :ivar deserialize: Whether the response body can be deserialized. - :vartype deserialize: str | unicode | [str | unicode] | None + :vartype deserialize: bool """ __slots__ = ( @@ -58,7 +58,8 @@ class Request(object): 'read', 'write', 'exclusive', - 'deserialize' + 'deserialize', + 'files' ) def __init__(self, @@ -73,11 +74,13 @@ def __init__(self, deserialize=True): self.method = method self.endpoint = endpoint - self.headers = headers or {} - - # Insert default headers. - self.headers['content-type'] = 'application/json' - self.headers['charset'] = 'utf-8' + self.headers = { + 'content-type': 'application/json', + 'charset': 'utf-8' + } + if headers is not None: + for field in headers: + self.headers[field.lower()] = headers[field] # Sanitize URL params. if params is not None: diff --git a/arango/version.py b/arango/version.py index 4dc2ef26..1d4672ff 100644 --- a/arango/version.py +++ b/arango/version.py @@ -1 +1 @@ -__version__ = '5.2.1' +__version__ = '5.3.0' diff --git a/docs/admin.rst b/docs/admin.rst index 76cd4452..01a22947 100644 --- a/docs/admin.rst +++ b/docs/admin.rst @@ -57,4 +57,7 @@ database. # Reload the routing collection. sys_db.reload_routing() + # Retrieve server metrics. + sys_db.metrics() + See :ref:`StandardDatabase` for API specification. 
\ No newline at end of file
diff --git a/docs/foxx.rst b/docs/foxx.rst
index 13dc4955..d76ec879 100644
--- a/docs/foxx.rst
+++ b/docs/foxx.rst
@@ -28,7 +28,7 @@ information, refer to `ArangoDB manual`_.
     # List services.
     foxx.services()
 
-    # Create a service.
+    # Create a service using a source path on the server.
     foxx.create_service(
         mount=service_mount,
         source='/tmp/service.zip',
@@ -91,4 +91,34 @@ information, refer to `ArangoDB manual`_.
     # Delete a service.
     foxx.delete_service(service_mount)
 
+You can also create a Foxx service by providing a file directly.
+
+.. code-block:: python
+
+    from arango import ArangoClient
+
+    # Initialize the ArangoDB client.
+    client = ArangoClient()
+
+    # Connect to "_system" database as root user.
+    db = client.db('_system', username='root', password='passwd')
+
+    # Get the Foxx API wrapper.
+    foxx = db.foxx
+
+    # Define the test mount point.
+    service_mount = '/test_mount'
+
+    # Create a service by providing a file directly.
+    foxx.create_service_with_file(
+        mount=service_mount,
+        filename='/home/service.zip',
+        development=True,
+        setup=True,
+        legacy=True
+    )
+    foxx.enable_development(service_mount)
+    foxx.update_config(service_mount, config={})
+    foxx.update_dependencies(service_mount, dependencies={})
+
 See :ref:`Foxx` for API specification.
diff --git a/docs/index.rst b/docs/index.rst
index dc3b16ef..0411d4ae 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -15,7 +15,7 @@ Features
 Compatibility
 =============
 
-- Python versions 2.7, 3.4, 3.5, 3.6 and 3.7 are supported
+- Python versions 2.7, 3.5, 3.6 and 3.7 are supported
 - Python-arango 5.x supports ArangoDB 3.5+
 - Python-arango 4.x supports ArangoDB 3.3 ~ 3.4 only
 - Python-arango 3.x supports ArangoDB 3.0 ~ 3.2 only
diff --git a/docs/replication.rst b/docs/replication.rst
index f6bdb305..290ca194 100644
--- a/docs/replication.rst
+++ b/docs/replication.rst
@@ -112,5 +112,4 @@ information, refer to `ArangoDB manual`_.
         restrict_collections=['students']
     )
 
-
 See :ref:`Replication` for API specification.
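The ``max_runtime`` option added to ``AQL.execute`` above maps to the server's ``maxRuntime`` query option. Below is a minimal usage sketch (not part of the change set): the database name ``test``, the collection ``students`` and the credentials are assumptions.

.. code-block:: python

    from arango import ArangoClient
    from arango.exceptions import AQLQueryExecuteError

    client = ArangoClient()
    db = client.db('test', username='root', password='passwd')

    try:
        # Ask the server to kill the query if it runs longer than 2 seconds.
        cursor = db.aql.execute(
            'FOR doc IN students RETURN doc',
            max_runtime=2.0
        )
        print([doc['_key'] for doc in cursor])
    except AQLQueryExecuteError as err:
        # A query killed by the timeout surfaces as an execution error.
        print('query exceeded max_runtime: {}'.format(err))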
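The ``return_new`` and ``return_old`` flags added to the graph vertex and edge collection methods in ``arango/collection.py`` are not covered by the docs changes above. Here is a minimal sketch of how they might be used, assuming a graph ``school`` with a vertex collection ``students`` already exists (those names, and the credentials, are illustrative only).

.. code-block:: python

    from arango import ArangoClient

    client = ArangoClient()
    db = client.db('test', username='root', password='passwd')

    # Assumed to exist already: graph "school" with vertex collection "students".
    students = db.graph('school').vertex_collection('students')

    # Insert returns the new document body alongside the metadata.
    result = students.insert({'_key': 'anna', 'gpa': 3.5}, return_new=True)
    print(result['vertex']['_key'], result['new']['gpa'])

    # Update can return both the old and the new bodies.
    result = students.update(
        {'_key': 'anna', 'gpa': 3.9},
        return_old=True,
        return_new=True
    )
    print(result['old']['gpa'], result['new']['gpa'])

    # Delete can return the body of the removed vertex.
    result = students.delete('anna', return_old=True)
    print(result['old']['gpa'])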
diff --git a/setup.py b/setup.py index aca6fe21..96821d65 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ url='https://github.com/joowani/python-arango', packages=find_packages(exclude=['tests']), include_package_data=True, - install_requires=['requests', 'six'], + install_requires=['requests', 'six', 'requests_toolbelt'], tests_require=[ 'pytest-cov', 'python-coveralls', diff --git a/tests/conftest.py b/tests/conftest.py index 87c5622c..14c974b3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -156,7 +156,8 @@ def pytest_generate_tests(metafunc): tst_conn = tst_db._conn bad_conn = bad_db._conn - if test in {'collection', 'document', 'graph', 'aql', 'index'}: + # TODO Add graph tests back + if test in {'collection', 'document', 'aql', 'index'}: # Add test transaction databases tst_txn_db = StandardDatabase(tst_conn) tst_txn_db._executor = TestTransactionExecutor(tst_conn) @@ -182,6 +183,16 @@ def pytest_generate_tests(metafunc): bad_async_db._executor = TestAsyncExecutor(bad_conn) bad_dbs.append(bad_async_db) + if test not in { + 'async', + 'batch', + 'transaction', + 'client', + 'exception', + 'view', + 'replication', + 'foxx' + }: # Add test batch databases tst_batch_db = StandardDatabase(tst_conn) tst_batch_db._executor = TestBatchExecutor(tst_conn) diff --git a/tests/helpers.py b/tests/helpers.py index cb6f6cc5..1bdbb21e 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -138,7 +138,7 @@ def empty_collection(collection): :type collection: arango.collection.StandardCollection """ for doc_id in collection.ids(): - collection.delete(doc_id) + collection.delete(doc_id, sync=True) def extract(key, items): diff --git a/tests/test_aql.py b/tests/test_aql.py index e8006f3c..3bb67baa 100644 --- a/tests/test_aql.py +++ b/tests/test_aql.py @@ -104,7 +104,8 @@ def test_aql_query_management(db, bad_db, col, docs): write_collections=[col.name], read_collections=[col.name], stream=False, - skip_inaccessible_cols=True + skip_inaccessible_cols=True, + max_runtime=0.0 ) assert cursor.id is None assert cursor.type == 'cursor' diff --git a/tests/test_collection.py b/tests/test_collection.py index 1f54f0da..db2b8240 100644 --- a/tests/test_collection.py +++ b/tests/test_collection.py @@ -168,7 +168,8 @@ def test_collection_management(db, bad_db, cluster): sync_replication=False, enforce_replication_factor=False, sharding_strategy='community-compat', - smart_join_attribute='test' + smart_join_attribute='test', + write_concern=1 ) assert db.has_collection(col_name) is True diff --git a/tests/test_cursor.py b/tests/test_cursor.py index 61adeda8..352bca39 100644 --- a/tests/test_cursor.py +++ b/tests/test_cursor.py @@ -105,7 +105,8 @@ def test_cursor_write_query(db, col, docs): batch_size=1, ttl=1000, optimizer_rules=['+all'], - profile=True + profile=True, + max_runtime=0.0 ) cursor_id = cursor.id assert 'Cursor' in repr(cursor) diff --git a/tests/test_database.py b/tests/test_database.py index 4e1f9173..6936c110 100644 --- a/tests/test_database.py +++ b/tests/test_database.py @@ -14,6 +14,7 @@ ServerEndpointsError, ServerLogLevelError, ServerLogLevelSetError, + ServerMetricsError, ServerReadLogError, ServerReloadRoutingError, ServerRequiredDBVersionError, @@ -75,6 +76,15 @@ def test_database_misc_methods(db, bad_db, cluster): with assert_raises(ServerRequiredDBVersionError): bad_db.required_db_version() + # Test get server metrics + metrics = db.metrics() + assert isinstance(metrics, string_types) + + # Test get server statistics with bad database + with assert_raises(ServerMetricsError) 
as err: + bad_db.metrics() + assert err.value.error_code in {11, 1228} + # Test get server statistics statistics = db.statistics(description=False) assert isinstance(statistics, dict) @@ -231,7 +241,12 @@ def test_database_management(db, sys_db, bad_db): # Test create database db_name = generate_db_name() assert sys_db.has_database(db_name) is False - assert sys_db.create_database(db_name) is True + assert sys_db.create_database( + name=db_name, + replication_factor=1, + write_concern=1, + sharding="single" + ) is True assert sys_db.has_database(db_name) is True # Test create duplicate database diff --git a/tests/test_foxx.py b/tests/test_foxx.py index 2d753de0..5bf3a0bf 100644 --- a/tests/test_foxx.py +++ b/tests/test_foxx.py @@ -1,6 +1,7 @@ from __future__ import absolute_import, unicode_literals import json +import os import pytest from six import string_types @@ -43,7 +44,7 @@ def test_foxx_attributes(db): assert repr(db.foxx) == ''.format(db.name) -def test_foxx_service_management(db, bad_db, cluster): +def test_foxx_service_management_json(db, bad_db, cluster): if cluster: pytest.skip('Not tested in a cluster setup') @@ -151,6 +152,81 @@ def test_foxx_service_management(db, bad_db, cluster): assert err.value.error_code == 3009 +def test_foxx_service_management_file(db, cluster): + if cluster: + pytest.skip('Not tested in a cluster setup') + + path = os.path.join(os.path.dirname(__file__), 'static', 'service.zip') + bad_path = os.path.join(os.path.dirname(__file__), 'static', 'service') + service_mount = generate_service_mount() + + # Test create service by file with wrong extension + with assert_raises(ValueError): + db.foxx.create_service_with_file(service_mount, bad_path) + + # Test create service by file + service = db.foxx.create_service_with_file( + mount=service_mount, + filename=path, + development=True, + setup=True, + legacy=True + ) + assert service['mount'] == service_mount + assert service['name'] == 'test' + assert service['development'] is True + assert service['legacy'] is True + assert service['manifest']['configuration'] == {} + assert service['manifest']['dependencies'] == {} + + # Test create duplicate service + with assert_raises(FoxxServiceCreateError) as err: + db.foxx.create_service_with_file(service_mount, path) + assert err.value.error_code == 3011 + + # Update config and dependencies + assert db.foxx.update_config(service_mount, {}) == {'values': {}} + assert db.foxx.update_dependencies(service_mount, {}) == {'values': {}} + + # # Test update service by file + # service = db.foxx.update_service_by_file( + # mount=service_mount, + # filename=path, + # teardown=False, + # setup=False, + # legacy=False + # ) + # assert service['mount'] == service_mount + # assert service['name'] == 'test' + # assert service['legacy'] is False + # + # # Test update missing service + # with assert_raises(FoxxServiceUpdateError) as err: + # db.foxx.update_service_by_file(missing_mount, path) + # assert err.value.error_code == 3009 + + # # Test replace service by file + # service = db.foxx.replace_service_by_file( + # mount=service_mount, + # filename=path, + # teardown=True, + # setup=True, + # legacy=True, + # force=False + # ) + # assert service['mount'] == service_mount + # assert service['name'] == 'test' + # assert service['legacy'] is True + # + # # Test replace missing service + # with assert_raises(FoxxServiceReplaceError) as err: + # db.foxx.replace_service_by_file(missing_mount, path) + # assert err.value.error_code == 3009 + + assert 
db.foxx.delete_service(service_mount, teardown=False) is True + assert service_mount not in extract('mount', db.foxx.services()) + + def test_foxx_config_management(db, cluster): if cluster: pytest.skip('Not tested in a cluster setup') diff --git a/tests/test_graph.py b/tests/test_graph.py index 5b1a4566..fa6b55ca 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -350,6 +350,13 @@ def test_vertex_management(fvcol, bad_fvcol, fvdocs): assert len(fvcol) == 1 empty_collection(fvcol) + # Test insert vertex with return_new set to True + result = fvcol.insert({'_id': vertex_id}, return_new=True) + assert 'new' in result + assert 'vertex' in result + assert len(fvcol) == 1 + empty_collection(fvcol) + with assert_raises(DocumentParseError) as err: fvcol.insert({'_id': generate_col_name() + '/' + 'foo'}) assert 'bad collection name' in err.value.message @@ -428,6 +435,16 @@ def test_vertex_management(fvcol, bad_fvcol, fvdocs): assert fvcol[key]['foo'] == 100 old_rev = fvcol[key]['_rev'] + # Test update vertex with return_new and return_old set to True + result = fvcol.update( + {'_key': key, 'foo': 100}, + return_old=True, + return_new=True + ) + assert 'old' in result + assert 'new' in result + assert 'vertex' in result + # Test update vertex with silent set to True assert 'bar' not in fvcol[vertex] assert fvcol.update({'_key': key, 'bar': 200}, silent=True) is True @@ -451,14 +468,14 @@ def test_vertex_management(fvcol, bad_fvcol, fvdocs): assert fvcol[key]['bar'] == 400 old_rev = result['_rev'] - # # Test update vertex with bad revision - # if fvcol.context != 'transaction': - # new_rev = old_rev + '1' - # with assert_raises(DocumentRevisionError) as err: - # fvcol.update({'_key': key, '_rev': new_rev, 'bar': 500}) - # assert err.value.error_code == 1903 - # assert fvcol[key]['foo'] == 200 - # assert fvcol[key]['bar'] == 400 + # Test update vertex with bad revision + if fvcol.context != 'transaction': + new_rev = old_rev + '1' + with assert_raises(DocumentRevisionError) as err: + fvcol.update({'_key': key, '_rev': new_rev, 'bar': 500}) + assert err.value.error_code in {1200, 1903} + assert fvcol[key]['foo'] == 200 + assert fvcol[key]['bar'] == 400 # Test update vertex in missing vertex collection with assert_raises(DocumentUpdateError) as err: @@ -489,16 +506,6 @@ def test_vertex_management(fvcol, bad_fvcol, fvdocs): assert result['_old_rev'] == old_rev assert 'foo' not in fvcol[key] assert fvcol[key]['bar'] is None - old_rev = result['_rev'] - - # # Test update vertex with return_new and return_old set to True - # result = fvcol.update({'_key': key}, return_new=True, return_old=True) - # assert result['_key'] == key - # assert result['_old_rev'] == old_rev - # assert 'old' in result - # assert 'new' in result - # assert 'foo' not in fvcol[key] - # assert fvcol[key]['bar'] is None # Test replace vertex with a single field change result = fvcol.replace({'_key': key, 'baz': 100}) @@ -508,6 +515,16 @@ def test_vertex_management(fvcol, bad_fvcol, fvdocs): assert fvcol[key]['baz'] == 100 old_rev = result['_rev'] + # Test replace vertex with return_new and return_old set to True + result = fvcol.replace( + {'_key': key, 'baz': 100}, + return_old=True, + return_new=True + ) + assert 'old' in result + assert 'new' in result + assert 'vertex' in result + # Test replace vertex with silent set to True assert fvcol.replace({'_key': key, 'bar': 200}, silent=True) is True assert 'foo' not in fvcol[key] @@ -582,6 +599,12 @@ def test_vertex_management(fvcol, bad_fvcol, fvdocs): assert fvcol[vertex] 
is None assert vertex not in fvcol assert len(fvcol) == 2 + + # Test delete existing vertex with return_old set to True + vertex = fvdocs[1] + result = fvcol.delete(vertex, return_old=True) + assert 'old' in result + assert len(fvcol) == 1 empty_collection(fvcol) @@ -622,11 +645,19 @@ def test_edge_management(ecol, bad_ecol, edocs, fvcol, fvdocs, tvcol, tvdocs): key = edge['_key'] # Test insert edge with no key - result = ecol.insert({'_from': edge['_from'], '_to': edge['_to']}) + no_key_edge = {'_from': edge['_from'], '_to': edge['_to']} + result = ecol.insert(no_key_edge) assert result['_key'] in ecol assert len(ecol) == 1 empty_collection(ecol) + # Test insert edge with return_new set to True + result = ecol.insert(no_key_edge, return_new=True) + assert 'new' in result + assert result['edge']['_key'] in ecol + assert len(ecol) == 1 + empty_collection(ecol) + # Test insert vertex with ID edge_id = ecol.name + '/' + 'foo' ecol.insert({ @@ -733,6 +764,16 @@ def test_edge_management(ecol, bad_ecol, edocs, fvcol, fvdocs, tvcol, tvdocs): result = ecol.update({'_key': key, 'foo': 100}) assert result['_key'] == key assert ecol[key]['foo'] == 100 + + # Test update edge with return_old and return_new set to True + result = ecol.update( + {'_key': key, 'foo': 100}, + return_old=True, + return_new=True + ) + assert 'old' in result + assert 'new' in result + assert 'edge' in result old_rev = ecol[key]['_rev'] # Test update edge with multiple field changes @@ -801,6 +842,12 @@ def test_edge_management(ecol, bad_ecol, edocs, fvcol, fvdocs, tvcol, tvdocs): result = ecol.replace(edge) assert result['_key'] == key assert ecol[key]['foo'] == 100 + + # Test replace edge with return_old and return_new set to True + result = ecol.replace(edge, return_old=True, return_new=True) + assert 'old' in result + assert 'new' in result + assert 'edge' in result old_rev = ecol[key]['_rev'] # Test replace edge with silent set to True @@ -878,6 +925,12 @@ def test_edge_management(ecol, bad_ecol, edocs, fvcol, fvdocs, tvcol, tvdocs): if ecol.context != 'transaction': assert ecol[edge] is None assert edge not in ecol + + # Test delete existing edge with return_old set to True + ecol.insert(edge) + result = ecol.delete(edge, return_old=True, check_rev=False) + assert 'old' in result + assert edge not in ecol empty_collection(ecol) diff --git a/tests/test_transaction.py b/tests/test_transaction.py index b1b21ac8..5c84a2bb 100644 --- a/tests/test_transaction.py +++ b/tests/test_transaction.py @@ -137,3 +137,16 @@ def test_transaction_abort(db, col, docs): with pytest.raises(TransactionAbortError) as err: txn_db.abort_transaction() assert err.value.error_code in {10, 1655} + + +# def test_transaction_graph(db, graph, fvcol, fvdocs): +# txn_db = db.begin_transaction(write=fvcol.name) +# vcol = txn_db.graph(graph.name).vertex_collection(fvcol.name) +# +# vcol.insert(fvdocs[0]) +# assert len(vcol) == 1 +# +# vcol.delete(fvdocs[0]) +# assert len(vcol) == 0 +# +# txn_db.commit()
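As a usage note for the database-level changes above, the new ``create_database`` options and the collection-level ``write_concern`` could be exercised roughly as follows. This is a sketch for a cluster deployment: the database name ``test_db``, the collection name ``students`` and the credentials are assumptions, and per the docstrings the options are honored by clusters only.

.. code-block:: python

    from arango import ArangoClient

    client = ArangoClient()
    sys_db = client.db('_system', username='root', password='passwd')

    # Cluster-only defaults for collections created in the new database.
    sys_db.create_database(
        name='test_db',
        replication_factor=2,
        write_concern=1,
        sharding='flexible'
    )

    db = client.db('test_db', username='root', password='passwd')

    # Per-collection write concern, also cluster-only.
    db.create_collection(name='students', replication_factor=2, write_concern=1)

    # The new fields are surfaced through format_database() in properties().
    print(db.properties())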
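Finally, a small sketch for the new ``metrics`` call: it returns the raw Prometheus exposition text as a single string, so the filtering shown here is just one assumed way a caller might consume it rather than part of the API.

.. code-block:: python

    from arango import ArangoClient

    client = ArangoClient()
    sys_db = client.db('_system', username='root', password='passwd')

    # Raw Prometheus text from /_admin/metrics.
    text = sys_db.metrics()

    # Keep only sample lines, dropping '# HELP' / '# TYPE' comments.
    samples = [line for line in text.splitlines()
               if line and not line.startswith('#')]
    print('{} metric samples'.format(len(samples)))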