diff --git a/Pipfile b/Pipfile index 405689ede..b1591312c 100644 --- a/Pipfile +++ b/Pipfile @@ -12,7 +12,6 @@ aiohttp = "==3.8.6" requests = "==2.31.0" jinja-cli = "==1.2.2" pandas = "==2.1.2" -aioarango = "==1.0.0" ftputil = "==5.0.4" beautifulsoup4 = "==4.12.2" jsonschema = "==4.19.2" @@ -22,15 +21,12 @@ aiodns = "==3.1.1" docker = "==6.1.3" click = "==8.1.7" PyYAML = "==6.0.1" -# Unfortunately aioarango requires requests-toolbelt < 0.10.0, which is not compatible with -# urrlib3 >= 2.0.0 -# We're going to have to figure out some alternative to aioarango at some point as it seems to be -# abandonware -urllib3 = "==1.26.17" python-dateutil = "==2.8.2" numpy = "==1.26.1" jsonschema-default = "==1.6.0" mergedeep = "==1.3.4" +httpx = "==0.25.1" +requests-toolbelt = "==1.0.0" [dev-packages] pytest = "==7.4.3" @@ -41,10 +37,6 @@ ipython = "==8.17.2" pytest-asyncio = "==0.21.1" # currently for design/experiments/store_mongo.py pymongo = "==4.6.0" -# later version of dns python has an import problem with httpcore -# https://stackoverflow.com/questions/76766226/i-cant-import-pymongo -# Note aioarango forces an old version of httpcore -dnspython = "==2.3.0" sourmash = "==4.8.4" prometheus-client = "==0.18.0" diff --git a/Pipfile.lock b/Pipfile.lock index 4c7fb5757..bfbe2428a 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "938126f11e8258f71dd4053f4232ae398f0ab33bf45e9f9f2dda4c5a9ff4a979" + "sha256": "6f63c275f48bb9405dc7da62c6b5090354f2da87144c1c399d2a6839dac0400b" }, "pipfile-spec": 6, "requires": { @@ -16,14 +16,6 @@ ] }, "default": { - "aioarango": { - "hashes": [ - "sha256:8a7e06814c95323a5a29c4ac73eb300f451f7e894e2a1dc5a064506a5d0a81af", - "sha256:9a8983234c252375cda763105460906565045e4886d167179d3c037e6b7ddfe6" - ], - "index": "pypi", - "version": "==1.0.0" - }, "aiodns": { "hashes": [ "sha256:1073eac48185f7a4150cad7f96a5192d6911f12b4fb894de80a088508c9b3a99", @@ -466,19 +458,19 @@ }, "h11": { "hashes": [ - "sha256:36a3cb8c0a032f56e2da7084577878a035d3b61d104230d4bd49c0c6b555a9c6", - "sha256:47222cb6067e4a307d535814917cd98fd0a57b6788ce715755fa2b6c28b56042" + "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", + "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761" ], - "markers": "python_version >= '3.6'", - "version": "==0.12.0" + "markers": "python_version >= '3.7'", + "version": "==0.14.0" }, "httpcore": { "hashes": [ - "sha256:036f960468759e633574d7c121afba48af6419615d36ab8ede979f1ad6276fa3", - "sha256:369aa481b014cf046f7067fddd67d00560f2f00426e79569d99cb11245134af0" + "sha256:096cc05bca73b8e459a1fc3dcf585148f63e534eae4339559c9b8a8d6399acc7", + "sha256:9fc092e4799b26174648e54b74ed5f683132a464e95643b226e00c2ed2fa6535" ], - "markers": "python_version >= '3.6'", - "version": "==0.13.7" + "markers": "python_version >= '3.8'", + "version": "==1.0.2" }, "httptools": { "hashes": [ @@ -523,11 +515,11 @@ }, "httpx": { "hashes": [ - "sha256:979afafecb7d22a1d10340bafb403cf2cb75aff214426ff206521fc79d26408c", - "sha256:9f99c15d33642d38bce8405df088c1c4cfd940284b4290cacbfb02e64f4877c6" + "sha256:fec7d6cc5c27c578a391f7e87b9aa7d3d8fbcd034f6399f9f79b45bcc12a866a", + "sha256:ffd96d5cf901e63863d9f1b4b6807861dbea4d301613415d9e6e57ead15fc5d0" ], - "markers": "python_version >= '3.6'", - "version": "==0.18.2" + "index": "pypi", + "version": "==0.25.1" }, "idna": { "hashes": [ @@ -890,126 +882,122 @@ }, "pydantic": { "hashes": [ - "sha256:69bd6fb62d2d04b7055f59a396993486a2ee586c43a0b89231ce0000de07627c", - 
"sha256:7ce6e766c456ad026fe5712f7bcf036efc34bd5d107b3e669ef7ea01b3a9050c" + "sha256:0b8be5413c06aadfbe56f6dc1d45c9ed25fd43264414c571135c97dd77c2bedb", + "sha256:dc5244a8939e0d9a68f1f1b5f550b2e1c879912033b1becbedb315accc75441b" ], "markers": "python_version >= '3.7'", - "version": "==2.5.0" + "version": "==2.5.1" }, "pydantic-core": { "hashes": [ - "sha256:023b6d7ec4e97890b28eb2ee24413e69a6d48de4e8b75123957edd5432f4eeb3", - "sha256:052d8731aaf844f91fe4cd3faf28983b109a5865b3a256ec550b80a5689ead87", - "sha256:0a8c8daf4e3aa3aeb98e3638fc3d58a359738f3d12590b2474c6bb64031a0764", - "sha256:0d82a6ee815388a362885186e431fac84c7a06623bc136f508e9f88261d8cadb", - "sha256:101df420e954966868b8bc992aefed5fa71dd1f2755104da62ee247abab28e2f", - "sha256:102ac85a775e77821943ae38da9634ddd774b37a8d407181b4f7b05cdfb36b55", - "sha256:1185548665bc61bbab0dc78f10c8eafa0db0aa1e920fe9a451b77782b10a65cc", - "sha256:12163197fec7c95751a3c71b36dcc1909eed9959f011ffc79cc8170a6a74c826", - "sha256:130e49aa0cb316f743bc7792c36aefa39fc2221312f1d4b333b19edbdd71f2b1", - "sha256:132b40e479cb5cebbbb681f77aaceabbc8355df16c9124cff1d4060ada83cde2", - "sha256:144f2c1d5579108b6ed1193fcc9926124bd4142b0f7020a7744980d1235c8a40", - "sha256:16f4a7e1ec6b3ea98a1e108a2739710cd659d68b33fbbeaba066202cab69c7b6", - "sha256:184ff7b30c3f60e1b775378c060099285fd4b5249271046c9005f8b247b39377", - "sha256:1bfb63821ada76719ffcd703fc40dd57962e0d8c253e3c565252e6de6d3e0bc6", - "sha256:1e7208946ea9b27a8cef13822c339d4ae96e45952cc01fc4a91c7f1cb0ae2861", - "sha256:217dcbfaf429a9b8f1d54eb380908b9c778e78f31378283b30ba463c21e89d5d", - "sha256:2459cc06572730e079ec1e694e8f68c99d977b40d98748ae72ff11ef21a56b0b", - "sha256:24ba48f9d0b8d64fc5e42e1600366c3d7db701201294989aebdaca23110c02ab", - "sha256:26242e3593d4929123615bd9365dd86ef79b7b0592d64a96cd11fd83c69c9f34", - "sha256:2871daf5b2823bf77bf7d3d43825e5d904030c155affdf84b21a00a2e00821d2", - "sha256:28734bcfb8fc5b03293dec5eb5ea73b32ff767f6ef79a31f6e41dad2f5470270", - "sha256:2a7d08b39fac97540fba785fce3b21ee01a81f081a07a4d031efd791da6666f9", - "sha256:2be018a84995b6be1bbd40d6064395dbf71592a981169cf154c0885637f5f54a", - "sha256:3303113fdfaca927ef11e0c5f109e2ec196c404f9d7ba5f8ddb63cdf287ea159", - "sha256:36c3bf96f803e207a80dbcb633d82b98ff02a9faa76dd446e969424dec8e2b9f", - "sha256:3d5b2a4b3c10cad0615670cab99059441ff42e92cf793a0336f4bc611e895204", - "sha256:3f48d4afd973abbd65266ac24b24de1591116880efc7729caf6b6b94a9654c9e", - "sha256:42d5d0e9bbb50481a049bd0203224b339d4db04006b78564df2b782e2fd16ebc", - "sha256:443dc5eede7fa76b2370213e0abe881eb17c96f7d694501853c11d5d56916602", - "sha256:49ee28d65f506b2858a60745cc974ed005298ebab12693646b97641dd7c99c35", - "sha256:4f0788699a92d604f348e9c1ac5e97e304e97127ba8325c7d0af88dcc7d35bd3", - "sha256:51506e7652a2ef1d1cf763c4b51b972ff4568d1dddc96ca83931a6941f5e6389", - "sha256:53efe03cc383a83660cfdda6a3cb40ee31372cedea0fde0b2a2e55e838873ab6", - "sha256:55713d155da1e508083c4b08d0b1ad2c3054f68b8ef7eb3d3864822e456f0bb5", - "sha256:581bb606a31749a00796f5257947a0968182d7fe91e1dada41f06aeb6bfbc91a", - "sha256:5879ac4791508d8f0eb7dec71ff8521855180688dac0c55f8c99fc4d1a939845", - "sha256:587d75aec9ae50d0d63788cec38bf13c5128b3fc1411aa4b9398ebac884ab179", - "sha256:59fa83873223f856d898452c6162a390af4297756f6ba38493a67533387d85d9", - "sha256:5a1570875eb0d1479fb2270ed80c88c231aaaf68b0c3f114f35e7fb610435e4f", - "sha256:5b45b7be9f99991405ecd6f6172fb6798908a8097106ae78d5cc5cc15121bad9", - "sha256:6015beb28deb5306049ecf2519a59627e9e050892927850a884df6d5672f8c7d", - 
"sha256:6590ed9d13eb51b28ea17ddcc6c8dbd6050b4eb589d497105f0e13339f223b72", - "sha256:66dc0e63349ec39c1ea66622aa5c2c1f84382112afd3ab2fa0cca4fb01f7db39", - "sha256:679cc4e184f213c8227862e57340d12fd4d4d19dc0e3ddb0f653f86f01e90f94", - "sha256:69cd74e55a5326d920e7b46daa2d81c2bdb8bcf588eafb2330d981297b742ddc", - "sha256:69df82892ff00491d673b1929538efb8c8d68f534fdc6cb7fd3ac8a5852b9034", - "sha256:72c2ef3787c3b577e5d6225d73a77167b942d12cef3c1fbd5e74e55b7f881c36", - "sha256:744b807fe2733b6da3b53e8ad93e8b3ea3ee3dfc3abece4dd2824cc1f39aa343", - "sha256:7977e261cac5f99873dc2c6f044315d09b19a71c4246560e1e67593889a90978", - "sha256:798590d38c9381f07c48d13af1f1ef337cebf76ee452fcec5deb04aceced51c7", - "sha256:812beca1dcb2b722cccc7e9c620bd972cbc323321194ec2725eab3222e6ac573", - "sha256:8276bbab68a9dbe721da92d19cbc061f76655248fe24fb63969d0c3e0e5755e7", - "sha256:85bb66d661be51b2cba9ca06759264b3469d2dbb53c3e6effb3f05fec6322be6", - "sha256:871c641a83719caaa856a11dcc61c5e5b35b0db888e1a0d338fe67ce744575e2", - "sha256:893bf4fb9bfb9c4639bc12f3de323325ada4c6d60e478d5cded65453e9364890", - "sha256:8d927d042c0ef04607ee7822828b208ab045867d20477ec6593d612156798547", - "sha256:8e17f0c3ba4cb07faa0038a59ce162de584ed48ba645c8d05a5de1e40d4c21e7", - "sha256:9486e27bb3f137f33e2315be2baa0b0b983dae9e2f5f5395240178ad8e644728", - "sha256:94cf6d0274eb899d39189144dcf52814c67f9b0fd196f211420d9aac793df2da", - "sha256:97246f896b4df7fd84caa8a75a67abb95f94bc0b547665bf0889e3262b060399", - "sha256:9d59e0d7cdfe8ed1d4fcd28aad09625c715dc18976c7067e37d8a11b06f4be3e", - "sha256:a15f6e5588f7afb7f6fc4b0f4ff064749e515d34f34c666ed6e37933873d8ad8", - "sha256:a2ccdc53cb88e51c7d47d74c59630d7be844428f6b8d463055ffad6f0392d8da", - "sha256:a68a36d71c7f638dda6c9e6b67f6aabf3fa1471b198d246457bfdc7c777cdeb7", - "sha256:a7991f25b98038252363a03e6a9fe92e60fe390fda2631d238dc3b0e396632f8", - "sha256:aadf74a40a7ae49c3c1aa7d32334fe94f4f968e21dd948e301bb4ed431fb2412", - "sha256:abae6fd5504e5e438e4f6f739f8364fd9ff5a5cdca897e68363e2318af90bc28", - "sha256:ac417312bf6b7a0223ba73fb12e26b2854c93bf5b1911f7afef6d24c379b22aa", - "sha256:ad9ea86f5fc50f1b62c31184767fe0cacaa13b54fe57d38898c3776d30602411", - "sha256:b4ff385a525017f5adf6066d7f9fb309f99ade725dcf17ed623dc7dce1f85d9f", - "sha256:b89821a2c77cc1b8f2c1fc3aacd6a3ecc5df8f7e518dc3f18aef8c4dcf66003d", - "sha256:b8ff0302518dcd001bd722bbe342919c29e5066c7eda86828fe08cdc112668b8", - "sha256:b91b5ec423e88caa16777094c4b2b97f11453283e7a837e5e5e1b886abba1251", - "sha256:ba55d73a2df4771b211d0bcdea8b79454980a81ed34a1d77a19ddcc81f98c895", - "sha256:bb1c6ecb53e4b907ee8486f453dd940b8cbb509946e2b671e3bf807d310a96fc", - "sha256:bc6a4ea9f88a810cb65ccae14404da846e2a02dd5c0ad21dee712ff69d142638", - "sha256:c36987f5eb2a7856b5f5feacc3be206b4d1852a6ce799f6799dd9ffb0cba56ae", - "sha256:c6e98227eb02623d57e1fd061788837834b68bb995a869565211b9abf3de4bf4", - "sha256:c7411cd06afeb263182e38c6ca5b4f5fe4f20d91466ad7db0cd6af453a02edec", - "sha256:c8c466facec2ccdf025b0b1455b18f2c3d574d5f64d24df905d3d7b8f05d5f4e", - "sha256:c964c0cc443d6c08a2347c0e5c1fc2d85a272dc66c1a6f3cde4fc4843882ada4", - "sha256:ca942a2dc066ca5e04c27feaa8dfb9d353ddad14c6641660c565149186095343", - "sha256:cb2fd3ab67558eb16aecfb4f2db4febb4d37dc74e6b8613dc2e7160fb58158a9", - "sha256:d312ad20e3c6d179cb97c42232b53111bcd8dcdd5c1136083db9d6bdd489bc73", - "sha256:d965bdb50725a805b083f5f58d05669a85705f50a6a864e31b545c589290ee31", - "sha256:d983222223f63e323a5f497f5b85e211557a5d8fb670dc88f343784502b466ba", - "sha256:dee4682bd7947afc682d342a8d65ad1834583132383f8e801601a8698cb8d17a", - 
"sha256:e2be646a5155d408e68b560c0553e8a83dc7b9f90ec6e5a2fc3ff216719385db", - "sha256:e2c689439f262c29cf3fcd5364da1e64d8600facecf9eabea8643b8755d2f0de", - "sha256:e5a111f9158555582deadd202a60bd7803b6c68f406391b7cf6905adf0af6811", - "sha256:e905014815687d88cbb14bbc0496420526cf20d49f20606537d87646b70f1046", - "sha256:ebc79120e105e4bcd7865f369e3b9dbabb0d492d221e1a7f62a3e8e292550278", - "sha256:f1a30eef060e21af22c7d23349f1028de0611f522941c80efa51c05a63142c62", - "sha256:f483467c046f549572f8aca3b7128829e09ae3a9fe933ea421f7cb7c58120edb", - "sha256:f523e116879bc6714e61d447ce934676473b068069dce6563ea040381dc7a257", - "sha256:f53a3ccdc30234cb4342cec541e3e6ed87799c7ca552f0b5f44e3967a5fed526", - "sha256:fb290491f1f0786a7da4585250f1feee200fc17ff64855bdd7c42fb54526fa29", - "sha256:fc3227408808ba7df8e95eb1d8389f4ba2203bed8240b308de1d7ae66d828f24", - "sha256:fd80a2d383940eec3db6a5b59d1820f947317acc5c75482ff8d79bf700f8ad6a", - "sha256:fd937733bf2fe7d6a8bf208c12741f1f730b7bf5636033877767a75093c29b8a", - "sha256:ffba979801e3931a19cd30ed2049450820effe8f152aaa317e2fd93795d318d7" + "sha256:056ea7cc3c92a7d2a14b5bc9c9fa14efa794d9f05b9794206d089d06d3433dc7", + "sha256:0653fb9fc2fa6787f2fa08631314ab7fc8070307bd344bf9471d1b7207c24623", + "sha256:076edc972b68a66870cec41a4efdd72a6b655c4098a232314b02d2bfa3bfa157", + "sha256:0a3e51c2be472b7867eb0c5d025b91400c2b73a0823b89d4303a9097e2ec6655", + "sha256:0c7f8e8a7cf8e81ca7d44bea4f181783630959d41b4b51d2f74bc50f348a090f", + "sha256:10904368261e4509c091cbcc067e5a88b070ed9a10f7ad78f3029c175487490f", + "sha256:113752a55a8eaece2e4ac96bc8817f134c2c23477e477d085ba89e3aa0f4dc44", + "sha256:12e05a76b223577a4696c76d7a6b36a0ccc491ffb3c6a8cf92d8001d93ddfd63", + "sha256:136bc7247e97a921a020abbd6ef3169af97569869cd6eff41b6a15a73c44ea9b", + "sha256:1582f01eaf0537a696c846bea92082082b6bfc1103a88e777e983ea9fbdc2a0f", + "sha256:1767bd3f6370458e60c1d3d7b1d9c2751cc1ad743434e8ec84625a610c8b9195", + "sha256:1e2979dc80246e18e348de51246d4c9b410186ffa3c50e77924bec436b1e36cb", + "sha256:1ea992659c03c3ea811d55fc0a997bec9dde863a617cc7b25cfde69ef32e55af", + "sha256:1f2d4516c32255782153e858f9a900ca6deadfb217fd3fb21bb2b60b4e04d04d", + "sha256:2494d20e4c22beac30150b4be3b8339bf2a02ab5580fa6553ca274bc08681a65", + "sha256:260692420028319e201b8649b13ac0988974eeafaaef95d0dfbf7120c38dc000", + "sha256:2646f8270f932d79ba61102a15ea19a50ae0d43b314e22b3f8f4b5fabbfa6e38", + "sha256:27828f0227b54804aac6fb077b6bb48e640b5435fdd7fbf0c274093a7b78b69c", + "sha256:2bc736725f9bd18a60eec0ed6ef9b06b9785454c8d0105f2be16e4d6274e63d0", + "sha256:2c08ac60c3caa31f825b5dbac47e4875bd4954d8f559650ad9e0b225eaf8ed0c", + "sha256:2c83892c7bf92b91d30faca53bb8ea21f9d7e39f0ae4008ef2c2f91116d0464a", + "sha256:354db020b1f8f11207b35360b92d95725621eb92656725c849a61e4b550f4acc", + "sha256:364dba61494e48f01ef50ae430e392f67ee1ee27e048daeda0e9d21c3ab2d609", + "sha256:37dad73a2f82975ed563d6a277fd9b50e5d9c79910c4aec787e2d63547202315", + "sha256:38113856c7fad8c19be7ddd57df0c3e77b1b2336459cb03ee3903ce9d5e236ce", + "sha256:38aed5a1bbc3025859f56d6a32f6e53ca173283cb95348e03480f333b1091e7d", + "sha256:3ad083df8fe342d4d8d00cc1d3c1a23f0dc84fce416eb301e69f1ddbbe124d3f", + "sha256:3c1bf1a7b05a65d3b37a9adea98e195e0081be6b17ca03a86f92aeb8b110f468", + "sha256:3d1dde10bd9962b1434053239b1d5490fc31a2b02d8950a5f731bc584c7a5a0f", + "sha256:44aaf1a07ad0824e407dafc637a852e9a44d94664293bbe7d8ee549c356c8882", + "sha256:44afa3c18d45053fe8d8228950ee4c8eaf3b5a7f3b64963fdeac19b8342c987f", + "sha256:4a70d23eedd88a6484aa79a732a90e36701048a1509078d1b59578ef0ea2cdf5", + 
"sha256:4aa89919fbd8a553cd7d03bf23d5bc5deee622e1b5db572121287f0e64979476", + "sha256:4cc6bb11f4e8e5ed91d78b9880774fbc0856cb226151b0a93b549c2b26a00c19", + "sha256:536e1f58419e1ec35f6d1310c88496f0d60e4f182cacb773d38076f66a60b149", + "sha256:5402ee0f61e7798ea93a01b0489520f2abfd9b57b76b82c93714c4318c66ca06", + "sha256:56814b41486e2d712a8bc02a7b1f17b87fa30999d2323bbd13cf0e52296813a1", + "sha256:5b73441a1159f1fb37353aaefb9e801ab35a07dd93cb8177504b25a317f4215a", + "sha256:61beaa79d392d44dc19d6f11ccd824d3cccb865c4372157c40b92533f8d76dd0", + "sha256:6c2d118d1b6c9e2d577e215567eedbe11804c3aafa76d39ec1f8bc74e918fd07", + "sha256:6e2f9d76c00e805d47f19c7a96a14e4135238a7551a18bfd89bb757993fd0933", + "sha256:71ed769b58d44e0bc2701aa59eb199b6665c16e8a5b8b4a84db01f71580ec448", + "sha256:7349f99f1ef8b940b309179733f2cad2e6037a29560f1b03fdc6aa6be0a8d03c", + "sha256:75f3f534f33651b73f4d3a16d0254de096f43737d51e981478d580f4b006b427", + "sha256:76fc18653a5c95e5301a52d1b5afb27c9adc77175bf00f73e94f501caf0e05ad", + "sha256:7cb0c397f29688a5bd2c0dbd44451bc44ebb9b22babc90f97db5ec3e5bb69977", + "sha256:7cc24728a1a9cef497697e53b3d085fb4d3bc0ef1ef4d9b424d9cf808f52c146", + "sha256:7e63a56eb7fdee1587d62f753ccd6d5fa24fbeea57a40d9d8beaef679a24bdd6", + "sha256:832d16f248ca0cc96929139734ec32d21c67669dcf8a9f3f733c85054429c012", + "sha256:8488e973547e8fb1b4193fd9faf5236cf1b7cd5e9e6dc7ff6b4d9afdc4c720cb", + "sha256:849cff945284c577c5f621d2df76ca7b60f803cc8663ff01b778ad0af0e39bb9", + "sha256:88ec906eb2d92420f5b074f59cf9e50b3bb44f3cb70e6512099fdd4d88c2f87c", + "sha256:8d3b9c91eeb372a64ec6686c1402afd40cc20f61a0866850f7d989b6bf39a41a", + "sha256:8f5624f0f67f2b9ecaa812e1dfd2e35b256487566585160c6c19268bf2ffeccc", + "sha256:905a12bf088d6fa20e094f9a477bf84bd823651d8b8384f59bcd50eaa92e6a52", + "sha256:92486a04d54987054f8b4405a9af9d482e5100d6fe6374fc3303015983fc8bda", + "sha256:96eb10ef8920990e703da348bb25fedb8b8653b5966e4e078e5be382b430f9e0", + "sha256:96fb679c7ca12a512d36d01c174a4fbfd912b5535cc722eb2c010c7b44eceb8e", + "sha256:98d8b3932f1a369364606417ded5412c4ffb15bedbcf797c31317e55bd5d920e", + "sha256:9dbab442a8d9ca918b4ed99db8d89d11b1f067a7dadb642476ad0889560dac79", + "sha256:9ef3e2e407e4cad2df3c89488a761ed1f1c33f3b826a2ea9a411b0a7d1cccf1b", + "sha256:9ff737f24b34ed26de62d481ef522f233d3c5927279f6b7229de9b0deb3f76b5", + "sha256:a1a39fecb5f0b19faee9a8a8176c805ed78ce45d760259a4ff3d21a7daa4dfc1", + "sha256:a402ae1066be594701ac45661278dc4a466fb684258d1a2c434de54971b006ca", + "sha256:a5c51460ede609fbb4fa883a8fe16e749964ddb459966d0518991ec02eb8dfb9", + "sha256:a8ca13480ce16daad0504be6ce893b0ee8ec34cd43b993b754198a89e2787f7e", + "sha256:ab4a2381005769a4af2ffddae74d769e8a4aae42e970596208ec6d615c6fb080", + "sha256:aeafc7f5bbddc46213707266cadc94439bfa87ecf699444de8be044d6d6eb26f", + "sha256:aecd5ed096b0e5d93fb0367fd8f417cef38ea30b786f2501f6c34eabd9062c38", + "sha256:af452e69446fadf247f18ac5d153b1f7e61ef708f23ce85d8c52833748c58075", + "sha256:af46f0b7a1342b49f208fed31f5a83b8495bb14b652f621e0a6787d2f10f24ee", + "sha256:b02b5e1f54c3396c48b665050464803c23c685716eb5d82a1d81bf81b5230da4", + "sha256:b28996872b48baf829ee75fa06998b607c66a4847ac838e6fd7473a6b2ab68e7", + "sha256:b7692f539a26265cece1e27e366df5b976a6db6b1f825a9e0466395b314ee48b", + "sha256:ba44fad1d114539d6a1509966b20b74d2dec9a5b0ee12dd7fd0a1bb7b8785e5f", + "sha256:bf15145b1f8056d12c67255cd3ce5d317cd4450d5ee747760d8d088d85d12a2d", + "sha256:c3dc2920cc96f9aa40c6dc54256e436cc95c0a15562eb7bd579e1811593c377e", + "sha256:c54af5069da58ea643ad34ff32fd6bc4eebb8ae0fef9821cd8919063e0aeeaab", + 
"sha256:c5ea0153482e5b4d601c25465771c7267c99fddf5d3f3bdc238ef930e6d051cf", + "sha256:c9ffd823c46e05ef3eb28b821aa7bc501efa95ba8880b4a1380068e32c5bed47", + "sha256:ca55c9671bb637ce13d18ef352fd32ae7aba21b4402f300a63f1fb1fd18e0364", + "sha256:caa94726791e316f0f63049ee00dff3b34a629b0d099f3b594770f7d0d8f1f56", + "sha256:cc956f78651778ec1ab105196e90e0e5f5275884793ab67c60938c75bcca3989", + "sha256:ccbf355b7276593c68fa824030e68cb29f630c50e20cb11ebb0ee450ae6b3d08", + "sha256:cf08b43d1d5d1678f295f0431a4a7e1707d4652576e1d0f8914b5e0213bfeee5", + "sha256:d06c78074646111fb01836585f1198367b17d57c9f427e07aaa9ff499003e58d", + "sha256:d2b53e1f851a2b406bbb5ac58e16c4a5496038eddd856cc900278fa0da97f3fc", + "sha256:d41df8e10b094640a6b234851b624b76a41552f637b9fb34dc720b9fe4ef3be4", + "sha256:d7abd17a838a52140e3aeca271054e321226f52df7e0a9f0da8f91ea123afe98", + "sha256:de52ddfa6e10e892d00f747bf7135d7007302ad82e243cf16d89dd77b03b649d", + "sha256:df33902464410a1f1a0411a235f0a34e7e129f12cb6340daca0f9d1390f5fe10", + "sha256:e16aaf788f1de5a85c8f8fcc9c1ca1dd7dd52b8ad30a7889ca31c7c7606615b8", + "sha256:e3ad4968711fb379a67c8c755beb4dae8b721a83737737b7bcee27c05400b047", + "sha256:e483b8b913fcd3b48badec54185c150cb7ab0e6487914b84dc7cde2365e0c892", + "sha256:e71f666c3bf019f2490a47dddb44c3ccea2e69ac882f7495c68dc14d4065eac2", + "sha256:ea1498ce4491236d1cffa0eee9ad0968b6ecb0c1cd711699c5677fc689905f00", + "sha256:eaab9dc009e22726c62fe3b850b797e7f0e7ba76d245284d1064081f512c7226", + "sha256:ec79dbe23702795944d2ae4c6925e35a075b88acd0d20acde7c77a817ebbce94", + "sha256:f1b92e72babfd56585c75caf44f0b15258c58e6be23bc33f90885cebffde3400", + "sha256:f1f46700402312bdc31912f6fc17f5ecaaaa3bafe5487c48f07c800052736289", + "sha256:f518eac285c9632be337323eef9824a856f2680f943a9b68ac41d5f5bad7df7c", + "sha256:f86f20a9d5bee1a6ede0f2757b917bac6908cde0f5ad9fcb3606db1e2968bcf5", + "sha256:f8fc652c354d3362e2932a79d5ac4bbd7170757a41a62c4fe0f057d29f10bebb", + "sha256:fe272a72c7ed29f84c42fedd2d06c2f9858dc0c00dae3b34ba15d6d8ae0fbaaf", + "sha256:fe863491664c6720d65ae438d4efaa5eca766565a53adb53bf14bc3246c72fe0" ], "markers": "python_version >= '3.7'", - "version": "==2.14.1" - }, - "pyjwt": { - "hashes": [ - "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de", - "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320" - ], - "markers": "python_version >= '3.7'", - "version": "==2.8.0" + "version": "==2.14.3" }, "python-dateutil": { "hashes": [ @@ -1107,20 +1095,11 @@ }, "requests-toolbelt": { "hashes": [ - "sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f", - "sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0" - ], - "version": "==0.9.1" - }, - "rfc3986": { - "extras": [ - "idna2008" - ], - "hashes": [ - "sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835", - "sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97" + "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6", + "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06" ], - "version": "==1.5.0" + "index": "pypi", + "version": "==1.0.0" }, "rpds-py": { "hashes": [ @@ -1293,11 +1272,11 @@ }, "urllib3": { "hashes": [ - "sha256:24d6a242c28d29af46c3fae832c36db3bbebcc533dd1bb549172cd739c82df21", - "sha256:94a757d178c9be92ef5539b8840d48dc9cf1b2709c9d6b588232a055c524458b" + "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3", + "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54" ], - "index": "pypi", - 
"version": "==1.26.17" + "markers": "python_version >= '3.8'", + "version": "==2.1.0" }, "uvicorn": { "extras": [ @@ -1942,11 +1921,11 @@ }, "dnspython": { "hashes": [ - "sha256:224e32b03eb46be70e12ef6d64e0be123a64e621ab4c0822ff6d450d52a540b9", - "sha256:89141536394f909066cabd112e3e1a37e4e654db00a25308b0f130bc3152eb46" + "sha256:57c6fbaaeaaf39c891292012060beb141791735dbb4004798328fc2c467402d8", + "sha256:8dcfae8c7460a2f84b4072e26f1c9f4101ca20c071649cb7c34e8b6a93d58984" ], - "index": "pypi", - "version": "==2.3.0" + "markers": "python_version >= '3.8' and python_version < '4.0'", + "version": "==2.4.2" }, "executing": { "hashes": [ @@ -1958,51 +1937,51 @@ }, "fonttools": { "hashes": [ - "sha256:02939e423540e05843a5c2b84704f45d307144f761a42a299d9b0b481e497225", - "sha256:0bd45092788dbfb781fae299905695a3fe5c1956a515ee331c9f034da3a9d0e5", - "sha256:0c1ac78c60b155ce709b50e28321baa3813dfae648bf55ac80d5a97c70d088e0", - "sha256:0d8ed83815a125b25c10404736a2cd43d60eb6479fe2d68373418cd1822ec330", - "sha256:11e9cba26fd658a491c82fdf5dc5bdb8078ca69ca70ba5724f63a66d486fa8b3", - "sha256:147c9f5fbe12486fa186b5ccdc64a537d581e4f9bbddfbc40f2a15a55c66f54e", - "sha256:17702266ba92cef9a0d7418609f8f8b8e019192c62e8014f10b89a485af9d8ce", - "sha256:25504368ce3dbdc5df1e6bee1980674b60216c543ad2647c85333f8daf5e9dd2", - "sha256:3675499e11a2332a867c1ce98792d29615ac143186c1c1d3e1bb7a13f1929b52", - "sha256:40d96cc1395dbf82dedfd4eb127d320004088df6007383c25db676e5f42fe414", - "sha256:50aacecec89ca07ba97a63a949f9b273ccbdc105602ec4426c8a9a143f9e6aa3", - "sha256:6065578bdf96d4b69dd53d8b49ff02412b2a46d461b0d1ee5eddb81c3a953a46", - "sha256:61b794e6a9208e7ee3abf11a9f56b9c1409967817dfd732f97b44812686cab1d", - "sha256:63efb6b66c275cb2c750576ed743f9995b92bcd733b72699599c6f74dce277c6", - "sha256:6698abcbb43f68ecfe473169d5928adf0a09ab8e6439322f80bc10a80ab9195d", - "sha256:6d2d0e0d64a21b07c30206d500f8e77f4beaf80e7cc0ffd65a304a9ae1c0e197", - "sha256:6dce674ba22419a9f3822f9c1b6bd823fce11d3a34372c580216167b6d9e232b", - "sha256:6dd10d40028bf71e9604279875e4c258a36b2a42fff349fdf20141813d83cc61", - "sha256:748d21764665209d5e0729ce8386fd01c92258699db732c7dbe4c9abf9e7c392", - "sha256:753de1235ac308111f80eacb9d92b328088adfec7147fd101692cc49ad53a3fe", - "sha256:7debaae9f267702ac4e89944bbfc4e55bc2d0ef891aa6c18d6afd9184a14554a", - "sha256:8493c84ac86fdc5dde68b720959b79865cf5216a5c1ee9b4a161eac8c56dc310", - "sha256:8acf50c20bae9880169ff133768a54f587d956676d28894401835a28f450935e", - "sha256:8dd81470227c53ab78788f1b21b7e655b2c3aa66f0f670d9011f2deb64bed034", - "sha256:8e4961e26423ddd713672746c110e708c0094deae74493e21198d85f54f7d88c", - "sha256:97fb6f806694268d0c35abfc1f33662a1a96d12875a790b2b69d7b8d4fadbea5", - "sha256:994c62a46cb2cfd670edc360d87c902ee475790fbddb267abd9fd8a83199423a", - "sha256:9fa904fb50c9f1b3ffbe352c7c4ed35eb16558933f011ff74f86f33504358e4d", - "sha256:a02747ac741abe1fe994ac55b143432637d136e4a5a472e7a90574a015b57dc4", - "sha256:a04ad39ac67c523c9f8f102706ac05d7e5cee3148a3519c6afc6ffbb3f0af7aa", - "sha256:a4a8734ddb91647d9545caae4dfb4633045c5dccb2fccb2d6c2a09424d975ef1", - "sha256:ac523156bf227f009102cf33c116bcc18d6b9a05ea9a3a6eaa57e3adb42620a9", - "sha256:af89a9370dc463ffed3010af6fad1aa58998ce5eb5d94c2c2688768e6b108cc8", - "sha256:aff3c12fba2525e5b7f7ba73fab10ddac386f8019b6cf2b8701239cf9f3e2a8a", - "sha256:c142c11d28af532c7edddf191367d6acf2a2884bb4e2ba329c265f58ca865d0a", - "sha256:c1d22a61a247262f178819f0331e0692e27c88be5770bf1c2404d0d52799f711", - "sha256:caf014bcc24673b681e7f768180f063691b301e2eccd9a53c43b5eebfb448bd8", - 
"sha256:cc4acf7f1684d234d788cbb1976fbced4e1ae7c51abaf4314e11d1d86498ba76", - "sha256:d22d9a4cb3f0c96991d4dccab66c7377302c9ca09dcf0cbce968d73919585120", - "sha256:dc16e26668ec2ae01a37ff8293ce0326cf8c043e24fcf86fc38e6c25ddd98926", - "sha256:e775851c6884c16ed3831e461a0d5e271d9ebcd05204122d3a21ca2465a5d8c1", - "sha256:fcf6c0c2bf2b0baeb55b5e44e9d6f2b71ede808949b8ab4daca077cc3f9cfff5" + "sha256:192ebdb3bb1882b7ed3ad4b949a106ddd8b428d046ddce64df2d459f7a2db31b", + "sha256:20898476cf9c61795107b91409f4b1cf86de6e92b41095bbe900c05b5b117c96", + "sha256:2bff4f9d5edc10b29d2a2daeefd78a47289ba2f751c9bf247925b9d43c6efd79", + "sha256:2fe4eed749de2e6bf3aa05d18df04231a712a16c08974af5e67bb9f75a25d10f", + "sha256:32e8a5cebfe8f797461b02084104053b2690ebf0cc38eda5beb9ba24ce43c349", + "sha256:3302998e02a854a41c930f9f1366eb8092dbc5fe7ff636d86aeb28d232f4610a", + "sha256:345a30db8adfbb868221234fb434dd2fc5bfe27baafbaf418528f6c5a5a95584", + "sha256:35d88af2b63060ed2b50aa00d38f60edf4c0b9275a77ae1a98e8d2c03540c617", + "sha256:367aa3e81a096e9a95dfc0d5afcbd0a299d857bac6d0fe5f1614c6f3e53f447f", + "sha256:3b179a284b73802edd6d910e6384f28098cb03bd263fd87db6abb31679f68863", + "sha256:3eb365cd8ae4765973fa036aed0077ac26f37b2f8240a72c4a29cd9d8a31027f", + "sha256:42eefbb1babf81de40ab4a6ace6018c8c5a0d79ece0f986f73a9904b26ee511b", + "sha256:437204780611f9f80f74cd4402fa451e920d1c4b6cb474a0818a734b4affc477", + "sha256:4831d948bc3cea9cd8bf0c92a087f4392068bcac3b584a61a4c837c48a012337", + "sha256:4c805a0b0545fd9becf6dfe8d57e45a7c1af7fdbfd0a7d776c5e999e4edec9f5", + "sha256:50152205ed3e16c5878a006ee53ecc402acac9af68357343be1e5c36f66ccb24", + "sha256:50b43fd55089ae850a050f0c382f13fc9586279a540b646b28b9e93fbc05b8a3", + "sha256:5478a77a15d01a21c569fc4ab6f2faba852a21d0932eef02ac4c4a4b50af8070", + "sha256:5cd114cb20b491f6812aa397040b06a469563c1a01ec94c8c5d96b76d84916db", + "sha256:718599de63b337518bfa5ce67e4ae462da3dd582a74fbe805f56b3704eb334a1", + "sha256:72ec91b85391dd4b06991c0919215ecf910554df2842df32e928155ea5b74aef", + "sha256:79a6babb87d7f70f8aed88f157bbdc5d2f01ad8b01e9535ff07e43e96ad25548", + "sha256:7a8b9f22d3c147ecdc7be46f9f1e1df0523541df0535fac5bdd653726218d068", + "sha256:877e36afce69cfdbd0453a4f44b16e865ac29f06df29f10f0b822a68ab858e86", + "sha256:8c7985017e7fb2c2613fa5c440457cd45a6ea808f8d08ed70c27e02e6862cbbe", + "sha256:8f4e22c5128cb604d3b0b869eb8d3092a1c10cbe6def402ff46bb920f7169374", + "sha256:948b35e54b0c1b6acf9d63c70515051b7d400d69b61c91377cf0e8742d71c44d", + "sha256:979fc845703e0d9b35bc65379fcf34d050e04c3e0b3381a0f66b0be33183da1c", + "sha256:b1c2cb1e2a7cfeaeb40b8823f238d7e02929b3a0b53e133e757dec5e99c327c9", + "sha256:b6a77e3b994649f72fb46b0b8cfe64481b5640e5aecc2d77961300a34fe1dc4f", + "sha256:ba82ee938bd7ea16762124a650bf2529f67dfe9999f64e0ebe1ef0a04baceafd", + "sha256:ba9c407d8bd63b21910b98399aeec87e24ca9c3e62ea60c246e505c4a4df6c27", + "sha256:bcb0fde94374ba00c118d632b0b5f1f4447401313166bcb14d737322928e358f", + "sha256:c26649a6ce6f1ce4dd6748f64b18f70e39c618c6188286ab9534a949da28164c", + "sha256:c329e21502c894fe4c800e32bc3ce37c6b5ca95778d32dff17d7ebf5cac94efa", + "sha256:cde83f83919ae7569a0316e093e04022dbb8ae5217f41cf591f125dd35d4dc0d", + "sha256:dd752b778b37863cf5146d0112aafcd5693235831f09303809ab9c1e564c236b", + "sha256:e3bbca4f873d96c20757c24c70a903251a8998e1931bd888b49956f21d94b441", + "sha256:e84084cc325f888c3495df7ec25f6133be0f606efb80a9c9e072ea6064ede9ac", + "sha256:f647d270ee90f70acbf5b31a53d486ba0897624236f9056d624c4e436386a14e", + "sha256:f77b6c0add23a3f1ec8eda40015bcb8e92796f7d06a074de102a31c7d007c05b", + 
"sha256:fad1c74aa10b77764d3cdf3481bd181d4949e0b46f2da6f9e57543d4adbda177" ], "markers": "python_version >= '3.8'", - "version": "==4.44.1" + "version": "==4.44.3" }, "iniconfig": { "hashes": [ diff --git a/aioarango/LICENSE b/aioarango/LICENSE new file mode 100644 index 000000000..c4c701898 --- /dev/null +++ b/aioarango/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016-2021 Joohwan Oh + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/aioarango/__init__.py b/aioarango/__init__.py new file mode 100644 index 000000000..94bc84eb5 --- /dev/null +++ b/aioarango/__init__.py @@ -0,0 +1,4 @@ +import aioarango.errno as errno # noqa: F401 +from aioarango.client import ArangoClient # noqa: F401 +from aioarango.exceptions import * # noqa: F401 F403 +from aioarango.http import * # noqa: F401 F403 diff --git a/aioarango/api.py b/aioarango/api.py new file mode 100644 index 000000000..556174cab --- /dev/null +++ b/aioarango/api.py @@ -0,0 +1,72 @@ +from typing import Callable, Optional, TypeVar + +from aioarango.connection import Connection +from aioarango.executor import ApiExecutor +from aioarango.request import Request +from aioarango.response import Response +from aioarango.result import Result + +T = TypeVar("T") + + +class ApiGroup: + """Base class for API groups. + + :param connection: HTTP connection. + :param executor: API executor. + """ + + def __init__(self, connection: Connection, executor: ApiExecutor) -> None: + self._conn = connection + self._executor = executor + + @property + def conn(self) -> Connection: + """Return the HTTP connection. + + :return: HTTP connection. + :rtype: aioarango.connection.BasicConnection | aioarango.connection.JwtConnection | + aioarango.connection.JwtSuperuserConnection + """ + return self._conn + + @property + def db_name(self) -> str: + """Return the name of the current database. + + :return: Database name. + :rtype: str + """ + return self._conn.db_name + + @property + def username(self) -> Optional[str]: + """Return the username. + + :returns: Username. + :rtype: str + """ + return self._conn.username + + @property + def context(self) -> str: + """Return the API execution context. + + :return: API execution context. Possible values are "default", "async", + "batch" and "transaction". + :rtype: str + """ + return self._executor.context + + async def _execute( + self, request: Request, response_handler: Callable[[Response], T] + ) -> Result[T]: + """Execute an API. + + :param request: HTTP request. 
+        :type request: aioarango.request.Request
+        :param response_handler: HTTP response handler.
+        :type response_handler: callable
+        :return: API execution result.
+        """
+        return await self._executor.execute(request, response_handler)
diff --git a/aioarango/aql.py b/aioarango/aql.py
new file mode 100644
index 000000000..703062f78
--- /dev/null
+++ b/aioarango/aql.py
@@ -0,0 +1,619 @@
+from numbers import Number
+from typing import MutableMapping, Optional, Sequence, Union
+
+from aioarango.api import ApiGroup
+from aioarango.connection import Connection
+from aioarango.cursor import Cursor
+from aioarango.exceptions import (
+    AQLCacheClearError,
+    AQLCacheConfigureError,
+    AQLCacheEntriesError,
+    AQLCachePropertiesError,
+    AQLFunctionCreateError,
+    AQLFunctionDeleteError,
+    AQLFunctionListError,
+    AQLQueryClearError,
+    AQLQueryExecuteError,
+    AQLQueryExplainError,
+    AQLQueryKillError,
+    AQLQueryListError,
+    AQLQueryTrackingGetError,
+    AQLQueryTrackingSetError,
+    AQLQueryValidateError,
+)
+from aioarango.executor import ApiExecutor
+from aioarango.formatter import (
+    format_aql_cache,
+    format_aql_query,
+    format_aql_tracking,
+    format_body,
+    format_query_cache_entry,
+)
+from aioarango.request import Request
+from aioarango.response import Response
+from aioarango.result import Result
+from aioarango.typings import Json, Jsons
+
+
+class AQLQueryCache(ApiGroup):
+    """AQL Query Cache API wrapper."""
+
+    def __repr__(self) -> str:
+        return f"<AQLQueryCache in {self._conn.db_name}>"
+
+    async def properties(self) -> Result[Json]:
+        """Return the query cache properties.
+
+        :return: Query cache properties.
+        :rtype: dict
+        :raise aioarango.exceptions.AQLCachePropertiesError: If retrieval fails.
+        """
+        request = Request(method="get", endpoint="/_api/query-cache/properties")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise AQLCachePropertiesError(resp, request)
+            return format_aql_cache(resp.body)
+
+        return await self._execute(request, response_handler)
+
+    async def configure(
+        self,
+        mode: Optional[str] = None,
+        max_results: Optional[int] = None,
+        max_results_size: Optional[int] = None,
+        max_entry_size: Optional[int] = None,
+        include_system: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Configure the query cache properties.
+
+        :param mode: Operation mode. Allowed values are "off", "on" and
+            "demand".
+        :type mode: str
+        :param max_results: Max number of query results stored per
+            database-specific cache.
+        :type max_results: int
+        :param max_results_size: Max cumulative size of query results stored
+            per database-specific cache.
+        :type max_results_size: int
+        :param max_entry_size: Max entry size of each query result stored per
+            database-specific cache.
+        :type max_entry_size: int
+        :param include_system: Store results of queries in system collections.
+        :type include_system: bool
+        :return: Query cache properties.
+        :rtype: dict
+        :raise aioarango.exceptions.AQLCacheConfigureError: If operation fails.
+        """
+        data: Json = {}
+        if mode is not None:
+            data["mode"] = mode
+        if max_results is not None:
+            data["maxResults"] = max_results
+        if max_results_size is not None:
+            data["maxResultsSize"] = max_results_size
+        if max_entry_size is not None:
+            data["maxEntrySize"] = max_entry_size
+        if include_system is not None:
+            data["includeSystem"] = include_system
+
+        request = Request(
+            method="put", endpoint="/_api/query-cache/properties", data=data
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise AQLCacheConfigureError(resp, request)
+            return format_aql_cache(resp.body)
+
+        return await self._execute(request, response_handler)
+
+    async def entries(self) -> Result[Jsons]:
+        """Return the query cache entries.
+
+        :return: Query cache entries.
+        :rtype: [dict]
+        :raise AQLCacheEntriesError: If retrieval fails.
+        """
+        request = Request(method="get", endpoint="/_api/query-cache/entries")
+
+        def response_handler(resp: Response) -> Jsons:
+            if not resp.is_success:
+                raise AQLCacheEntriesError(resp, request)
+            return [format_query_cache_entry(entry) for entry in resp.body]
+
+        return await self._execute(request, response_handler)
+
+    async def clear(self) -> Result[bool]:
+        """Clear the query cache.
+
+        :return: True if query cache was cleared successfully.
+        :rtype: bool
+        :raise aioarango.exceptions.AQLCacheClearError: If operation fails.
+        """
+        request = Request(method="delete", endpoint="/_api/query-cache")
+
+        def response_handler(resp: Response) -> bool:
+            if not resp.is_success:
+                raise AQLCacheClearError(resp, request)
+            return True
+
+        return await self._execute(request, response_handler)
+
+
+class AQL(ApiGroup):
+    """AQL (ArangoDB Query Language) API wrapper.
+
+    :param connection: HTTP connection.
+    :param executor: API executor.
+    """
+
+    def __init__(self, connection: Connection, executor: ApiExecutor) -> None:
+        super().__init__(connection, executor)
+
+    def __repr__(self) -> str:
+        return f"<AQL in {self._conn.db_name}>"
+
+    @property
+    def cache(self) -> AQLQueryCache:
+        """Return the query cache API wrapper.
+
+        :return: Query cache API wrapper.
+        :rtype: aioarango.aql.AQLQueryCache
+        """
+        return AQLQueryCache(self._conn, self._executor)
+
+    async def explain(
+        self,
+        query: str,
+        all_plans: bool = False,
+        max_plans: Optional[int] = None,
+        opt_rules: Optional[Sequence[str]] = None,
+        bind_vars: Optional[MutableMapping[str, str]] = None,
+    ) -> Result[Union[Json, Jsons]]:
+        """Inspect the query and return its metadata without executing it.
+
+        :param query: Query to inspect.
+        :type query: str
+        :param all_plans: If set to True, all possible execution plans are
+            returned in the result. If set to False, only the optimal plan
+            is returned.
+        :type all_plans: bool
+        :param max_plans: Total number of plans generated by the optimizer.
+        :type max_plans: int
+        :param opt_rules: List of optimizer rules.
+        :type opt_rules: list
+        :param bind_vars: Bind variables for the query.
+        :type bind_vars: dict
+        :return: Execution plan, or plans if **all_plans** was set to True.
+        :rtype: dict | list
+        :raise aioarango.exceptions.AQLQueryExplainError: If explain fails.
+ """ + options: Json = {"allPlans": all_plans} + if max_plans is not None: + options["maxNumberOfPlans"] = max_plans + if opt_rules is not None: + options["optimizer"] = {"rules": opt_rules} + + data: Json = {"query": query, "options": options} + if bind_vars is not None: + data["bindVars"] = bind_vars + + request = Request( + method="post", + endpoint="/_api/explain", + data=data, + ) + + def response_handler(resp: Response) -> Union[Json, Jsons]: + if not resp.is_success: + raise AQLQueryExplainError(resp, request) + if "plan" in resp.body: + plan: Json = resp.body["plan"] + return plan + else: + plans: Jsons = resp.body["plans"] + return plans + + return await self._execute(request, response_handler) + + async def validate(self, query: str) -> Result[Json]: + """Parse and validate the query without executing it. + + :param query: Query to validate. + :type query: str + :return: Query details. + :rtype: dict + :raise aioarango.exceptions.AQLQueryValidateError: If validation fails. + """ + request = Request(method="post", endpoint="/_api/query", data={"query": query}) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + body = format_body(resp.body) + if "bindVars" in body: + body["bind_vars"] = body.pop("bindVars") + return body + + raise AQLQueryValidateError(resp, request) + + return await self._execute(request, response_handler) + + async def execute( + self, + query: str, + count: bool = False, + batch_size: Optional[int] = None, + ttl: Optional[Number] = None, + bind_vars: Optional[MutableMapping[str, str]] = None, + full_count: Optional[bool] = None, + max_plans: Optional[int] = None, + optimizer_rules: Optional[Sequence[str]] = None, + cache: Optional[bool] = None, + memory_limit: int = 0, + fail_on_warning: Optional[bool] = None, + profile: Optional[bool] = None, + max_transaction_size: Optional[int] = None, + max_warning_count: Optional[int] = None, + intermediate_commit_count: Optional[int] = None, + intermediate_commit_size: Optional[int] = None, + satellite_sync_wait: Optional[int] = None, + stream: Optional[bool] = None, + skip_inaccessible_cols: Optional[bool] = None, + max_runtime: Optional[Number] = None, + ) -> Result[Cursor]: + """Execute the query and return the result cursor. + + :param query: Query to execute. + :type query: str + :param count: If set to True, the total document count is included in + the result cursor. + :type count: bool + :param batch_size: Number of documents fetched by the cursor in one + round trip. + :type batch_size: int + :param ttl: Server side time-to-live for the cursor in seconds. + :type ttl: int + :param bind_vars: Bind variables for the query. + :type bind_vars: dict + :param full_count: This parameter applies only to queries with LIMIT + clauses. If set to True, the number of matched documents before + the last LIMIT clause executed is included in the cursor. This is + similar to MySQL SQL_CALC_FOUND_ROWS hint. Using this disables a + few LIMIT optimizations and may lead to a longer query execution. + :type full_count: bool + :param max_plans: Max number of plans the optimizer generates. + :type max_plans: int + :param optimizer_rules: List of optimizer rules. + :type optimizer_rules: [str] + :param cache: If set to True, the query cache is used. The operation + mode of the query cache must be set to "on" or "demand". + :type cache: bool + :param memory_limit: Max amount of memory the query is allowed to use + in bytes. If the query goes over the limit, it fails with error + "resource limit exceeded". 
+            Value 0 indicates no limit.
+        :type memory_limit: int
+        :param fail_on_warning: If set to True, the query throws an exception
+            instead of producing a warning. This parameter can be used during
+            development to catch issues early. If set to False, warnings are
+            returned with the query result. There is a server configuration
+            option "--query.fail-on-warning" for setting the default value for
+            this behaviour so it does not need to be set per-query.
+        :type fail_on_warning: bool
+        :param profile: Return additional profiling details in the cursor,
+            unless the query cache is used.
+        :type profile: bool
+        :param max_transaction_size: Transaction size limit in bytes.
+        :type max_transaction_size: int
+        :param max_warning_count: Max number of warnings returned.
+        :type max_warning_count: int
+        :param intermediate_commit_count: Max number of operations after
+            which an intermediate commit is performed automatically.
+        :type intermediate_commit_count: int
+        :param intermediate_commit_size: Max size of operations in bytes after
+            which an intermediate commit is performed automatically.
+        :type intermediate_commit_size: int
+        :param satellite_sync_wait: Number of seconds in which the server must
+            synchronize the satellite collections involved in the query. When
+            the threshold is reached, the query is stopped. Available only for
+            enterprise version of ArangoDB.
+        :type satellite_sync_wait: int | float
+        :param stream: If set to True, query is executed in streaming fashion:
+            query result is not stored server-side but calculated on the fly.
+            Note: long-running queries hold collection locks for as long as the
+            cursor exists. If set to False, query is executed right away in its
+            entirety. Results are either returned right away (if the result set
+            is small enough), or stored server-side and accessible via cursors
+            (while respecting the ttl). You should use this parameter only for
+            short-running queries or without exclusive locks. Note: parameters
+            **cache**, **count** and **full_count** do not work for streaming
+            queries. Query statistics, warnings and profiling data are made
+            available only after the query is finished. Default value is False.
+        :type stream: bool
+        :param skip_inaccessible_cols: If set to True, collections without user
+            access are skipped, and query executes normally instead of raising
+            an error. This helps certain use cases: a graph may contain several
+            collections, and users with different access levels may execute the
+            same query. This parameter lets you limit the result set by user
+            access. Cannot be used in :doc:`transactions <transaction>` and is
+            available only for enterprise version of ArangoDB. Default value is
+            False.
+        :type skip_inaccessible_cols: bool
+        :param max_runtime: Query must be executed within this given timeout or
+            it is killed. The value is specified in seconds. Default value
+            is 0.0 (no timeout).
+        :type max_runtime: int | float
+        :return: Result cursor.
+        :rtype: aioarango.cursor.Cursor
+        :raise aioarango.exceptions.AQLQueryExecuteError: If execute fails.
+ """ + data: Json = {"query": query, "count": count} + if batch_size is not None: + data["batchSize"] = batch_size + if ttl is not None: + data["ttl"] = ttl + if bind_vars is not None: + data["bindVars"] = bind_vars + if cache is not None: + data["cache"] = cache + if memory_limit is not None: + data["memoryLimit"] = memory_limit + + options: Json = {} + if full_count is not None: + options["fullCount"] = full_count + if max_plans is not None: + options["maxNumberOfPlans"] = max_plans + if optimizer_rules is not None: + options["optimizer"] = {"rules": optimizer_rules} + if fail_on_warning is not None: + options["failOnWarning"] = fail_on_warning + if profile is not None: + options["profile"] = profile + if max_transaction_size is not None: + options["maxTransactionSize"] = max_transaction_size + if max_warning_count is not None: + options["maxWarningCount"] = max_warning_count + if intermediate_commit_count is not None: + options["intermediateCommitCount"] = intermediate_commit_count + if intermediate_commit_size is not None: + options["intermediateCommitSize"] = intermediate_commit_size + if satellite_sync_wait is not None: + options["satelliteSyncWait"] = satellite_sync_wait + if stream is not None: + options["stream"] = stream + if skip_inaccessible_cols is not None: + options["skipInaccessibleCollections"] = skip_inaccessible_cols + if max_runtime is not None: + options["maxRuntime"] = max_runtime + + if options: + data["options"] = options + data.update(options) + + request = Request(method="post", endpoint="/_api/cursor", data=data) + + def response_handler(resp: Response) -> Cursor: + if not resp.is_success: + raise AQLQueryExecuteError(resp, request) + return Cursor(self._conn, resp.body) + + return await self._execute(request, response_handler) + + async def kill(self, query_id: str) -> Result[bool]: + """Kill a running query. + + :param query_id: Query ID. + :type query_id: str + :return: True if kill request was sent successfully. + :rtype: bool + :raise aioarango.exceptions.AQLQueryKillError: If the send fails. + """ + request = Request(method="delete", endpoint=f"/_api/query/{query_id}") + + def response_handler(resp: Response) -> bool: + if not resp.is_success: + raise AQLQueryKillError(resp, request) + return True + + return await self._execute(request, response_handler) + + async def queries(self) -> Result[Jsons]: + """Return the currently running AQL queries. + + :return: Running AQL queries. + :rtype: [dict] + :raise aioarango.exceptions.AQLQueryListError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_api/query/current") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise AQLQueryListError(resp, request) + return [format_aql_query(q) for q in resp.body] + + return await self._execute(request, response_handler) + + async def slow_queries(self) -> Result[Jsons]: + """Return a list of all slow AQL queries. + + :return: Slow AQL queries. + :rtype: [dict] + :raise aioarango.exceptions.AQLQueryListError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_api/query/slow") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise AQLQueryListError(resp, request) + return [format_aql_query(q) for q in resp.body] + + return await self._execute(request, response_handler) + + async def clear_slow_queries(self) -> Result[bool]: + """Clear slow AQL queries. + + :return: True if slow queries were cleared successfully. 
+        :rtype: bool
+        :raise aioarango.exceptions.AQLQueryClearError: If operation fails.
+        """
+        request = Request(method="delete", endpoint="/_api/query/slow")
+
+        def response_handler(resp: Response) -> bool:
+            if not resp.is_success:
+                raise AQLQueryClearError(resp, request)
+            return True
+
+        return await self._execute(request, response_handler)
+
+    async def tracking(self) -> Result[Json]:
+        """Return AQL query tracking properties.
+
+        :return: AQL query tracking properties.
+        :rtype: dict
+        :raise aioarango.exceptions.AQLQueryTrackingGetError: If retrieval fails.
+        """
+        request = Request(method="get", endpoint="/_api/query/properties")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise AQLQueryTrackingGetError(resp, request)
+            return format_aql_tracking(resp.body)
+
+        return await self._execute(request, response_handler)
+
+    async def set_tracking(
+        self,
+        enabled: Optional[bool] = None,
+        max_slow_queries: Optional[int] = None,
+        slow_query_threshold: Optional[int] = None,
+        max_query_string_length: Optional[int] = None,
+        track_bind_vars: Optional[bool] = None,
+        track_slow_queries: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Configure AQL query tracking properties.
+
+        :param enabled: Track queries if set to True.
+        :type enabled: bool
+        :param max_slow_queries: Max number of slow queries to track. Oldest entries
+            are discarded first.
+        :type max_slow_queries: int
+        :param slow_query_threshold: Runtime threshold (in seconds) for treating a
+            query as slow.
+        :type slow_query_threshold: int
+        :param max_query_string_length: Max query string length (in bytes) tracked.
+        :type max_query_string_length: int
+        :param track_bind_vars: If set to True, track bind variables used in queries.
+        :type track_bind_vars: bool
+        :param track_slow_queries: If set to True, track slow queries whose runtimes
+            exceed **slow_query_threshold**. To use this, parameter **enabled** must
+            be set to True.
+        :type track_slow_queries: bool
+        :return: Updated AQL query tracking properties.
+        :rtype: dict
+        :raise aioarango.exceptions.AQLQueryTrackingSetError: If operation fails.
+        """
+        data: Json = {}
+        if enabled is not None:
+            data["enabled"] = enabled
+        if max_slow_queries is not None:
+            data["maxSlowQueries"] = max_slow_queries
+        if max_query_string_length is not None:
+            data["maxQueryStringLength"] = max_query_string_length
+        if slow_query_threshold is not None:
+            data["slowQueryThreshold"] = slow_query_threshold
+        if track_bind_vars is not None:
+            data["trackBindVars"] = track_bind_vars
+        if track_slow_queries is not None:
+            data["trackSlowQueries"] = track_slow_queries
+
+        request = Request(method="put", endpoint="/_api/query/properties", data=data)
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise AQLQueryTrackingSetError(resp, request)
+            return format_aql_tracking(resp.body)
+
+        return await self._execute(request, response_handler)
+
+    async def functions(self) -> Result[Jsons]:
+        """List the AQL functions defined in the database.
+
+        :return: AQL functions.
+        :rtype: [dict]
+        :raise aioarango.exceptions.AQLFunctionListError: If retrieval fails.
+ """ + request = Request(method="get", endpoint="/_api/aqlfunction") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise AQLFunctionListError(resp, request) + + functions: Jsons = resp.body["result"] + for function in functions: + if "isDeterministic" in function: + function["is_deterministic"] = function.pop("isDeterministic") + + return functions + + return await self._execute(request, response_handler) + + async def create_function(self, name: str, code: str) -> Result[Json]: + """Create a new AQL function. + + :param name: AQL function name. + :type name: str + :param code: Function definition in Javascript. + :type code: str + :return: Whether the AQL function was newly created or an existing one + was replaced. + :rtype: dict + :raise aioarango.exceptions.AQLFunctionCreateError: If create fails. + """ + request = Request( + method="post", + endpoint="/_api/aqlfunction", + data={"name": name, "code": code}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise AQLFunctionCreateError(resp, request) + return {"is_new": resp.body["isNewlyCreated"]} + + return await self._execute(request, response_handler) + + async def delete_function( + self, name: str, group: bool = False, ignore_missing: bool = False + ) -> Result[Union[bool, Json]]: + """Delete an AQL function. + + :param name: AQL function name. + :type name: str + :param group: If set to True, value of parameter **name** is treated + as a namespace prefix, and all functions in the namespace are + deleted. If set to False, the value of **name** must be a fully + qualified function name including any namespaces. + :type group: bool + :param ignore_missing: Do not raise an exception on missing function. + :type ignore_missing: bool + :return: Number of AQL functions deleted if operation was successful, + False if function(s) was not found and **ignore_missing** was set + to True. + :rtype: dict | bool + :raise aioarango.exceptions.AQLFunctionDeleteError: If delete fails. + """ + request = Request( + method="delete", + endpoint=f"/_api/aqlfunction/{name}", + params={"group": group}, + ) + + def response_handler(resp: Response) -> Union[bool, Json]: + if resp.error_code == 1582 and ignore_missing: + return False + if not resp.is_success: + raise AQLFunctionDeleteError(resp, request) + return {"deleted": resp.body["deletedCount"]} + + return await self._execute(request, response_handler) diff --git a/aioarango/client.py b/aioarango/client.py new file mode 100644 index 000000000..7449fcaec --- /dev/null +++ b/aioarango/client.py @@ -0,0 +1,111 @@ +from json import dumps, loads +from typing import Sequence, Union + +from aioarango.connection import BasicConnection +from aioarango.database import StandardDatabase +from aioarango.exceptions import ServerConnectionError +from aioarango.http import DefaultHTTPClient +from aioarango.resolver import ( + RoundRobinHostResolver, + SingleHostResolver, +) +from aioarango.version import __version__ + + +class ArangoClient: + """ArangoDB client. + + :param hosts: Host URL or list of URLs (coordinators in a cluster). 
+    :type hosts: str | [str]
+    """
+
+    def __init__(
+        self,
+        hosts: Union[str, Sequence[str]] = "http://127.0.0.1:8529",
+    ) -> None:
+        if isinstance(hosts, str):
+            self._hosts = [host.strip("/") for host in hosts.split(",")]
+        else:
+            self._hosts = [host.strip("/") for host in hosts]
+
+        host_count = len(self._hosts)
+
+        if host_count == 1:
+            self._host_resolver = SingleHostResolver()
+        else:
+            self._host_resolver = RoundRobinHostResolver(host_count)
+
+        self._http = DefaultHTTPClient()
+        self._serializer = dumps
+        self._deserializer = loads
+        self._sessions = [self._http.create_session(h) for h in self._hosts]
+
+    def __repr__(self) -> str:
+        return f"<ArangoClient {','.join(self._hosts)}>"
+
+    async def close(self):
+        for session in self._sessions:
+            await session.aclose()
+
+    @property
+    def hosts(self) -> Sequence[str]:
+        """Return the list of ArangoDB host URLs.
+
+        :return: List of ArangoDB host URLs.
+        :rtype: [str]
+        """
+        return self._hosts
+
+    @property
+    def version(self):
+        """Return the client version.
+
+        :return: Client version.
+        :rtype: str
+        """
+        return __version__
+
+    async def db(
+        self,
+        name: str = "_system",
+        username: str = "root",
+        password: str = "",
+        verify: bool = False,
+    ) -> StandardDatabase:
+        """Connect to an ArangoDB database and return the database API wrapper.
+
+        :param name: Database name.
+        :type name: str
+        :param username: Username for basic authentication.
+        :type username: str
+        :param password: Password for basic authentication.
+        :type password: str
+        :param verify: Verify the connection by sending a test request.
+        :type verify: bool
+        :return: Standard database API wrapper.
+        :rtype: aioarango.database.StandardDatabase
+        :raise aioarango.exceptions.ServerConnectionError: If **verify** was set
+            to True and the connection fails.
+        """
+
+        connection = BasicConnection(
+            hosts=self._hosts,
+            host_resolver=self._host_resolver,
+            sessions=self._sessions,
+            db_name=name,
+            username=username,
+            password=password,
+            http_client=self._http,
+            serializer=self._serializer,
+            deserializer=self._deserializer,
+        )
+
+        if verify:
+            try:
+                await connection.ping()
+            except ServerConnectionError as err:
+                raise err
+            except Exception as err:
+                raise ServerConnectionError(f"bad connection: {err}")
+
+        return StandardDatabase(connection)
diff --git a/aioarango/collection.py b/aioarango/collection.py
new file mode 100644
index 000000000..d904c5999
--- /dev/null
+++ b/aioarango/collection.py
@@ -0,0 +1,3069 @@
+from numbers import Number
+from typing import List, Optional, Sequence, Tuple, Union
+
+from aioarango.api import ApiGroup
+from aioarango.connection import Connection
+from aioarango.cursor import Cursor
+from aioarango.exceptions import (
+    ArangoServerError,
+    CollectionChecksumError,
+    CollectionConfigureError,
+    CollectionLoadError,
+    CollectionPropertiesError,
+    CollectionRecalculateCountError,
+    CollectionRenameError,
+    CollectionResponsibleShardError,
+    CollectionRevisionError,
+    CollectionStatisticsError,
+    CollectionTruncateError,
+    CollectionUnloadError,
+    DocumentCountError,
+    DocumentDeleteError,
+    DocumentGetError,
+    DocumentIDsError,
+    DocumentInError,
+    DocumentInsertError,
+    DocumentKeysError,
+    DocumentParseError,
+    DocumentReplaceError,
+    DocumentRevisionError,
+    DocumentUpdateError,
+    EdgeListError,
+    IndexCreateError,
+    IndexDeleteError,
+    IndexListError,
+    IndexLoadError,
+)
+from aioarango.executor import ApiExecutor
+from aioarango.formatter import (
+    format_collection,
+    format_edge,
+    format_index,
+    format_vertex,
+)
+from aioarango.request import Request
+from aioarango.response import Response
+from aioarango.result import Result
+from aioarango.typings import Fields, Headers, Json, Params
+from aioarango.utils import get_doc_id, is_none_or_int, is_none_or_str
+
+
+class Collection(ApiGroup):
+    """Base class for collection API wrappers.
+
+    :param connection: HTTP connection.
+    :param executor: API executor.
+    :param name: Collection name.
+    """
+
+    types = {2: "document", 3: "edge"}
+
+    statuses = {
+        1: "new",
+        2: "unloaded",
+        3: "loaded",
+        4: "unloading",
+        5: "deleted",
+        6: "loading",
+    }
+
+    def __init__(
+        self, connection: Connection, executor: ApiExecutor, name: str
+    ) -> None:
+        super().__init__(connection, executor)
+        self._name = name
+        self._id_prefix = name + "/"
+
+    # def __iter__(self) -> Result[Cursor]:
+    #     return self.all()
+
+    # def __len__(self) -> Result[int]:
+    #     return self.count()
+
+    # def __contains__(self, document: Union[str, Json]) -> Result[bool]:
+    #     return self.has(document, check_rev=False)
+
+    def _get_status_text(self, code: Optional[int]) -> Optional[str]:  # pragma: no cover
+        """Return the collection status text.
+
+        :param code: Collection status code.
+        :type code: int | None
+        :return: Collection status text, or None if **code** is None.
+        :rtype: str | None
+        """
+        return None if code is None else self.statuses[code]
+
+    def _validate_id(self, doc_id: str) -> str:
+        """Check the collection name in the document ID.
+
+        :param doc_id: Document ID.
+        :type doc_id: str
+        :return: Verified document ID.
+        :rtype: str
+        :raise aioarango.exceptions.DocumentParseError: On bad collection name.
+        """
+        if not doc_id.startswith(self._id_prefix):
+            raise DocumentParseError(f'bad collection name in document ID "{doc_id}"')
+        return doc_id
+
+    def _extract_id(self, body: Json) -> str:
+        """Extract the document ID from document body.
+
+        :param body: Document body.
+        :type body: dict
+        :return: Document ID.
+        :rtype: str
+        :raise aioarango.exceptions.DocumentParseError: On missing ID and key.
+        """
+        try:
+            if "_id" in body:
+                return self._validate_id(body["_id"])
+            else:
+                key: str = body["_key"]
+                return self._id_prefix + key
+        except KeyError:
+            raise DocumentParseError('field "_key" or "_id" required')
+
+    def _prep_from_body(self, document: Json, check_rev: bool) -> Tuple[str, Headers]:
+        """Prepare document ID and request headers.
+
+        :param document: Document body.
+        :type document: dict
+        :param check_rev: Whether to check the revision.
+        :type check_rev: bool
+        :return: Document ID and request headers.
+        :rtype: (str, dict)
+        """
+        doc_id = self._extract_id(document)
+        if not check_rev or "_rev" not in document:
+            return doc_id, {}
+        return doc_id, {"If-Match": document["_rev"]}
+
+    def _prep_from_doc(
+        self, document: Union[str, Json], rev: Optional[str], check_rev: bool
+    ) -> Tuple[str, Union[str, Json], Json]:
+        """Prepare document ID, body and request headers.
+
+        :param document: Document ID, key or body.
+        :type document: str | dict
+        :param rev: Document revision or None.
+        :type rev: str | None
+        :param check_rev: Whether to check the revision.
+        :type check_rev: bool
+        :return: Document ID, body and request headers.
+        :rtype: (str, str | dict, dict)
+        """
+        if isinstance(document, dict):
+            doc_id = self._extract_id(document)
+            rev = rev or document.get("_rev")
+
+            if not check_rev or rev is None:
+                return doc_id, doc_id, {}
+            else:
+                return doc_id, doc_id, {"If-Match": rev}
+        else:
+            if "/" in document:
+                doc_id = self._validate_id(document)
+            else:
+                doc_id = self._id_prefix + document
+
+            if not check_rev or rev is None:
+                return doc_id, doc_id, {}
+            else:
+                return doc_id, doc_id, {"If-Match": rev}
+
+    def _ensure_key_in_body(self, body: Json) -> Json:
+        """Return the document body with "_key" field populated.
+
+        :param body: Document body.
+        :type body: dict
+        :return: Document body with "_key" field.
+        :rtype: dict
+        :raise aioarango.exceptions.DocumentParseError: On missing ID and key.
+        """
+        if "_key" in body:
+            return body
+        elif "_id" in body:
+            doc_id = self._validate_id(body["_id"])
+            body = body.copy()
+            body["_key"] = doc_id[len(self._id_prefix) :]
+            return body
+        raise DocumentParseError('field "_key" or "_id" required')
+
+    def _ensure_key_from_id(self, body: Json) -> Json:
+        """Return the body with "_key" field if it has "_id" field.
+
+        :param body: Document body.
+        :type body: dict
+        :return: Document body with "_key" field if it has "_id" field.
+        :rtype: dict
+        """
+        if "_id" in body and "_key" not in body:
+            doc_id = self._validate_id(body["_id"])
+            body = body.copy()
+            body["_key"] = doc_id[len(self._id_prefix) :]
+        return body
+
+    @property
+    def name(self) -> str:
+        """Return collection name.
+
+        :return: Collection name.
+        :rtype: str
+        """
+        return self._name
+
+    async def recalculate_count(self) -> Result[bool]:
+        """Recalculate the document count.
+
+        :return: True if recalculation was successful.
+        :rtype: bool
+        :raise aioarango.exceptions.CollectionRecalculateCountError: If operation fails.
+        """
+        request = Request(
+            method="put",
+            endpoint=f"/_api/collection/{self.name}/recalculateCount",
+        )
+
+        def response_handler(resp: Response) -> bool:
+            if resp.is_success:
+                return True
+            raise CollectionRecalculateCountError(resp, request)
+
+        return await self._execute(request, response_handler)
+
+    async def responsible_shard(self, document: Json) -> Result[str]:  # pragma: no cover
+        """Return the ID of the shard responsible for given **document**.
+
+        If the document does not exist, return the shard that would be
+        responsible.
+
+        :param document: Document body with "_key" field.
+        :type document: dict
+        :return: Shard ID.
+        :rtype: str
+        """
+        request = Request(
+            method="put",
+            endpoint=f"/_api/collection/{self.name}/responsibleShard",
+            data=document,
+            read=self.name,
+        )
+
+        def response_handler(resp: Response) -> str:
+            if resp.is_success:
+                return str(resp.body["shardId"])
+            raise CollectionResponsibleShardError(resp, request)
+
+        return await self._execute(request, response_handler)
+
+    async def rename(self, new_name: str) -> Result[bool]:
+        """Rename the collection.
+
+        Renames may not be reflected immediately in async execution, batch
+        execution or transactions. It is recommended to initialize new API
+        wrappers after a rename.
+
+        :param new_name: New collection name.
+        :type new_name: str
+        :return: True if collection was renamed successfully.
+        :rtype: bool
+        :raise aioarango.exceptions.CollectionRenameError: If rename fails.
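+
+        Example (illustrative sketch, assuming ``db`` is a
+        :class:`aioarango.database.StandardDatabase` wrapper and a
+        collection named "students" exists):
+
+        .. code-block:: python
+
+            students = db.collection("students")
+            assert await students.rename("pupils") is True
+            # The wrapper tracks the new name after a successful rename.
+            assert students.name == "pupils"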
+ """ + request = Request( + method="put", + endpoint=f"/_api/collection/{self.name}/rename", + data={"name": new_name}, + ) + + def response_handler(resp: Response) -> bool: + if not resp.is_success: + raise CollectionRenameError(resp, request) + self._name = new_name + self._id_prefix = new_name + "/" + return True + + return await self._execute(request, response_handler) + + async def properties(self) -> Result[Json]: + """Return collection properties. + + :return: Collection properties. + :rtype: dict + :raise aioarango.exceptions.CollectionPropertiesError: If retrieval fails. + """ + request = Request( + method="get", + endpoint=f"/_api/collection/{self.name}/properties", + read=self.name, + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_collection(resp.body) + raise CollectionPropertiesError(resp, request) + + return await self._execute(request, response_handler) + + async def configure( + self, sync: Optional[bool] = None, schema: Optional[Json] = None + ) -> Result[Json]: + """Configure collection properties. + + :param sync: Block until operations are synchronized to disk. + :type sync: bool | None + :param schema: document schema for validation of objects. + :type schema: dict + :return: New collection properties. + :rtype: dict + :raise aioarango.exceptions.CollectionConfigureError: If operation fails. + """ + data: Json = {} + if sync is not None: + data["waitForSync"] = sync + if schema is not None: + data["schema"] = schema + + request = Request( + method="put", + endpoint=f"/_api/collection/{self.name}/properties", + data=data, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise CollectionConfigureError(resp, request) + return format_collection(resp.body) + + return await self._execute(request, response_handler) + + async def statistics(self) -> Result[Json]: + """Return collection statistics. + + :return: Collection statistics. + :rtype: dict + :raise aioarango.exceptions.CollectionStatisticsError: If retrieval fails. + """ + request = Request( + method="get", + endpoint=f"/_api/collection/{self.name}/figures", + read=self.name, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise CollectionStatisticsError(resp, request) + + stats: Json = resp.body.get("figures", resp.body) + if "documentReferences" in stats: # pragma: no cover + stats["document_refs"] = stats.pop("documentReferences") + if "lastTick" in stats: # pragma: no cover + stats["last_tick"] = stats.pop("lastTick") + if "waitingFor" in stats: # pragma: no cover + stats["waiting_for"] = stats.pop("waitingFor") + if "documentsSize" in stats: # pragma: no cover + stats["documents_size"] = stats.pop("documentsSize") + if "cacheInUse" in stats: # pragma: no cover + stats["cache_in_use"] = stats.pop("cacheInUse") + if "cacheSize" in stats: # pragma: no cover + stats["cache_size"] = stats.pop("cacheSize") + if "cacheUsage" in stats: # pragma: no cover + stats["cache_usage"] = stats.pop("cacheUsage") + if "uncollectedLogfileEntries" in stats: # pragma: no cover + stats["uncollected_logfile_entries"] = stats.pop( + "uncollectedLogfileEntries" + ) + return stats + + return await self._execute(request, response_handler) + + async def revision(self) -> Result[str]: + """Return collection revision. + + :return: Collection revision. + :rtype: str + :raise aioarango.exceptions.CollectionRevisionError: If retrieval fails. 
+ """ + request = Request( + method="get", + endpoint=f"/_api/collection/{self.name}/revision", + read=self.name, + ) + + def response_handler(resp: Response) -> str: + if resp.is_success: + return str(resp.body["revision"]) + raise CollectionRevisionError(resp, request) + + return await self._execute(request, response_handler) + + async def checksum(self, with_rev: bool = False, with_data: bool = False) -> Result[str]: + """Return collection checksum. + + :param with_rev: Include document revisions in checksum calculation. + :type with_rev: bool + :param with_data: Include document data in checksum calculation. + :type with_data: bool + :return: Collection checksum. + :rtype: str + :raise aioarango.exceptions.CollectionChecksumError: If retrieval fails. + """ + request = Request( + method="get", + endpoint=f"/_api/collection/{self.name}/checksum", + params={"withRevision": with_rev, "withData": with_data}, + ) + + def response_handler(resp: Response) -> str: + if resp.is_success: + return str(resp.body["checksum"]) + raise CollectionChecksumError(resp, request) + + return await self._execute(request, response_handler) + + async def load(self) -> Result[bool]: + """Load the collection into memory. + + :return: True if collection was loaded successfully. + :rtype: bool + :raise aioarango.exceptions.CollectionLoadError: If operation fails. + """ + request = Request(method="put", endpoint=f"/_api/collection/{self.name}/load") + + def response_handler(resp: Response) -> bool: + if not resp.is_success: + raise CollectionLoadError(resp, request) + return True + + return await self._execute(request, response_handler) + + async def unload(self) -> Result[bool]: + """Unload the collection from memory. + + :return: True if collection was unloaded successfully. + :rtype: bool + :raise aioarango.exceptions.CollectionUnloadError: If operation fails. + """ + request = Request(method="put", endpoint=f"/_api/collection/{self.name}/unload") + + def response_handler(resp: Response) -> bool: + if not resp.is_success: + raise CollectionUnloadError(resp, request) + return True + + return await self._execute(request, response_handler) + + async def truncate(self) -> Result[bool]: + """Delete all documents in the collection. + + :return: True if collection was truncated successfully. + :rtype: bool + :raise aioarango.exceptions.CollectionTruncateError: If operation fails. + """ + request = Request( + method="put", endpoint=f"/_api/collection/{self.name}/truncate" + ) + + def response_handler(resp: Response) -> bool: + if not resp.is_success: + raise CollectionTruncateError(resp, request) + return True + + return await self._execute(request, response_handler) + + async def count(self) -> Result[int]: + """Return the total document count. + + :return: Total document count. + :rtype: int + :raise aioarango.exceptions.DocumentCountError: If retrieval fails. + """ + request = Request(method="get", endpoint=f"/_api/collection/{self.name}/count") + + def response_handler(resp: Response) -> int: + if resp.is_success: + result: int = resp.body["count"] + return result + raise DocumentCountError(resp, request) + + return await self._execute(request, response_handler) + + async def has( + self, + document: Union[str, Json], + rev: Optional[str] = None, + check_rev: bool = True, + ) -> Result[bool]: + """Check if a document exists in the collection. + + :param document: Document ID, key or body. Document body must contain + the "_id" or "_key" field. + :type document: str | dict + :param rev: Expected document revision. 
Overrides value of "_rev" field + in **document** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **document** (if given) + is compared against the revision of target document. + :type check_rev: bool + :return: True if document exists, False otherwise. + :rtype: bool + :raise aioarango.exceptions.DocumentInError: If check fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + handle, body, headers = self._prep_from_doc(document, rev, check_rev) + + request = Request( + method="get", + endpoint=f"/_api/document/{handle}", + headers=headers, + read=self.name, + ) + + def response_handler(resp: Response) -> bool: + if resp.error_code == 1202: + return False + if resp.status_code == 412: + raise DocumentRevisionError(resp, request) + if not resp.is_success: + raise DocumentInError(resp, request) + return bool(resp.body) + + return await self._execute(request, response_handler) + + async def ids(self) -> Result[Cursor]: + """Return the IDs of all documents in the collection. + + :return: Document ID cursor. + :rtype: aioarango.cursor.Cursor + :raise aioarango.exceptions.DocumentIDsError: If retrieval fails. + """ + request = Request( + method="put", + endpoint="/_api/simple/all-keys", + data={"collection": self.name, "type": "id"}, + read=self.name, + ) + + def response_handler(resp: Response) -> Cursor: + if not resp.is_success: + raise DocumentIDsError(resp, request) + return Cursor(self._conn, resp.body) + + return await self._execute(request, response_handler) + + async def keys(self) -> Result[Cursor]: + """Return the keys of all documents in the collection. + + :return: Document key cursor. + :rtype: aioarango.cursor.Cursor + :raise aioarango.exceptions.DocumentKeysError: If retrieval fails. + """ + request = Request( + method="put", + endpoint="/_api/simple/all-keys", + data={"collection": self.name, "type": "key"}, + read=self.name, + ) + + def response_handler(resp: Response) -> Cursor: + if not resp.is_success: + raise DocumentKeysError(resp, request) + return Cursor(self._conn, resp.body) + + return await self._execute(request, response_handler) + + async def all( + self, skip: Optional[int] = None, limit: Optional[int] = None + ) -> Result[Cursor]: + """Return all documents in the collection. + + :param skip: Number of documents to skip. + :type skip: int | None + :param limit: Max number of documents returned. + :type limit: int | None + :return: Document cursor. + :rtype: aioarango.cursor.Cursor + :raise aioarango.exceptions.DocumentGetError: If retrieval fails. 
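+
+        Example (illustrative sketch, assuming ``db`` is a database API
+        wrapper with a "students" collection and the returned cursor is
+        consumed with ``async for``):
+
+        .. code-block:: python
+
+            # Fetch up to 100 documents and iterate the server-side cursor.
+            cursor = await db.collection("students").all(limit=100)
+            async for doc in cursor:
+                print(doc["_key"])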
+ """ + assert is_none_or_int(skip), "skip must be a non-negative int" + assert is_none_or_int(limit), "limit must be a non-negative int" + + data: Json = {"collection": self.name} + if skip is not None: + data["skip"] = skip + if limit is not None: + data["limit"] = limit + + request = Request( + method="put", endpoint="/_api/simple/all", data=data, read=self.name + ) + + def response_handler(resp: Response) -> Cursor: + if not resp.is_success: + raise DocumentGetError(resp, request) + return Cursor(self._conn, resp.body) + + return await self._execute(request, response_handler) + + async def export( + self, + limit: Optional[int] = None, + count: bool = False, + batch_size: Optional[int] = None, + flush: bool = False, + flush_wait: Optional[int] = None, + ttl: Optional[Number] = None, + filter_fields: Optional[Sequence[str]] = None, + filter_type: str = "include", + ) -> Result[Cursor]: + """Export all documents in the collection using a server cursor. + + :param flush: If set to True, flush the write-ahead log prior to the + export. If set to False, documents in the write-ahead log during + the export are not included in the result. + :type flush: bool + :param flush_wait: Max wait time in seconds for write-ahead log flush. + :type flush_wait: int | None + :param count: Include the document count in the server cursor. + :type count: bool + :param batch_size: Max number of documents in the batch fetched by + the cursor in one round trip. + :type batch_size: int | None + :param limit: Max number of documents fetched by the cursor. + :type limit: int | None + :param ttl: Time-to-live for the cursor on the server. + :type ttl: int | float | None + :param filter_fields: Document fields to filter with. + :type filter_fields: [str] | None + :param filter_type: Allowed values are "include" or "exclude". + :type filter_type: str + :return: Document cursor. + :rtype: aioarango.cursor.Cursor + :raise aioarango.exceptions.DocumentGetError: If export fails. + """ + data: Json = {"count": count, "flush": flush} + if flush_wait is not None: + data["flushWait"] = flush_wait + if batch_size is not None: + data["batchSize"] = batch_size + if limit is not None: + data["limit"] = limit + if ttl is not None: + data["ttl"] = ttl + if filter_fields is not None: + data["restrict"] = {"fields": filter_fields, "type": filter_type} + request = Request( + method="post", + endpoint="/_api/export", + params={"collection": self.name}, + data=data, + ) + + def response_handler(resp: Response) -> Cursor: + if not resp.is_success: + raise DocumentGetError(resp, request) + return Cursor(self._conn, resp.body, "export") + + return await self._execute(request, response_handler) + + async def find( + self, filters: Json, skip: Optional[int] = None, limit: Optional[int] = None + ) -> Result[Cursor]: + """Return all documents that match the given filters. + + :param filters: Document filters. + :type filters: dict + :param skip: Number of documents to skip. + :type skip: int | None + :param limit: Max number of documents returned. + :type limit: int | None + :return: Document cursor. + :rtype: aioarango.cursor.Cursor + :raise aioarango.exceptions.DocumentGetError: If retrieval fails. 
+ """ + assert isinstance(filters, dict), "filters must be a dict" + assert is_none_or_int(skip), "skip must be a non-negative int" + assert is_none_or_int(limit), "limit must be a non-negative int" + + data: Json = { + "collection": self.name, + "example": filters, + "skip": skip, + } + if limit is not None: + data["limit"] = limit + + request = Request( + method="put", endpoint="/_api/simple/by-example", data=data, read=self.name + ) + + def response_handler(resp: Response) -> Cursor: + if not resp.is_success: + raise DocumentGetError(resp, request) + return Cursor(self._conn, resp.body) + + return await self._execute(request, response_handler) + + async def find_near( + self, latitude: Number, longitude: Number, limit: Optional[int] = None + ) -> Result[Cursor]: + """Return documents near a given coordinate. + + Documents returned are sorted according to distance, with the nearest + document being the first. If there are documents of equal distance, + they are randomly chosen from the set until the limit is reached. A geo + index must be defined in the collection to use this method. + + :param latitude: Latitude. + :type latitude: int | float + :param longitude: Longitude. + :type longitude: int | float + :param limit: Max number of documents returned. + :type limit: int | None + :returns: Document cursor. + :rtype: aioarango.cursor.Cursor + :raises aioarango.exceptions.DocumentGetError: If retrieval fails. + """ + assert isinstance(latitude, Number), "latitude must be a number" + assert isinstance(longitude, Number), "longitude must be a number" + assert is_none_or_int(limit), "limit must be a non-negative int" + + query = """ + FOR doc IN NEAR(@collection, @latitude, @longitude{}) + RETURN doc + """.format( + "" if limit is None else ", @limit " + ) + + bind_vars = { + "collection": self._name, + "latitude": latitude, + "longitude": longitude, + } + if limit is not None: + bind_vars["limit"] = limit + + request = Request( + method="post", + endpoint="/_api/cursor", + data={"query": query, "bindVars": bind_vars, "count": True}, + read=self.name, + ) + + def response_handler(resp: Response) -> Cursor: + if not resp.is_success: + raise DocumentGetError(resp, request) + return Cursor(self._conn, resp.body) + + return await self._execute(request, response_handler) + + async def find_in_range( + self, + field: str, + lower: int, + upper: int, + skip: Optional[int] = None, + limit: Optional[int] = None, + ) -> Result[Cursor]: + """Return documents within a given range in a random order. + + A skiplist index must be defined in the collection to use this method. + + :param field: Document field name. + :type field: str + :param lower: Lower bound (inclusive). + :type lower: int + :param upper: Upper bound (exclusive). + :type upper: int + :param skip: Number of documents to skip. + :type skip: int | None + :param limit: Max number of documents returned. + :type limit: int | None + :returns: Document cursor. + :rtype: aioarango.cursor.Cursor + :raises aioarango.exceptions.DocumentGetError: If retrieval fails. 
+ """ + assert is_none_or_int(skip), "skip must be a non-negative int" + assert is_none_or_int(limit), "limit must be a non-negative int" + + bind_vars = { + "@collection": self._name, + "field": field, + "lower": lower, + "upper": upper, + "skip": 0 if skip is None else skip, + "limit": 2147483647 if limit is None else limit, # 2 ^ 31 - 1 + } + + query = """ + FOR doc IN @@collection + FILTER doc.@field >= @lower && doc.@field < @upper + LIMIT @skip, @limit + RETURN doc + """ + + request = Request( + method="post", + endpoint="/_api/cursor", + data={"query": query, "bindVars": bind_vars, "count": True}, + read=self.name, + ) + + def response_handler(resp: Response) -> Cursor: + if not resp.is_success: + raise DocumentGetError(resp, request) + return Cursor(self._conn, resp.body) + + return await self._execute(request, response_handler) + + async def find_in_radius( + self, + latitude: Number, + longitude: Number, + radius: Number, + distance_field: Optional[str] = None, + ) -> Result[Cursor]: + """Return documents within a given radius around a coordinate. + + A geo index must be defined in the collection to use this method. + + :param latitude: Latitude. + :type latitude: int | float + :param longitude: Longitude. + :type longitude: int | float + :param radius: Max radius. + :type radius: int | float + :param distance_field: Document field used to indicate the distance to + the given coordinate. This parameter is ignored in transactions. + :type distance_field: str + :returns: Document cursor. + :rtype: aioarango.cursor.Cursor + :raises aioarango.exceptions.DocumentGetError: If retrieval fails. + """ + assert isinstance(latitude, Number), "latitude must be a number" + assert isinstance(longitude, Number), "longitude must be a number" + assert isinstance(radius, Number), "radius must be a number" + assert is_none_or_str(distance_field), "distance_field must be a str" + + query = """ + FOR doc IN WITHIN(@@collection, @latitude, @longitude, @radius{}) + RETURN doc + """.format( + "" if distance_field is None else ", @distance" + ) + + bind_vars = { + "@collection": self._name, + "latitude": latitude, + "longitude": longitude, + "radius": radius, + } + if distance_field is not None: + bind_vars["distance"] = distance_field + + request = Request( + method="post", + endpoint="/_api/cursor", + data={"query": query, "bindVars": bind_vars, "count": True}, + read=self.name, + ) + + def response_handler(resp: Response) -> Cursor: + if not resp.is_success: + raise DocumentGetError(resp, request) + return Cursor(self._conn, resp.body) + + return await self._execute(request, response_handler) + + async def find_in_box( + self, + latitude1: Number, + longitude1: Number, + latitude2: Number, + longitude2: Number, + skip: Optional[int] = None, + limit: Optional[int] = None, + index: Optional[str] = None, + ) -> Result[Cursor]: + """Return all documents in an rectangular area. + + :param latitude1: First latitude. + :type latitude1: int | float + :param longitude1: First longitude. + :type longitude1: int | float + :param latitude2: Second latitude. + :type latitude2: int | float + :param longitude2: Second longitude + :type longitude2: int | float + :param skip: Number of documents to skip. + :type skip: int | None + :param limit: Max number of documents returned. + :type limit: int | None + :param index: ID of the geo index to use (without the collection + prefix). This parameter is ignored in transactions. + :type index: str | None + :returns: Document cursor. 
+ :rtype: aioarango.cursor.Cursor + :raises aioarango.exceptions.DocumentGetError: If retrieval fails. + """ + assert isinstance(latitude1, Number), "latitude1 must be a number" + assert isinstance(longitude1, Number), "longitude1 must be a number" + assert isinstance(latitude2, Number), "latitude2 must be a number" + assert isinstance(longitude2, Number), "longitude2 must be a number" + assert is_none_or_int(skip), "skip must be a non-negative int" + assert is_none_or_int(limit), "limit must be a non-negative int" + + data: Json = { + "collection": self._name, + "latitude1": latitude1, + "longitude1": longitude1, + "latitude2": latitude2, + "longitude2": longitude2, + } + if skip is not None: + data["skip"] = skip + if limit is not None: + data["limit"] = limit + if index is not None: + data["geo"] = self._name + "/" + index + + request = Request( + method="put", + endpoint="/_api/simple/within-rectangle", + data=data, + read=self.name, + ) + + def response_handler(resp: Response) -> Cursor: + if not resp.is_success: + raise DocumentGetError(resp, request) + return Cursor(self._conn, resp.body) + + return await self._execute(request, response_handler) + + async def find_by_text( + self, field: str, query: str, limit: Optional[int] = None + ) -> Result[Cursor]: + """Return documents that match the given fulltext query. + + :param field: Document field with fulltext index. + :type field: str + :param query: Fulltext query. + :type query: str + :param limit: Max number of documents returned. + :type limit: int | None + :returns: Document cursor. + :rtype: aioarango.cursor.Cursor + :raises aioarango.exceptions.DocumentGetError: If retrieval fails. + """ + assert is_none_or_int(limit), "limit must be a non-negative int" + + bind_vars: Json = { + "collection": self._name, + "field": field, + "query": query, + } + if limit is not None: + bind_vars["limit"] = limit + + aql = """ + FOR doc IN FULLTEXT(@collection, @field, @query{}) + RETURN doc + """.format( + "" if limit is None else ", @limit" + ) + + request = Request( + method="post", + endpoint="/_api/cursor", + data={"query": aql, "bindVars": bind_vars, "count": True}, + read=self.name, + ) + + def response_handler(resp: Response) -> Cursor: + if not resp.is_success: + raise DocumentGetError(resp, request) + return Cursor(self._conn, resp.body) + + return await self._execute(request, response_handler) + + async def get_many(self, documents: Sequence[Union[str, Json]]) -> Result[List[Json]]: + """Return multiple documents ignoring any missing ones. + + :param documents: List of document keys, IDs or bodies. Document bodies + must contain the "_id" or "_key" fields. + :type documents: [str | dict] + :return: Documents. Missing ones are not included. + :rtype: [dict] + :raise aioarango.exceptions.DocumentGetError: If retrieval fails. + """ + handles = [self._extract_id(d) if isinstance(d, dict) else d for d in documents] + + request = Request( + method="put", + endpoint="/_api/simple/lookup-by-keys", + data={"collection": self.name, "keys": handles}, + read=self.name, + ) + + def response_handler(resp: Response) -> List[Json]: + if not resp.is_success: + raise DocumentGetError(resp, request) + docs = resp.body["documents"] + return [doc for doc in docs if "_id" in doc] + + return await self._execute(request, response_handler) + + async def random(self) -> Result[Json]: + """Return a random document from the collection. + + :return: A random document. + :rtype: dict + :raise aioarango.exceptions.DocumentGetError: If retrieval fails. 
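+
+        Example (illustrative sketch, assuming ``db`` is a database API
+        wrapper with a non-empty "students" collection):
+
+        .. code-block:: python
+
+            doc = await db.collection("students").random()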
+ """ + request = Request( + method="put", + endpoint="/_api/simple/any", + data={"collection": self.name}, + read=self.name, + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + result: Json = resp.body["document"] + return result + raise DocumentGetError(resp, request) + + return await self._execute(request, response_handler) + + #################### + # Index Management # + #################### + + async def indexes(self) -> Result[List[Json]]: + """Return the collection indexes. + + :return: Collection indexes. + :rtype: [dict] + :raise aioarango.exceptions.IndexListError: If retrieval fails. + """ + request = Request( + method="get", + endpoint="/_api/index", + params={"collection": self.name}, + ) + + def response_handler(resp: Response) -> List[Json]: + if not resp.is_success: + raise IndexListError(resp, request) + result = resp.body["indexes"] + return [format_index(index) for index in result] + + return await self._execute(request, response_handler) + + async def _add_index(self, data: Json) -> Result[Json]: + """Helper method for creating a new index. + + :param data: Index data. + :type data: dict + :return: New index details. + :rtype: dict + :raise aioarango.exceptions.IndexCreateError: If create fails. + """ + request = Request( + method="post", + endpoint="/_api/index", + data=data, + params={"collection": self.name}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise IndexCreateError(resp, request) + return format_index(resp.body) + + return await self._execute(request, response_handler) + + async def add_hash_index( + self, + fields: Sequence[str], + unique: Optional[bool] = None, + sparse: Optional[bool] = None, + deduplicate: Optional[bool] = None, + name: Optional[str] = None, + in_background: Optional[bool] = None, + ) -> Result[Json]: + """Create a new hash index. + + :param fields: Document fields to index. + :type fields: [str] + :param unique: Whether the index is unique. + :type unique: bool | None + :param sparse: If set to True, documents with None in the field + are also indexed. If set to False, they are skipped. + :type sparse: bool | None + :param deduplicate: If set to True, inserting duplicate index values + from the same document triggers unique constraint errors. + :type deduplicate: bool | None + :param name: Optional name for the index. + :type name: str | None + :param in_background: Do not hold the collection lock. + :type in_background: bool | None + :return: New index details. + :rtype: dict + :raise aioarango.exceptions.IndexCreateError: If create fails. + """ + data: Json = {"type": "hash", "fields": fields} + + if unique is not None: + data["unique"] = unique + if sparse is not None: + data["sparse"] = sparse + if deduplicate is not None: + data["deduplicate"] = deduplicate + if name is not None: + data["name"] = name + if in_background is not None: + data["inBackground"] = in_background + + return await self._add_index(data) + + async def add_skiplist_index( + self, + fields: Sequence[str], + unique: Optional[bool] = None, + sparse: Optional[bool] = None, + deduplicate: Optional[bool] = None, + name: Optional[str] = None, + in_background: Optional[bool] = None, + ) -> Result[Json]: + """Create a new skiplist index. + + :param fields: Document fields to index. + :type fields: [str] + :param unique: Whether the index is unique. + :type unique: bool | None + :param sparse: If set to True, documents with None in the field + are also indexed. If set to False, they are skipped. 
+ :type sparse: bool | None + :param deduplicate: If set to True, inserting duplicate index values + from the same document triggers unique constraint errors. + :type deduplicate: bool | None + :param name: Optional name for the index. + :type name: str | None + :param in_background: Do not hold the collection lock. + :type in_background: bool | None + :return: New index details. + :rtype: dict + :raise aioarango.exceptions.IndexCreateError: If create fails. + """ + data: Json = {"type": "skiplist", "fields": fields} + + if unique is not None: + data["unique"] = unique + if sparse is not None: + data["sparse"] = sparse + if deduplicate is not None: + data["deduplicate"] = deduplicate + if name is not None: + data["name"] = name + if in_background is not None: + data["inBackground"] = in_background + + return await self._add_index(data) + + async def add_geo_index( + self, + fields: Fields, + ordered: Optional[bool] = None, + name: Optional[str] = None, + in_background: Optional[bool] = None, + ) -> Result[Json]: + """Create a new geo-spatial index. + + :param fields: A single document field or a list of document fields. If + a single field is given, the field must have values that are lists + with at least two floats. Documents with missing fields or invalid + values are excluded. + :type fields: str | [str] + :param ordered: Whether the order is longitude, then latitude. + :type ordered: bool | None + :param name: Optional name for the index. + :type name: str | None + :param in_background: Do not hold the collection lock. + :type in_background: bool | None + :return: New index details. + :rtype: dict + :raise aioarango.exceptions.IndexCreateError: If create fails. + """ + data: Json = {"type": "geo", "fields": fields} + + if ordered is not None: + data["geoJson"] = ordered + if name is not None: + data["name"] = name + if in_background is not None: + data["inBackground"] = in_background + + return await self._add_index(data) + + async def add_fulltext_index( + self, + fields: Sequence[str], + min_length: Optional[int] = None, + name: Optional[str] = None, + in_background: Optional[bool] = None, + ) -> Result[Json]: + """Create a new fulltext index. + + :param fields: Document fields to index. + :type fields: [str] + :param min_length: Minimum number of characters to index. + :type min_length: int | None + :param name: Optional name for the index. + :type name: str | None + :param in_background: Do not hold the collection lock. + :type in_background: bool | None + :return: New index details. + :rtype: dict + :raise aioarango.exceptions.IndexCreateError: If create fails. + """ + data: Json = {"type": "fulltext", "fields": fields} + + if min_length is not None: + data["minLength"] = min_length + if name is not None: + data["name"] = name + if in_background is not None: + data["inBackground"] = in_background + + return await self._add_index(data) + + async def add_persistent_index( + self, + fields: Sequence[str], + unique: Optional[bool] = None, + sparse: Optional[bool] = None, + name: Optional[str] = None, + in_background: Optional[bool] = None, + ) -> Result[Json]: + """Create a new persistent index. + + Unique persistent indexes on non-sharded keys are not supported in a + cluster. + + :param fields: Document fields to index. + :type fields: [str] + :param unique: Whether the index is unique. + :type unique: bool | None + :param sparse: Exclude documents that do not contain at least one of + the indexed fields, or documents that have a value of None in any + of the indexed fields. 
+ :type sparse: bool | None + :param name: Optional name for the index. + :type name: str | None + :param in_background: Do not hold the collection lock. + :type in_background: bool | None + :return: New index details. + :rtype: dict + :raise aioarango.exceptions.IndexCreateError: If create fails. + """ + data: Json = {"type": "persistent", "fields": fields} + + if unique is not None: + data["unique"] = unique + if sparse is not None: + data["sparse"] = sparse + if name is not None: + data["name"] = name + if in_background is not None: + data["inBackground"] = in_background + + return await self._add_index(data) + + async def add_ttl_index( + self, + fields: Sequence[str], + expiry_time: int, + name: Optional[str] = None, + in_background: Optional[bool] = None, + ) -> Result[Json]: + """Create a new TTL (time-to-live) index. + + :param fields: Document field to index. + :type fields: [str] + :param expiry_time: Time of expiry in seconds after document creation. + :type expiry_time: int + :param name: Optional name for the index. + :type name: str | None + :param in_background: Do not hold the collection lock. + :type in_background: bool | None + :return: New index details. + :rtype: dict + :raise aioarango.exceptions.IndexCreateError: If create fails. + """ + data: Json = {"type": "ttl", "fields": fields, "expireAfter": expiry_time} + + if name is not None: + data["name"] = name + if in_background is not None: + data["inBackground"] = in_background + + return await self._add_index(data) + + async def delete_index(self, index_id: str, ignore_missing: bool = False) -> Result[bool]: + """Delete an index. + + :param index_id: Index ID. + :type index_id: str + :param ignore_missing: Do not raise an exception on missing index. + :type ignore_missing: bool + :return: True if index was deleted successfully, False if index was + not found and **ignore_missing** was set to True. + :rtype: bool + :raise aioarango.exceptions.IndexDeleteError: If delete fails. + """ + request = Request( + method="delete", endpoint=f"/_api/index/{self.name}/{index_id}" + ) + + def response_handler(resp: Response) -> bool: + if resp.error_code == 1212 and ignore_missing: + return False + if not resp.is_success: + raise IndexDeleteError(resp, request) + return True + + return await self._execute(request, response_handler) + + async def load_indexes(self) -> Result[bool]: + """Cache all indexes in the collection into memory. + + :return: True if index was loaded successfully. + :rtype: bool + :raise aioarango.exceptions.IndexLoadError: If operation fails. + """ + request = Request( + method="put", + endpoint=f"/_api/collection/{self.name}/loadIndexesIntoMemory", + ) + + def response_handler(resp: Response) -> bool: + if not resp.is_success: + raise IndexLoadError(resp, request) + return True + + return await self._execute(request, response_handler) + + async def insert_many( + self, + documents: Sequence[Json], + return_new: bool = False, + sync: Optional[bool] = None, + silent: bool = False, + overwrite: bool = False, + return_old: bool = False, + ) -> Result[Union[bool, List[Union[Json, ArangoServerError]]]]: + """Insert multiple documents. + + .. note:: + + If inserting a document fails, the exception is not raised but + returned as an object in the result list. It is up to you to + inspect the list to determine which documents were inserted + successfully (returns document metadata) and which were not + (returns exception object). + + .. 
note:: + + In edge/vertex collections, this method does NOT provide the + transactional guarantees and validations that single insert + operation does for graphs. If these properties are required, see + :func:`aioarango.database.StandardDatabase.begin_batch_execution` + for an alternative approach. + + :param documents: List of new documents to insert. If they contain the + "_key" or "_id" fields, the values are used as the keys of the new + documents (auto-generated otherwise). Any "_rev" field is ignored. + :type documents: [dict] + :param return_new: Include bodies of the new documents in the returned + metadata. Ignored if parameter **silent** is set to True + :type return_new: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :param overwrite: If set to True, operation does not fail on duplicate + keys and the existing documents are replaced. + :type overwrite: bool + :param return_old: Include body of the old documents if replaced. + Applies only when value of **overwrite** is set to True. + :type return_old: bool + :return: List of document metadata (e.g. document keys, revisions) and + any exception, or True if parameter **silent** was set to True. + :rtype: [dict | ArangoServerError] | bool + :raise aioarango.exceptions.DocumentInsertError: If insert fails. + """ + documents = [self._ensure_key_from_id(doc) for doc in documents] + + params: Params = { + "returnNew": return_new, + "silent": silent, + "overwrite": overwrite, + "returnOld": return_old, + } + if sync is not None: + params["waitForSync"] = sync + + request = Request( + method="post", + endpoint=f"/_api/document/{self.name}", + data=documents, + params=params, + ) + + def response_handler( + resp: Response, + ) -> Union[bool, List[Union[Json, ArangoServerError]]]: + if not resp.is_success: + raise DocumentInsertError(resp, request) + if silent is True: + return True + + results: List[Union[Json, ArangoServerError]] = [] + for body in resp.body: + if "_id" in body: + if "_oldRev" in body: + body["_old_rev"] = body.pop("_oldRev") + results.append(body) + else: + sub_resp = self._conn.prep_bulk_err_response(resp, body) + results.append(DocumentInsertError(sub_resp, request)) + + return results + + return await self._execute(request, response_handler) + + async def update_many( + self, + documents: Sequence[Json], + check_rev: bool = True, + merge: bool = True, + keep_none: bool = True, + return_new: bool = False, + return_old: bool = False, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, List[Union[Json, ArangoServerError]]]]: + """Update multiple documents. + + .. note:: + + If updating a document fails, the exception is not raised but + returned as an object in the result list. It is up to you to + inspect the list to determine which documents were updated + successfully (returns document metadata) and which were not + (returns exception object). + + .. note:: + + In edge/vertex collections, this method does NOT provide the + transactional guarantees and validations that single update + operation does for graphs. If these properties are required, see + :func:`aioarango.database.StandardDatabase.begin_batch_execution` + for an alternative approach. + + :param documents: Partial or full documents with the updated values. + They must contain the "_id" or "_key" fields. 
+ :type documents: [dict] + :param check_rev: If set to True, revisions of **documents** (if given) + are compared against the revisions of target documents. + :type check_rev: bool + :param merge: If set to True, sub-dictionaries are merged instead of + the new ones overwriting the old ones. + :type merge: bool | None + :param keep_none: If set to True, fields with value None are retained + in the document. Otherwise, they are removed completely. + :type keep_none: bool | None + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_old: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :return: List of document metadata (e.g. document keys, revisions) and + any exceptions, or True if parameter **silent** was set to True. + :rtype: [dict | ArangoError] | bool + :raise aioarango.exceptions.DocumentUpdateError: If update fails. + """ + params: Params = { + "keepNull": keep_none, + "mergeObjects": merge, + "returnNew": return_new, + "returnOld": return_old, + "ignoreRevs": not check_rev, + "overwrite": not check_rev, + "silent": silent, + } + if sync is not None: + params["waitForSync"] = sync + + documents = [self._ensure_key_in_body(doc) for doc in documents] + + request = Request( + method="patch", + endpoint=f"/_api/document/{self.name}", + data=documents, + params=params, + write=self.name, + ) + + def response_handler( + resp: Response, + ) -> Union[bool, List[Union[Json, ArangoServerError]]]: + if not resp.is_success: + raise DocumentUpdateError(resp, request) + if silent is True: + return True + + results = [] + for body in resp.body: + if "_id" in body: + body["_old_rev"] = body.pop("_oldRev") + results.append(body) + else: + sub_resp = self._conn.prep_bulk_err_response(resp, body) + + error: ArangoServerError + if sub_resp.error_code == 1200: + error = DocumentRevisionError(sub_resp, request) + else: # pragma: no cover + error = DocumentUpdateError(sub_resp, request) + + results.append(error) + + return results + + return await self._execute(request, response_handler) + + async def update_match( + self, + filters: Json, + body: Json, + limit: Optional[int] = None, + keep_none: bool = True, + sync: Optional[bool] = None, + merge: bool = True, + ) -> Result[int]: + """Update matching documents. + + .. note:: + + In edge/vertex collections, this method does NOT provide the + transactional guarantees and validations that single update + operation does for graphs. If these properties are required, see + :func:`aioarango.database.StandardDatabase.begin_batch_execution` + for an alternative approach. + + :param filters: Document filters. + :type filters: dict + :param body: Full or partial document body with the updates. + :type body: dict + :param limit: Max number of documents to update. If the limit is lower + than the number of matched documents, random documents are + chosen. This parameter is not supported on sharded collections. + :type limit: int | None + :param keep_none: If set to True, fields with value None are retained + in the document. Otherwise, they are removed completely. + :type keep_none: bool | None + :param sync: Block until operation is synchronized to disk. 
+ :type sync: bool | None + :param merge: If set to True, sub-dictionaries are merged instead of + the new ones overwriting the old ones. + :type merge: bool | None + :return: Number of documents updated. + :rtype: int + :raise aioarango.exceptions.DocumentUpdateError: If update fails. + """ + data: Json = { + "collection": self.name, + "example": filters, + "newValue": body, + "keepNull": keep_none, + "mergeObjects": merge, + } + if limit is not None: + data["limit"] = limit + if sync is not None: + data["waitForSync"] = sync + + request = Request( + method="put", + endpoint="/_api/simple/update-by-example", + data=data, + write=self.name, + ) + + def response_handler(resp: Response) -> int: + if resp.is_success: + result: int = resp.body["updated"] + return result + raise DocumentUpdateError(resp, request) + + return await self._execute(request, response_handler) + + async def replace_many( + self, + documents: Sequence[Json], + check_rev: bool = True, + return_new: bool = False, + return_old: bool = False, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, List[Union[Json, ArangoServerError]]]]: + """Replace multiple documents. + + .. note:: + + If replacing a document fails, the exception is not raised but + returned as an object in the result list. It is up to you to + inspect the list to determine which documents were replaced + successfully (returns document metadata) and which were not + (returns exception object). + + .. note:: + + In edge/vertex collections, this method does NOT provide the + transactional guarantees and validations that single replace + operation does for graphs. If these properties are required, see + :func:`aioarango.database.StandardDatabase.begin_batch_execution` + for an alternative approach. + + :param documents: New documents to replace the old ones with. They must + contain the "_id" or "_key" fields. Edge documents must also have + "_from" and "_to" fields. + :type documents: [dict] + :param check_rev: If set to True, revisions of **documents** (if given) + are compared against the revisions of target documents. + :type check_rev: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_old: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :return: List of document metadata (e.g. document keys, revisions) and + any exceptions, or True if parameter **silent** was set to True. + :rtype: [dict | ArangoServerError] | bool + :raise aioarango.exceptions.DocumentReplaceError: If replace fails. 
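+
+        Example (illustrative sketch, assuming ``db`` is a database API
+        wrapper; per-document failures come back as exception objects in the
+        result list rather than being raised):
+
+        .. code-block:: python
+
+            results = await db.collection("students").replace_many(
+                [{"_key": "jane", "age": 31}, {"_key": "josh", "age": 32}]
+            )
+            failed = [r for r in results if isinstance(r, Exception)]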
+ """ + params: Params = { + "returnNew": return_new, + "returnOld": return_old, + "ignoreRevs": not check_rev, + "overwrite": not check_rev, + "silent": silent, + } + if sync is not None: + params["waitForSync"] = sync + + documents = [self._ensure_key_in_body(doc) for doc in documents] + + request = Request( + method="put", + endpoint=f"/_api/document/{self.name}", + params=params, + data=documents, + write=self.name, + ) + + def response_handler( + resp: Response, + ) -> Union[bool, List[Union[Json, ArangoServerError]]]: + if not resp.is_success: + raise DocumentReplaceError(resp, request) + if silent is True: + return True + + results: List[Union[Json, ArangoServerError]] = [] + for body in resp.body: + if "_id" in body: + body["_old_rev"] = body.pop("_oldRev") + results.append(body) + else: + sub_resp = self._conn.prep_bulk_err_response(resp, body) + + error: ArangoServerError + if sub_resp.error_code == 1200: + error = DocumentRevisionError(sub_resp, request) + else: # pragma: no cover + error = DocumentReplaceError(sub_resp, request) + + results.append(error) + + return results + + return await self._execute(request, response_handler) + + async def replace_match( + self, + filters: Json, + body: Json, + limit: Optional[int] = None, + sync: Optional[bool] = None, + ) -> Result[int]: + """Replace matching documents. + + .. note:: + + In edge/vertex collections, this method does NOT provide the + transactional guarantees and validations that single replace + operation does for graphs. If these properties are required, see + :func:`aioarango.database.StandardDatabase.begin_batch_execution` + for an alternative approach. + + :param filters: Document filters. + :type filters: dict + :param body: New document body. + :type body: dict + :param limit: Max number of documents to replace. If the limit is lower + than the number of matched documents, random documents are chosen. + :type limit: int | None + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :return: Number of documents replaced. + :rtype: int + :raise aioarango.exceptions.DocumentReplaceError: If replace fails. + """ + data: Json = {"collection": self.name, "example": filters, "newValue": body} + if limit is not None: + data["limit"] = limit + if sync is not None: + data["waitForSync"] = sync + + request = Request( + method="put", + endpoint="/_api/simple/replace-by-example", + data=data, + write=self.name, + ) + + def response_handler(resp: Response) -> int: + if not resp.is_success: + raise DocumentReplaceError(resp, request) + result: int = resp.body["replaced"] + return result + + return await self._execute(request, response_handler) + + async def delete_many( + self, + documents: Sequence[Json], + return_old: bool = False, + check_rev: bool = True, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, List[Union[Json, ArangoServerError]]]]: + """Delete multiple documents. + + .. note:: + + If deleting a document fails, the exception is not raised but + returned as an object in the result list. It is up to you to + inspect the list to determine which documents were deleted + successfully (returns document metadata) and which were not + (returns exception object). + + .. note:: + + In edge/vertex collections, this method does NOT provide the + transactional guarantees and validations that single delete + operation does for graphs. 
If these properties are required, see + :func:`aioarango.database.StandardDatabase.begin_batch_execution` + for an alternative approach. + + :param documents: Document IDs, keys or bodies. Document bodies must + contain the "_id" or "_key" fields. + :type documents: [str | dict] + :param return_old: Include bodies of the old documents in the result. + :type return_old: bool + :param check_rev: If set to True, revisions of **documents** (if given) + are compared against the revisions of target documents. + :type check_rev: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :return: List of document metadata (e.g. document keys, revisions) and + any exceptions, or True if parameter **silent** was set to True. + :rtype: [dict | ArangoServerError] | bool + :raise aioarango.exceptions.DocumentDeleteError: If delete fails. + """ + params: Params = { + "returnOld": return_old, + "ignoreRevs": not check_rev, + "overwrite": not check_rev, + "silent": silent, + } + if sync is not None: + params["waitForSync"] = sync + + documents = [ + self._ensure_key_in_body(doc) if isinstance(doc, dict) else doc + for doc in documents + ] + + request = Request( + method="delete", + endpoint=f"/_api/document/{self.name}", + params=params, + data=documents, + write=self.name, + ) + + def response_handler( + resp: Response, + ) -> Union[bool, List[Union[Json, ArangoServerError]]]: + if not resp.is_success: + raise DocumentDeleteError(resp, request) + if silent is True: + return True + + results: List[Union[Json, ArangoServerError]] = [] + for body in resp.body: + if "_id" in body: + results.append(body) + else: + sub_resp = self._conn.prep_bulk_err_response(resp, body) + + error: ArangoServerError + if sub_resp.error_code == 1200: + error = DocumentRevisionError(sub_resp, request) + else: + error = DocumentDeleteError(sub_resp, request) + results.append(error) + + return results + + return await self._execute(request, response_handler) + + async def delete_match( + self, filters: Json, limit: Optional[int] = None, sync: Optional[bool] = None + ) -> Result[int]: + """Delete matching documents. + + .. note:: + + In edge/vertex collections, this method does NOT provide the + transactional guarantees and validations that single delete + operation does for graphs. If these properties are required, see + :func:`aioarango.database.StandardDatabase.begin_batch_execution` + for an alternative approach. + + :param filters: Document filters. + :type filters: dict + :param limit: Max number of documents to delete. If the limit is lower + than the number of matched documents, random documents are chosen. + :type limit: int | None + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :return: Number of documents deleted. + :rtype: int + :raise aioarango.exceptions.DocumentDeleteError: If delete fails. 
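+
+        Example (illustrative sketch, assuming ``db`` is a database API
+        wrapper with a "students" collection):
+
+        .. code-block:: python
+
+            # Remove every document whose "graduated" field equals True.
+            count = await db.collection("students").delete_match(
+                {"graduated": True}
+            )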
+ """ + data: Json = {"collection": self.name, "example": filters} + if sync is not None: + data["waitForSync"] = sync + if limit is not None and limit != 0: + data["limit"] = limit + + request = Request( + method="put", + endpoint="/_api/simple/remove-by-example", + data=data, + write=self.name, + ) + + def response_handler(resp: Response) -> int: + if resp.is_success: + result: int = resp.body["deleted"] + return result + raise DocumentDeleteError(resp, request) + + return await self._execute(request, response_handler) + + async def import_bulk( + self, + documents: Sequence[Json], + halt_on_error: bool = True, + details: bool = True, + from_prefix: Optional[str] = None, + to_prefix: Optional[str] = None, + overwrite: Optional[bool] = None, + on_duplicate: Optional[str] = None, + sync: Optional[bool] = None, + ) -> Result[Json]: + """Insert multiple documents into the collection. + + .. note:: + + This method is faster than :func:`aioarango.collection.Collection.insert_many` + but does not return as many details. + + .. note:: + + In edge/vertex collections, this method does NOT provide the + transactional guarantees and validations that single insert + operation does for graphs. If these properties are required, see + :func:`aioarango.database.StandardDatabase.begin_batch_execution` + for an alternative approach. + + :param documents: List of new documents to insert. If they contain the + "_key" or "_id" fields, the values are used as the keys of the new + documents (auto-generated otherwise). Any "_rev" field is ignored. + :type documents: [dict] + :param halt_on_error: Halt the entire import on an error. + :type halt_on_error: bool + :param details: If set to True, the returned result will include an + additional list of detailed error messages. + :type details: bool + :param from_prefix: String prefix prepended to the value of "_from" + field in each edge document inserted. For example, prefix "foo" + prepended to "_from": "bar" will result in "_from": "foo/bar". + Applies only to edge collections. + :type from_prefix: str + :param to_prefix: String prefix prepended to the value of "_to" field + in edge document inserted. For example, prefix "foo" prepended to + "_to": "bar" will result in "_to": "foo/bar". Applies only to edge + collections. + :type to_prefix: str + :param overwrite: If set to True, all existing documents are removed + prior to the import. Indexes are still preserved. + :type overwrite: bool + :param on_duplicate: Action to take on unique key constraint violations + (for documents with "_key" fields). Allowed values are "error" (do + not import the new documents and count them as errors), "update" + (update the existing documents while preserving any fields missing + in the new ones), "replace" (replace the existing documents with + new ones), and "ignore" (do not import the new documents and count + them as ignored, as opposed to counting them as errors). Options + "update" and "replace" may fail on secondary unique key constraint + violations. + :type on_duplicate: str + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :return: Result of the bulk import. + :rtype: dict + :raise aioarango.exceptions.DocumentInsertError: If import fails. 
+ """ + documents = [self._ensure_key_from_id(doc) for doc in documents] + + params: Params = {"type": "array", "collection": self.name} + if halt_on_error is not None: + params["complete"] = halt_on_error + if details is not None: + params["details"] = details + if from_prefix is not None: # pragma: no cover + params["fromPrefix"] = from_prefix + if to_prefix is not None: # pragma: no cover + params["toPrefix"] = to_prefix + if overwrite is not None: + params["overwrite"] = overwrite + if on_duplicate is not None: + params["onDuplicate"] = on_duplicate + if sync is not None: + params["waitForSync"] = sync + + request = Request( + method="post", + endpoint="/_api/import", + data=documents, + params=params, + write=self.name, + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + result: Json = resp.body + return result + raise DocumentInsertError(resp, request) + + return await self._execute(request, response_handler) + + +class StandardCollection(Collection): + """Standard ArangoDB collection API wrapper.""" + + def __repr__(self) -> str: + return f"" + + # def __getitem__(self, key: Union[str, Json]) -> Result[Optional[Json]]: + # return self.get(key) + + async def get( + self, + document: Union[str, Json], + rev: Optional[str] = None, + check_rev: bool = True, + ) -> Result[Optional[Json]]: + """Return a document. + + :param document: Document ID, key or body. Document body must contain + the "_id" or "_key" field. + :type document: str | dict + :param rev: Expected document revision. Overrides the value of "_rev" + field in **document** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **document** (if given) + is compared against the revision of target document. + :type check_rev: bool + :return: Document, or None if not found. + :rtype: dict | None + :raise aioarango.exceptions.DocumentGetError: If retrieval fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + handle, body, headers = self._prep_from_doc(document, rev, check_rev) + + request = Request( + method="get", + endpoint=f"/_api/document/{handle}", + headers=headers, + read=self.name, + ) + + def response_handler(resp: Response) -> Optional[Json]: + if resp.error_code == 1202: + return None + if resp.status_code == 412: + raise DocumentRevisionError(resp, request) + if not resp.is_success: + raise DocumentGetError(resp, request) + + result: Json = resp.body + return result + + return await self._execute(request, response_handler) + + async def insert( + self, + document: Json, + return_new: bool = False, + sync: Optional[bool] = None, + silent: bool = False, + overwrite: bool = False, + return_old: bool = False, + overwrite_mode: Optional[str] = None, + keep_none: Optional[bool] = None, + merge: Optional[bool] = None, + ) -> Result[Union[bool, Json]]: + """Insert a new document. + + :param document: Document to insert. If it contains the "_key" or "_id" + field, the value is used as the key of the new document (otherwise + it is auto-generated). Any "_rev" field is ignored. + :type document: dict + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. 
+ :type silent: bool + :param overwrite: If set to True, operation does not fail on duplicate + key and existing document is overwritten (replace-insert). + :type overwrite: bool + :param return_old: Include body of the old document if overwritten. + Ignored if parameter **silent** is set to True. + :type return_old: bool + :param overwrite_mode: Overwrite behavior used when the document key + exists already. Allowed values are "replace" (replace-insert) or + "update" (update-insert). Implicitly sets the value of parameter + **overwrite**. + :type overwrite_mode: str | None + :param keep_none: If set to True, fields with value None are retained + in the document. Otherwise, they are removed completely. Applies + only when **overwrite_mode** is set to "update" (update-insert). + :type keep_none: bool | None + :param merge: If set to True (default), sub-dictionaries are merged + instead of the new one overwriting the old one. Applies only when + **overwrite_mode** is set to "update" (update-insert). + :type merge: bool | None + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentInsertError: If insert fails. + """ + document = self._ensure_key_from_id(document) + + params: Params = { + "returnNew": return_new, + "silent": silent, + "overwrite": overwrite, + "returnOld": return_old, + } + if sync is not None: + params["waitForSync"] = sync + if overwrite_mode is not None: + params["overwriteMode"] = overwrite_mode + if keep_none is not None: + params["keepNull"] = keep_none + if merge is not None: + params["mergeObjects"] = merge + + request = Request( + method="post", + endpoint=f"/_api/document/{self.name}", + data=document, + params=params, + write=self.name, + ) + + def response_handler(resp: Response) -> Union[bool, Json]: + if not resp.is_success: + raise DocumentInsertError(resp, request) + + if silent: + return True + + result: Json = resp.body + if "_oldRev" in result: + result["_old_rev"] = result.pop("_oldRev") + return result + + return await self._execute(request, response_handler) + + async def update( + self, + document: Json, + check_rev: bool = True, + merge: bool = True, + keep_none: bool = True, + return_new: bool = False, + return_old: bool = False, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, Json]]: + """Update a document. + + :param document: Partial or full document with the updated values. It + must contain the "_id" or "_key" field. + :type document: dict + :param check_rev: If set to True, revision of **document** (if given) + is compared against the revision of target document. + :type check_rev: bool + :param merge: If set to True, sub-dictionaries are merged instead of + the new one overwriting the old one. + :type merge: bool | None + :param keep_none: If set to True, fields with value None are retained + in the document. Otherwise, they are removed completely. + :type keep_none: bool | None + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_old: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. 
+ :type silent: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentUpdateError: If update fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + params: Params = { + "keepNull": keep_none, + "mergeObjects": merge, + "returnNew": return_new, + "returnOld": return_old, + "ignoreRevs": not check_rev, + "overwrite": not check_rev, + "silent": silent, + } + if sync is not None: + params["waitForSync"] = sync + + request = Request( + method="patch", + endpoint=f"/_api/document/{self._extract_id(document)}", + data=document, + params=params, + write=self.name, + ) + + def response_handler(resp: Response) -> Union[bool, Json]: + if resp.status_code == 412: + raise DocumentRevisionError(resp, request) + elif not resp.is_success: + raise DocumentUpdateError(resp, request) + if silent is True: + return True + + result: Json = resp.body + result["_old_rev"] = result.pop("_oldRev") + return result + + return await self._execute(request, response_handler) + + async def replace( + self, + document: Json, + check_rev: bool = True, + return_new: bool = False, + return_old: bool = False, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, Json]]: + """Replace a document. + + :param document: New document to replace the old one with. It must + contain the "_id" or "_key" field. Edge document must also have + "_from" and "_to" fields. + :type document: dict + :param check_rev: If set to True, revision of **document** (if given) + is compared against the revision of target document. + :type check_rev: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_old: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentReplaceError: If replace fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. 
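+
+        Example (a sketch; the collection and document are hypothetical):
+
+        .. code-block:: python
+
+            meta = await db.collection('students').replace(
+                {'_key': 'alice', 'gpa': 3.9}
+            )
+            new_rev = meta['_rev']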
+ """ + params: Params = { + "returnNew": return_new, + "returnOld": return_old, + "ignoreRevs": not check_rev, + "overwrite": not check_rev, + "silent": silent, + } + if sync is not None: + params["waitForSync"] = sync + + request = Request( + method="put", + endpoint=f"/_api/document/{self._extract_id(document)}", + params=params, + data=document, + write=self.name, + ) + + def response_handler(resp: Response) -> Union[bool, Json]: + if resp.status_code == 412: + raise DocumentRevisionError(resp, request) + if not resp.is_success: + raise DocumentReplaceError(resp, request) + + if silent is True: + return True + + result: Json = resp.body + if "_oldRev" in result: + result["_old_rev"] = result.pop("_oldRev") + return result + + return await self._execute(request, response_handler) + + async def delete( + self, + document: Union[str, Json], + rev: Optional[str] = None, + check_rev: bool = True, + ignore_missing: bool = False, + return_old: bool = False, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, Json]]: + """Delete a document. + + :param document: Document ID, key or body. Document body must contain + the "_id" or "_key" field. + :type document: str | dict + :param rev: Expected document revision. Overrides the value of "_rev" + field in **document** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **document** (if given) + is compared against the revision of target document. + :type check_rev: bool + :param ignore_missing: Do not raise an exception on missing document. + This parameter has no effect in transactions where an exception is + always raised on failures. + :type ignore_missing: bool + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_old: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :return: Document metadata (e.g. document key, revision), or True if + parameter **silent** was set to True, or False if document was not + found and **ignore_missing** was set to True (does not apply in + transactions). + :rtype: bool | dict + :raise aioarango.exceptions.DocumentDeleteError: If delete fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + handle, body, headers = self._prep_from_doc(document, rev, check_rev) + + params: Params = { + "returnOld": return_old, + "ignoreRevs": not check_rev, + "overwrite": not check_rev, + "silent": silent, + } + if sync is not None: + params["waitForSync"] = sync + + request = Request( + method="delete", + endpoint=f"/_api/document/{handle}", + params=params, + headers=headers, + write=self.name, + ) + + def response_handler(resp): + if resp.error_code == 1202 and ignore_missing: + return False + if resp.status_code == 412: + raise DocumentRevisionError(resp, request) + if not resp.is_success: + raise DocumentDeleteError(resp, request) + return True if silent else resp.body + + return await self._execute(request, response_handler) + + +class VertexCollection(Collection): + """Vertex collection API wrapper. + + :param connection: HTTP connection. + :param executor: API executor. + :param graph: Graph name. + :param name: Vertex collection name. 
+ """ + + def __init__( + self, connection: Connection, executor: ApiExecutor, graph: str, name: str + ) -> None: + super().__init__(connection, executor, name) + self._graph = graph + + def __repr__(self) -> str: + return f"" + + @property + def graph(self) -> str: + """Return the graph name. + + :return: Graph name. + :rtype: str + """ + return self._graph + + async def get( + self, + vertex: Union[str, Json], + rev: Optional[str] = None, + check_rev: bool = True, + ) -> Result[Optional[Json]]: + """Return a vertex document. + + :param vertex: Vertex document ID, key or body. Document body must + contain the "_id" or "_key" field. + :type vertex: str | dict + :param rev: Expected document revision. Overrides the value of "_rev" + field in **vertex** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **vertex** (if given) is + compared against the revision of target vertex document. + :type check_rev: bool + :return: Vertex document or None if not found. + :rtype: dict | None + :raise aioarango.exceptions.DocumentGetError: If retrieval fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + handle, body, headers = self._prep_from_doc(vertex, rev, check_rev) + + request = Request( + method="get", + endpoint=f"/_api/gharial/{self._graph}/vertex/{handle}", + headers=headers, + read=self.name, + ) + + def response_handler(resp: Response) -> Optional[Json]: + if resp.error_code == 1202: + return None + if resp.status_code == 412: + raise DocumentRevisionError(resp, request) + if not resp.is_success: + raise DocumentGetError(resp, request) + result: Json = resp.body["vertex"] + return result + + return await self._execute(request, response_handler) + + async def insert( + self, + vertex: Json, + sync: Optional[bool] = None, + silent: bool = False, + return_new: bool = False, + ) -> Result[Union[bool, Json]]: + """Insert a new vertex document. + + :param vertex: New vertex document to insert. If it has "_key" or "_id" + field, its value is used as key of the new vertex (otherwise it is + auto-generated). Any "_rev" field is ignored. + :type vertex: dict + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool + :return: Document metadata (e.g. document key, revision), or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentInsertError: If insert fails. 
+ """ + vertex = self._ensure_key_from_id(vertex) + + params: Params = {"silent": silent, "returnNew": return_new} + if sync is not None: + params["waitForSync"] = sync + + request = Request( + method="post", + endpoint=f"/_api/gharial/{self._graph}/vertex/{self.name}", + data=vertex, + params=params, + write=self.name, + ) + + def response_handler(resp: Response) -> Union[bool, Json]: + if not resp.is_success: + raise DocumentInsertError(resp, request) + if silent: + return True + return format_vertex(resp.body) + + return await self._execute(request, response_handler) + + async def update( + self, + vertex: Json, + check_rev: bool = True, + keep_none: bool = True, + sync: Optional[bool] = None, + silent: bool = False, + return_old: bool = False, + return_new: bool = False, + ) -> Result[Union[bool, Json]]: + """Update a vertex document. + + :param vertex: Partial or full vertex document with updated values. It + must contain the "_key" or "_id" field. + :type vertex: dict + :param check_rev: If set to True, revision of **vertex** (if given) is + compared against the revision of target vertex document. + :type check_rev: bool + :param keep_none: If set to True, fields with value None are retained + in the document. If set to False, they are removed completely. + :type keep_none: bool | None + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_old: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentUpdateError: If update fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + vertex_id, headers = self._prep_from_body(vertex, check_rev) + + params: Params = { + "keepNull": keep_none, + "overwrite": not check_rev, + "silent": silent, + "returnNew": return_new, + "returnOld": return_old, + } + if sync is not None: + params["waitForSync"] = sync + + request = Request( + method="patch", + endpoint=f"/_api/gharial/{self._graph}/vertex/{vertex_id}", + headers=headers, + params=params, + data=vertex, + write=self.name, + ) + + def response_handler(resp: Response) -> Union[bool, Json]: + if resp.status_code == 412: # pragma: no cover + raise DocumentRevisionError(resp, request) + elif not resp.is_success: + raise DocumentUpdateError(resp, request) + if silent is True: + return True + return format_vertex(resp.body) + + return await self._execute(request, response_handler) + + async def replace( + self, + vertex: Json, + check_rev: bool = True, + sync: Optional[bool] = None, + silent: bool = False, + return_old: bool = False, + return_new: bool = False, + ) -> Result[Union[bool, Json]]: + """Replace a vertex document. + + :param vertex: New vertex document to replace the old one with. It must + contain the "_key" or "_id" field. + :type vertex: dict + :param check_rev: If set to True, revision of **vertex** (if given) is + compared against the revision of target vertex document. + :type check_rev: bool + :param sync: Block until operation is synchronized to disk. 
+ :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_old: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentReplaceError: If replace fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + vertex_id, headers = self._prep_from_body(vertex, check_rev) + + params: Params = { + "silent": silent, + "returnNew": return_new, + "returnOld": return_old, + } + if sync is not None: + params["waitForSync"] = sync + + request = Request( + method="put", + endpoint=f"/_api/gharial/{self._graph}/vertex/{vertex_id}", + headers=headers, + params=params, + data=vertex, + write=self.name, + ) + + def response_handler(resp: Response) -> Union[bool, Json]: + if resp.status_code == 412: # pragma: no cover + raise DocumentRevisionError(resp, request) + elif not resp.is_success: + raise DocumentReplaceError(resp, request) + if silent is True: + return True + return format_vertex(resp.body) + + return await self._execute(request, response_handler) + + async def delete( + self, + vertex: Union[str, Json], + rev: Optional[str] = None, + check_rev: bool = True, + ignore_missing: bool = False, + sync: Optional[bool] = None, + return_old: bool = False, + ) -> Result[Union[bool, Json]]: + """Delete a vertex document. All connected edges are also deleted. + + :param vertex: Vertex document ID, key or body. Document body must + contain the "_id" or "_key" field. + :type vertex: str | dict + :param rev: Expected document revision. Overrides the value of "_rev" + field in **vertex** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **vertex** (if given) is + compared against the revision of target vertex document. + :type check_rev: bool + :param ignore_missing: Do not raise an exception on missing document. + This parameter has no effect in transactions where an exception is + always raised on failures. + :type ignore_missing: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param return_old: Return body of the old document in the result. + :type return_old: bool + :return: True if vertex was deleted successfully, False if vertex was + not found and **ignore_missing** was set to True (does not apply in + transactions). Old document is returned if **return_old** is set to + True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentDeleteError: If delete fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. 
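+
+        Example (a sketch; ``teachers`` is a hypothetical vertex collection):
+
+        .. code-block:: python
+
+            deleted = await teachers.delete('jane', ignore_missing=True)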
+ """ + handle, _, headers = self._prep_from_doc(vertex, rev, check_rev) + + params: Params = {"returnOld": return_old} + if sync is not None: + params["waitForSync"] = sync + + request = Request( + method="delete", + endpoint=f"/_api/gharial/{self._graph}/vertex/{handle}", + params=params, + headers=headers, + write=self.name, + ) + + def response_handler(resp: Response) -> Union[bool, Json]: + if resp.error_code == 1202 and ignore_missing: + return False + if resp.status_code == 412: # pragma: no cover + raise DocumentRevisionError(resp, request) + if not resp.is_success: + raise DocumentDeleteError(resp, request) + + result: Json = resp.body + return {"old": result["old"]} if return_old else True + + return await self._execute(request, response_handler) + + +class EdgeCollection(Collection): + """ArangoDB edge collection API wrapper. + + :param connection: HTTP connection. + :param executor: API executor. + :param graph: Graph name. + :param name: Edge collection name. + """ + + def __init__( + self, connection: Connection, executor: ApiExecutor, graph: str, name: str + ) -> None: + super().__init__(connection, executor, name) + self._graph = graph + + def __repr__(self) -> str: + return f"" + + @property + def graph(self) -> str: + """Return the graph name. + + :return: Graph name. + :rtype: str + """ + return self._graph + + async def get( + self, edge: Union[str, Json], rev: Optional[str] = None, check_rev: bool = True + ) -> Result[Optional[Json]]: + """Return an edge document. + + :param edge: Edge document ID, key or body. Document body must contain + the "_id" or "_key" field. + :type edge: str | dict + :param rev: Expected document revision. Overrides the value of "_rev" + field in **edge** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **edge** (if given) is + compared against the revision of target edge document. + :type check_rev: bool + :return: Edge document or None if not found. + :rtype: dict | None + :raise aioarango.exceptions.DocumentGetError: If retrieval fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + handle, body, headers = self._prep_from_doc(edge, rev, check_rev) + + request = Request( + method="get", + endpoint=f"/_api/gharial/{self._graph}/edge/{handle}", + headers=headers, + read=self.name, + ) + + def response_handler(resp: Response) -> Optional[Json]: + if resp.error_code == 1202: + return None + if resp.status_code == 412: # pragma: no cover + raise DocumentRevisionError(resp, request) + if not resp.is_success: + raise DocumentGetError(resp, request) + + result: Json = resp.body["edge"] + return result + + return await self._execute(request, response_handler) + + async def insert( + self, + edge: Json, + sync: Optional[bool] = None, + silent: bool = False, + return_new: bool = False, + ) -> Result[Union[bool, Json]]: + """Insert a new edge document. + + :param edge: New edge document to insert. It must contain "_from" and + "_to" fields. If it has "_key" or "_id" field, its value is used + as key of the new edge document (otherwise it is auto-generated). + Any "_rev" field is ignored. + :type edge: dict + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. 
+ :type return_new: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentInsertError: If insert fails. + """ + edge = self._ensure_key_from_id(edge) + + params: Params = {"silent": silent, "returnNew": return_new} + if sync is not None: + params["waitForSync"] = sync + + request = Request( + method="post", + endpoint=f"/_api/gharial/{self._graph}/edge/{self.name}", + data=edge, + params=params, + write=self.name, + ) + + def response_handler(resp: Response) -> Union[bool, Json]: + if not resp.is_success: + raise DocumentInsertError(resp, request) + if silent: + return True + return format_edge(resp.body) + + return await self._execute(request, response_handler) + + async def update( + self, + edge: Json, + check_rev: bool = True, + keep_none: bool = True, + sync: Optional[bool] = None, + silent: bool = False, + return_old: bool = False, + return_new: bool = False, + ) -> Result[Union[bool, Json]]: + """Update an edge document. + + :param edge: Partial or full edge document with updated values. It must + contain the "_key" or "_id" field. + :type edge: dict + :param check_rev: If set to True, revision of **edge** (if given) is + compared against the revision of target edge document. + :type check_rev: bool + :param keep_none: If set to True, fields with value None are retained + in the document. If set to False, they are removed completely. + :type keep_none: bool | None + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_old: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentUpdateError: If update fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + edge_id, headers = self._prep_from_body(edge, check_rev) + + params: Params = { + "keepNull": keep_none, + "overwrite": not check_rev, + "silent": silent, + "returnNew": return_new, + "returnOld": return_old, + } + if sync is not None: + params["waitForSync"] = sync + + request = Request( + method="patch", + endpoint=f"/_api/gharial/{self._graph}/edge/{edge_id}", + headers=headers, + params=params, + data=edge, + write=self.name, + ) + + def response_handler(resp: Response) -> Union[bool, Json]: + if resp.status_code == 412: # pragma: no cover + raise DocumentRevisionError(resp, request) + if not resp.is_success: + raise DocumentUpdateError(resp, request) + if silent is True: + return True + return format_edge(resp.body) + + return await self._execute(request, response_handler) + + async def replace( + self, + edge: Json, + check_rev: bool = True, + sync: Optional[bool] = None, + silent: bool = False, + return_old: bool = False, + return_new: bool = False, + ) -> Result[Union[bool, Json]]: + """Replace an edge document. + + :param edge: New edge document to replace the old one with. It must + contain the "_key" or "_id" field. It must also contain the "_from" + and "_to" fields. 
+ :type edge: dict + :param check_rev: If set to True, revision of **edge** (if given) is + compared against the revision of target edge document. + :type check_rev: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :param return_old: Include body of the old document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_old: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentReplaceError: If replace fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + edge_id, headers = self._prep_from_body(edge, check_rev) + + params: Params = { + "silent": silent, + "returnNew": return_new, + "returnOld": return_old, + } + if sync is not None: + params["waitForSync"] = sync + + request = Request( + method="put", + endpoint=f"/_api/gharial/{self._graph}/edge/{edge_id}", + headers=headers, + params=params, + data=edge, + write=self.name, + ) + + def response_handler(resp: Response) -> Union[bool, Json]: + if resp.status_code == 412: # pragma: no cover + raise DocumentRevisionError(resp, request) + if not resp.is_success: + raise DocumentReplaceError(resp, request) + if silent is True: + return True + return format_edge(resp.body) + + return await self._execute(request, response_handler) + + async def delete( + self, + edge: Union[str, Json], + rev: Optional[str] = None, + check_rev: bool = True, + ignore_missing: bool = False, + sync: Optional[bool] = None, + return_old: bool = False, + ) -> Result[Union[bool, Json]]: + """Delete an edge document. + + :param edge: Edge document ID, key or body. Document body must contain + the "_id" or "_key" field. + :type edge: str | dict + :param rev: Expected document revision. Overrides the value of "_rev" + field in **edge** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **edge** (if given) is + compared against the revision of target edge document. + :type check_rev: bool + :param ignore_missing: Do not raise an exception on missing document. + This parameter has no effect in transactions where an exception is + always raised on failures. + :type ignore_missing: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param return_old: Return body of the old document in the result. + :type return_old: bool + :return: True if edge was deleted successfully, False if edge was not + found and **ignore_missing** was set to True (does not apply in + transactions). + :rtype: bool + :raise aioarango.exceptions.DocumentDeleteError: If delete fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. 
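+
+        Example (a sketch; ``teach`` and the edge ID are hypothetical):
+
+        .. code-block:: python
+
+            deleted = await teach.delete('teach/jane-alice', ignore_missing=True)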
+ """ + handle, _, headers = self._prep_from_doc(edge, rev, check_rev) + + params: Params = {"returnOld": return_old} + if sync is not None: + params["waitForSync"] = sync + + request = Request( + method="delete", + endpoint=f"/_api/gharial/{self._graph}/edge/{handle}", + params=params, + headers=headers, + write=self.name, + ) + + def response_handler(resp: Response) -> Union[bool, Json]: + if resp.error_code == 1202 and ignore_missing: + return False + if resp.status_code == 412: # pragma: no cover + raise DocumentRevisionError(resp, request) + if not resp.is_success: + raise DocumentDeleteError(resp, request) + + result: Json = resp.body + return {"old": result["old"]} if return_old else True + + return await self._execute(request, response_handler) + + async def link( + self, + from_vertex: Union[str, Json], + to_vertex: Union[str, Json], + data: Optional[Json] = None, + sync: Optional[bool] = None, + silent: bool = False, + return_new: bool = False, + ) -> Result[Union[bool, Json]]: + """Insert a new edge document linking the given vertices. + + :param from_vertex: "From" vertex document ID or body with "_id" field. + :type from_vertex: str | dict + :param to_vertex: "To" vertex document ID or body with "_id" field. + :type to_vertex: str | dict + :param data: Any extra data for the new edge document. If it has "_key" + or "_id" field, its value is used as key of the new edge document + (otherwise it is auto-generated). + :type data: dict | None + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentInsertError: If insert fails. + """ + edge = {"_from": get_doc_id(from_vertex), "_to": get_doc_id(to_vertex)} + if data is not None: + edge.update(self._ensure_key_from_id(data)) + return await self.insert(edge, sync=sync, silent=silent, return_new=return_new) + + async def edges( + self, vertex: Union[str, Json], direction: Optional[str] = None + ) -> Result[Json]: + """Return the edge documents coming in and/or out of the vertex. + + :param vertex: Vertex document ID or body with "_id" field. + :type vertex: str | dict + :param direction: The direction of the edges. Allowed values are "in" + and "out". If not set, edges in both directions are returned. + :type direction: str + :return: List of edges and statistics. + :rtype: dict + :raise aioarango.exceptions.EdgeListError: If retrieval fails. 
+ """ + params: Params = {"vertex": get_doc_id(vertex)} + if direction is not None: + params["direction"] = direction + + request = Request( + method="get", + endpoint=f"/_api/edges/{self.name}", + params=params, + read=self.name, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise EdgeListError(resp, request) + stats = resp.body["stats"] + return { + "edges": resp.body["edges"], + "stats": { + "filtered": stats["filtered"], + "scanned_index": stats["scannedIndex"], + }, + } + + return await self._execute(request, response_handler) diff --git a/aioarango/connection.py b/aioarango/connection.py new file mode 100644 index 000000000..f3968c852 --- /dev/null +++ b/aioarango/connection.py @@ -0,0 +1,229 @@ +from abc import abstractmethod +from typing import Any, Callable, Optional, Sequence, Union + +import httpx +from requests_toolbelt import MultipartEncoder + +from aioarango.exceptions import ServerConnectionError +from aioarango.http import HTTPClient +from aioarango.request import Request +from aioarango.resolver import HostResolver +from aioarango.response import Response +from aioarango.typings import Fields, Json + +Connection = Union['BaseConnection'] + +class BaseConnection(object): + """Base connection to a specific ArangoDB database.""" + + def __init__( + self, + hosts: Fields, + host_resolver: HostResolver, + sessions: Sequence[httpx.AsyncClient], + db_name: str, + http_client: HTTPClient, + serializer: Callable[..., str], + deserializer: Callable[[str], Any], + ): + self._url_prefixes = [f"{host}/_db/{db_name}" for host in hosts] + self._host_resolver = host_resolver + self._sessions = sessions + self._db_name = db_name + self._http = http_client + self._serializer = serializer + self._deserializer = deserializer + self._username: Optional[str] = None + + @property + def db_name(self) -> str: + """Return the database name. + + :returns: Database name. + :rtype: str + """ + return self._db_name + + @property + def username(self) -> Optional[str]: + """Return the username. + + :returns: Username. + :rtype: str + """ + return self._username + + def serialize(self, obj: Any) -> str: + """Serialize the given object. + + :param obj: JSON object to serialize. + :type obj: str | bool | int | float | list | dict | None + :return: Serialized string. + :rtype: str + """ + return self._serializer(obj) + + def deserialize(self, string: str) -> Any: + """De-serialize the string and return the object. + + :param string: String to de-serialize. + :type string: str + :return: De-serialized JSON object. + :rtype: str | bool | int | float | list | dict | None + """ + try: + return self._deserializer(string) + except (ValueError, TypeError): + return string + + def prep_response(self, resp: Response, deserialize: bool = True) -> Response: + """Populate the response with details and return it. + + :param deserialize: Deserialize the response body. + :type deserialize: bool + :param resp: HTTP response. + :type resp: aioarango.response.Response + :return: HTTP response. 
+        :rtype: aioarango.response.Response
+        """
+        if deserialize:
+            resp.body = self.deserialize(resp.raw_body)
+            if isinstance(resp.body, dict):
+                resp.error_code = resp.body.get("errorNum")
+                resp.error_message = resp.body.get("errorMessage")
+        else:
+            resp.body = resp.raw_body
+
+        http_ok = 200 <= resp.status_code < 300
+        resp.is_success = http_ok and resp.error_code is None
+        return resp
+
+    def prep_bulk_err_response(self, parent_response: Response, body: Json) -> Response:
+        """Build and return a bulk error response.
+
+        :param parent_response: Parent response.
+        :type parent_response: aioarango.response.Response
+        :param body: Error response body.
+        :type body: dict
+        :return: Child bulk error response.
+        :rtype: aioarango.response.Response
+        """
+        resp = Response(
+            method=parent_response.method,
+            url=parent_response.url,
+            headers=parent_response.headers,
+            status_code=parent_response.status_code,
+            status_text=parent_response.status_text,
+            raw_body=self.serialize(body),
+        )
+        resp.body = body
+        resp.error_code = body["errorNum"]
+        resp.error_message = body["errorMessage"]
+        resp.is_success = False
+        return resp
+
+    def normalize_data(self, data: Any) -> Union[str, MultipartEncoder, None]:
+        """Normalize request data.
+
+        :param data: Request data.
+        :type data: str | MultipartEncoder | None
+        :return: Normalized data.
+        :rtype: str | MultipartEncoder | None
+        """
+        if data is None:
+            return None
+        elif isinstance(data, str):
+            return data
+        elif isinstance(data, MultipartEncoder):
+            return data.read()
+        else:
+            return self.serialize(data)
+
+    async def ping(self) -> int:
+        """Ping the next host to check if connection is established.
+
+        :return: Response status code.
+        :rtype: int
+        """
+        request = Request(method="get", endpoint="/_api/collection")
+        resp = await self.send_request(request)
+        if resp.status_code in {401, 403}:
+            raise ServerConnectionError("bad username and/or password")
+        if not resp.is_success:  # pragma: no cover
+            raise ServerConnectionError(resp.error_message or "bad server response")
+        return resp.status_code
+
+    @abstractmethod
+    async def send_request(self, request: Request) -> Response:  # pragma: no cover
+        """Send an HTTP request to ArangoDB server.
+
+        :param request: HTTP request.
+        :type request: aioarango.request.Request
+        :return: HTTP response.
+        :rtype: aioarango.response.Response
+        """
+        raise NotImplementedError
+
+
+class BasicConnection(BaseConnection):
+    """Connection to specific ArangoDB database using basic authentication.
+
+    :param hosts: Host URL or list of URLs (coordinators in a cluster).
+    :type hosts: [str]
+    :param host_resolver: Host resolver (used for clusters).
+    :type host_resolver: aioarango.resolver.HostResolver
+    :param sessions: HTTP session objects per host.
+    :type sessions: [httpx.AsyncClient]
+    :param db_name: Database name.
+    :type db_name: str
+    :param username: Username.
+    :type username: str
+    :param password: Password.
+    :type password: str
+    :param http_client: User-defined HTTP client.
+    :type http_client: aioarango.http.HTTPClient
+    """
+
+    def __init__(
+        self,
+        hosts: Fields,
+        host_resolver: HostResolver,
+        sessions: Sequence[httpx.AsyncClient],
+        db_name: str,
+        username: str,
+        password: str,
+        http_client: HTTPClient,
+        serializer: Callable[..., str],
+        deserializer: Callable[[str], Any],
+    ) -> None:
+        super().__init__(
+            hosts,
+            host_resolver,
+            sessions,
+            db_name,
+            http_client,
+            serializer,
+            deserializer,
+        )
+        self._username = username
+        self._auth = (username, password)
+
+    async def send_request(self, request: Request) -> Response:
+        """Send an HTTP request to ArangoDB server.
+
+        :param request: HTTP request.
+        :type request: aioarango.request.Request
+        :return: HTTP response.
+        :rtype: aioarango.response.Response
+        """
+        host_index = self._host_resolver.get_host_index()
+        resp = await self._http.send_request(
+            session=self._sessions[host_index],
+            method=request.method,
+            url=self._url_prefixes[host_index] + request.endpoint,
+            params=request.params,
+            data=self.normalize_data(request.data),
+            headers=request.headers,
+            auth=self._auth,
+        )
+        return self.prep_response(resp, request.deserialize)
diff --git a/aioarango/cursor.py b/aioarango/cursor.py
new file mode 100644
index 000000000..fcf996b7f
--- /dev/null
+++ b/aioarango/cursor.py
@@ -0,0 +1,290 @@
+from collections import deque
+from typing import Any, Deque, Optional, Sequence
+
+from aioarango.connection import BaseConnection
+from aioarango.exceptions import (
+    CursorCloseError,
+    CursorCountError,
+    CursorEmptyError,
+    CursorNextError,
+    CursorStateError,
+)
+from aioarango.request import Request
+from aioarango.typings import Json
+
+
+class Cursor:
+    """Cursor API wrapper.
+
+    Cursors fetch query results from ArangoDB server in batches. Cursor objects
+    are *stateful* as they store the fetched items in-memory. They must not be
+    shared across threads without proper locking mechanism.
+
+    :param connection: HTTP connection.
+    :param init_data: Cursor initialization data.
+    :type init_data: dict
+    :param cursor_type: Cursor type ("cursor" or "export").
+    :type cursor_type: str
+    """
+
+    __slots__ = [
+        "_conn",
+        "_type",
+        "_id",
+        "_count",
+        "_cached",
+        "_stats",
+        "_profile",
+        "_warnings",
+        "_has_more",
+        "_batch",
+    ]
+
+    def __init__(
+        self,
+        connection: BaseConnection,
+        init_data: Json,
+        cursor_type: str = "cursor",
+    ) -> None:
+        self._conn = connection
+        self._type = cursor_type
+        self._batch: Deque[Any] = deque()
+        self._id = None
+        self._count: Optional[int] = None
+        self._cached = None
+        self._stats = None
+        self._profile = None
+        self._warnings = None
+        self._update(init_data)
+
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):  # pragma: no cover
+        return await self.next()
+
+    async def __aenter__(self):
+        return self
+
+    def __len__(self) -> int:
+        if self._count is None:
+            raise CursorCountError("cursor count not enabled")
+        return self._count
+
+    async def __aexit__(self, *_: Any) -> None:
+        await self.close(ignore_missing=True)
+
+    def __repr__(self) -> str:
+        return f"<Cursor {self._id}>" if self._id else "<Cursor>"
+
+    def _update(self, data: Json) -> Json:
+        """Update the cursor using data from ArangoDB server.
+
+        :param data: Cursor data from ArangoDB server (e.g. results).
+        :type data: dict
+        :return: Updated cursor data.
+ :rtype: dict + """ + result: Json = {} + + if "id" in data: + self._id = data["id"] + result["id"] = data["id"] + if "count" in data: + self._count = data["count"] + result["count"] = data["count"] + if "cached" in data: + self._cached = data["cached"] + result["cached"] = data["cached"] + + self._has_more = bool(data["hasMore"]) + result["has_more"] = data["hasMore"] + + self._batch.extend(data["result"]) + result["batch"] = data["result"] + + if "extra" in data: + extra = data["extra"] + + if "profile" in extra: + self._profile = extra["profile"] + result["profile"] = extra["profile"] + + if "warnings" in extra: + self._warnings = extra["warnings"] + result["warnings"] = extra["warnings"] + + if "stats" in extra: + stats = extra["stats"] + if "writesExecuted" in stats: + stats["modified"] = stats.pop("writesExecuted") + if "writesIgnored" in stats: + stats["ignored"] = stats.pop("writesIgnored") + if "scannedFull" in stats: + stats["scanned_full"] = stats.pop("scannedFull") + if "scannedIndex" in stats: + stats["scanned_index"] = stats.pop("scannedIndex") + if "executionTime" in stats: + stats["execution_time"] = stats.pop("executionTime") + if "httpRequests" in stats: + stats["http_requests"] = stats.pop("httpRequests") + self._stats = stats + result["statistics"] = stats + + return result + + @property + def id(self) -> Optional[str]: + """Return the cursor ID. + + :return: Cursor ID. + :rtype: str + """ + return self._id + + @property + def type(self) -> str: + """Return the cursor type. + + :return: Cursor type ("cursor" or "export"). + :rtype: str + """ + return self._type + + def batch(self) -> Optional[Deque[Any]]: + """Return the current batch of results. + + :return: Current batch. + :rtype: collections.deque + """ + return self._batch + + def has_more(self) -> Optional[bool]: + """Return True if more results are available on the server. + + :return: True if more results are available on the server. + :rtype: bool + """ + return self._has_more + + def count(self) -> Optional[int]: + """Return the total number of documents in the entire result set. + + :return: Total number of documents, or None if the count option + was not enabled during cursor initialization. + :rtype: int | None + """ + return self._count + + def cached(self) -> Optional[bool]: + """Return True if results are cached. + + :return: True if results are cached. + :rtype: bool + """ + return self._cached + + def statistics(self) -> Optional[Json]: + """Return cursor statistics. + + :return: Cursor statistics. + :rtype: dict + """ + return self._stats + + def profile(self) -> Optional[Json]: + """Return cursor performance profile. + + :return: Cursor performance profile. + :rtype: dict + """ + return self._profile + + def warnings(self) -> Optional[Sequence[Json]]: + """Return any warnings from the query execution. + + :return: Warnings, or None if there are none. + :rtype: [str] + """ + return self._warnings + + def empty(self) -> bool: + """Check if the current batch is empty. + + :return: True if current batch is empty, False otherwise. + :rtype: bool + """ + return len(self._batch) == 0 + + async def next(self) -> Any: + """Pop the next item from the current batch. + + If current batch is empty/depleted, an API request is automatically + sent to ArangoDB server to fetch the next batch and update the cursor. + + :return: Next item in current batch. + :raise StopAsyncIteration: If the result set is depleted. + :raise aioarango.exceptions.CursorNextError: If batch retrieval fails. 
+ :raise aioarango.exceptions.CursorStateError: If cursor ID is not set. + """ + if self.empty(): + if not self.has_more(): + raise StopAsyncIteration + await self.fetch() + + return self.pop() + + def pop(self) -> Any: + """Pop the next item from current batch. + + If current batch is empty/depleted, an exception is raised. You must + call :func:`aioarango.cursor.Cursor.fetch` to manually fetch the next + batch from server. + + :return: Next item in current batch. + :raise aioarango.exceptions.CursorEmptyError: If current batch is empty. + """ + if len(self._batch) == 0: + raise CursorEmptyError("current batch is empty") + return self._batch.popleft() + + async def fetch(self) -> Json: + """Fetch the next batch from server and update the cursor. + + :return: New batch details. + :rtype: dict + :raise aioarango.exceptions.CursorNextError: If batch retrieval fails. + :raise aioarango.exceptions.CursorStateError: If cursor ID is not set. + """ + if self._id is None: + raise CursorStateError("cursor ID not set") + request = Request(method="put", endpoint=f"/_api/{self._type}/{self._id}") + resp = await self._conn.send_request(request) + + if not resp.is_success: + raise CursorNextError(resp, request) + + return self._update(resp.body) + + async def close(self, ignore_missing: bool = False) -> Optional[bool]: + """Close the cursor and free any server resources tied to it. + + :param ignore_missing: Do not raise exception on missing cursors. + :type ignore_missing: bool + :return: True if cursor was closed successfully, False if cursor was + missing on the server and **ignore_missing** was set to True, None + if there are no cursors to close server-side (e.g. result set is + smaller than the batch size). + :rtype: bool | None + :raise aioarango.exceptions.CursorCloseError: If operation fails. + :raise aioarango.exceptions.CursorStateError: If cursor ID is not set. 
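+
+        Explicit closing is rarely needed; using the cursor as an async
+        context manager closes it implicitly (a sketch, assuming
+        ``db.aql.execute`` returns a :class:`aioarango.cursor.Cursor`):
+
+        .. code-block:: python
+
+            cursor = await db.aql.execute('FOR s IN students RETURN s')
+            async with cursor:
+                async for doc in cursor:
+                    print(doc)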
+ """ + if self._id is None: + return None + request = Request(method="delete", endpoint=f"/_api/{self._type}/{self._id}") + resp = await self._conn.send_request(request) + if resp.is_success: + return True + if resp.status_code == 404 and ignore_missing: + return False + raise CursorCloseError(resp, request) diff --git a/aioarango/database.py b/aioarango/database.py new file mode 100644 index 000000000..1d4a9c411 --- /dev/null +++ b/aioarango/database.py @@ -0,0 +1,2328 @@ +from datetime import datetime +from numbers import Number +from typing import Any, List, Optional, Sequence, Union + +from aioarango.api import ApiGroup +from aioarango.aql import AQL +from aioarango.collection import StandardCollection +from aioarango.connection import Connection +from aioarango.exceptions import ( + AnalyzerCreateError, + AnalyzerDeleteError, + AnalyzerGetError, + AnalyzerListError, + AsyncJobClearError, + AsyncJobListError, + CollectionCreateError, + CollectionDeleteError, + CollectionListError, + DatabaseCreateError, + DatabaseDeleteError, + DatabaseListError, + DatabasePropertiesError, + GraphCreateError, + GraphDeleteError, + GraphListError, + JWTSecretListError, + JWTSecretReloadError, + PermissionGetError, + PermissionListError, + PermissionResetError, + PermissionUpdateError, + ServerDetailsError, + ServerEchoError, + ServerEncryptionError, + ServerEngineError, + ServerLogLevelError, + ServerLogLevelSetError, + ServerMetricsError, + ServerReadLogError, + ServerReloadRoutingError, + ServerRequiredDBVersionError, + ServerRoleError, + ServerRunTestsError, + ServerShutdownError, + ServerStatisticsError, + ServerStatusError, + ServerTimeError, + ServerTLSError, + ServerTLSReloadError, + ServerVersionError, + TaskCreateError, + TaskDeleteError, + TaskGetError, + TaskListError, + TransactionExecuteError, + UserCreateError, + UserDeleteError, + UserGetError, + UserListError, + UserReplaceError, + UserUpdateError, + ViewCreateError, + ViewDeleteError, + ViewGetError, + ViewListError, + ViewRenameError, + ViewReplaceError, + ViewUpdateError, +) +from aioarango.executor import DefaultApiExecutor +from aioarango.formatter import ( + format_body, + format_database, + format_server_status, + format_tls, + format_view, +) +from aioarango.graph import Graph +from aioarango.request import Request +from aioarango.response import Response +from aioarango.result import Result +from aioarango.typings import Json, Jsons, Params +from aioarango.utils import get_col_name + + +class Database(ApiGroup): + """Base class for Database API wrappers.""" + + def __getitem__(self, name: str) -> StandardCollection: + """Return the collection API wrapper. + + :param name: Collection name. + :type name: str + :return: Collection API wrapper. + :rtype: aioarango.collection.StandardCollection + """ + return self.collection(name) + + def _get_col_by_doc(self, document: Union[str, Json]) -> StandardCollection: + """Return the collection of the given document. + + :param document: Document ID or body with "_id" field. + :type document: str | dict + :return: Collection API wrapper. + :rtype: aioarango.collection.StandardCollection + :raise aioarango.exceptions.DocumentParseError: On malformed document. + """ + return self.collection(get_col_name(document)) + + @property + def name(self) -> str: + """Return database name. + + :return: Database name. + :rtype: str + """ + return self.db_name + + @property + def aql(self) -> AQL: + """Return AQL (ArangoDB Query Language) API wrapper. + + :return: AQL API wrapper. 
+ :rtype: aioarango.aql.AQL + """ + return AQL(self._conn, self._executor) + + async def properties(self) -> Result[Json]: + """Return database properties. + + :return: Database properties. + :rtype: dict + :raise aioarango.exceptions.DatabasePropertiesError: If retrieval fails. + """ + request = Request( + method="get", + endpoint="/_api/database/current", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise DatabasePropertiesError(resp, request) + return format_database(resp.body["result"]) + + return await self._execute(request, response_handler) + + async def execute_transaction( + self, + command: str, + params: Optional[Json] = None, + read: Optional[Sequence[str]] = None, + write: Optional[Sequence[str]] = None, + sync: Optional[bool] = None, + timeout: Optional[Number] = None, + max_size: Optional[int] = None, + allow_implicit: Optional[bool] = None, + intermediate_commit_count: Optional[int] = None, + intermediate_commit_size: Optional[int] = None, + ) -> Result[Any]: + """Execute raw Javascript command in transaction. + + :param command: Javascript command to execute. + :type command: str + :param read: Names of collections read during transaction. If parameter + **allow_implicit** is set to True, any undeclared read collections + are loaded lazily. + :type read: [str] | None + :param write: Names of collections written to during transaction. + Transaction fails on undeclared write collections. + :type write: [str] | None + :param params: Optional parameters passed into the Javascript command. + :type params: dict | None + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param timeout: Timeout for waiting on collection locks. If set to 0, + ArangoDB server waits indefinitely. If not set, system default + value is used. + :type timeout: int | None + :param max_size: Max transaction size limit in bytes. + :type max_size: int | None + :param allow_implicit: If set to True, undeclared read collections are + loaded lazily. If set to False, transaction fails on any undeclared + collections. + :type allow_implicit: bool | None + :param intermediate_commit_count: Max number of operations after which + an intermediate commit is performed automatically. + :type intermediate_commit_count: int | None + :param intermediate_commit_size: Max size of operations in bytes after + which an intermediate commit is performed automatically. + :type intermediate_commit_size: int | None + :return: Return value of **command**. + :rtype: Any + :raise aioarango.exceptions.TransactionExecuteError: If execution fails. 
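+
+        Example (a sketch; the ``students`` collection is hypothetical):
+
+        .. code-block:: python
+
+            count = await db.execute_transaction(
+                command='''
+                function () {
+                    var db = require('internal').db;
+                    return db.students.count();
+                }''',
+                read=['students'],
+            )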
+ """ + collections: Json = {"allowImplicit": allow_implicit} + if read is not None: + collections["read"] = read + if write is not None: + collections["write"] = write + + data: Json = {"action": command} + if collections: + data["collections"] = collections + if params is not None: + data["params"] = params + if timeout is not None: + data["lockTimeout"] = timeout + if sync is not None: + data["waitForSync"] = sync + if max_size is not None: + data["maxTransactionSize"] = max_size + if intermediate_commit_count is not None: + data["intermediateCommitCount"] = intermediate_commit_count + if intermediate_commit_size is not None: + data["intermediateCommitSize"] = intermediate_commit_size + + request = Request(method="post", endpoint="/_api/transaction", data=data) + + def response_handler(resp: Response) -> Any: + if not resp.is_success: + raise TransactionExecuteError(resp, request) + + return resp.body.get("result") + + return await self._execute(request, response_handler) + + async def version(self) -> Result[str]: + """Return ArangoDB server version. + + :return: Server version. + :rtype: str + :raise aioarango.exceptions.ServerVersionError: If retrieval fails. + """ + request = Request( + method="get", endpoint="/_api/version", params={"details": False} + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ServerVersionError(resp, request) + return str(resp.body["version"]) + + return await self._execute(request, response_handler) + + async def details(self) -> Result[Json]: + """Return ArangoDB server details. + + :return: Server details. + :rtype: dict + :raise aioarango.exceptions.ServerDetailsError: If retrieval fails. + """ + request = Request( + method="get", endpoint="/_api/version", params={"details": True} + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + result: Json = resp.body["details"] + return result + raise ServerDetailsError(resp, request) + + return await self._execute(request, response_handler) + + async def status(self) -> Result[Json]: + """Return ArangoDB server status. + + :return: Server status. + :rtype: dict + :raise aioarango.exceptions.ServerStatusError: If retrieval fails. + """ + request = Request( + method="get", + endpoint="/_admin/status", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerStatusError(resp, request) + return format_server_status(resp.body) + + return await self._execute(request, response_handler) + + async def required_db_version(self) -> Result[str]: + """Return required version of target database. + + :return: Required version of target database. + :rtype: str + :raise aioarango.exceptions.ServerRequiredDBVersionError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_admin/database/target-version") + + def response_handler(resp: Response) -> str: + if resp.is_success: + return str(resp.body["version"]) + raise ServerRequiredDBVersionError(resp, request) + + return await self._execute(request, response_handler) + + async def engine(self) -> Result[Json]: + """Return the database engine details. + + :return: Database engine details. + :rtype: dict + :raise aioarango.exceptions.ServerEngineError: If retrieval fails. 
+ """ + request = Request(method="get", endpoint="/_api/engine") + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_body(resp.body) + raise ServerEngineError(resp, request) + + return await self._execute(request, response_handler) + + async def statistics(self, description: bool = False) -> Result[Json]: + """Return server statistics. + + :return: Server statistics. + :rtype: dict + :raise aioarango.exceptions.ServerStatisticsError: If retrieval fails. + """ + if description: + endpoint = "/_admin/statistics-description" + else: + endpoint = "/_admin/statistics" + + request = Request(method="get", endpoint=endpoint) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_body(resp.body) + raise ServerStatisticsError(resp, request) + + return await self._execute(request, response_handler) + + async def role(self) -> Result[str]: + """Return server role. + + :return: Server role. Possible values are "SINGLE" (server which is not + in a cluster), "COORDINATOR" (cluster coordinator), "PRIMARY", + "SECONDARY", "AGENT" (Agency node in a cluster) or "UNDEFINED". + :rtype: str + :raise aioarango.exceptions.ServerRoleError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_admin/server/role") + + def response_handler(resp: Response) -> str: + if resp.is_success: + return str(resp.body["role"]) + raise ServerRoleError(resp, request) + + return await self._execute(request, response_handler) + + async def time(self) -> Result[datetime]: + """Return server system time. + + :return: Server system time. + :rtype: datetime.datetime + :raise aioarango.exceptions.ServerTimeError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_admin/time") + + def response_handler(resp: Response) -> datetime: + if not resp.is_success: + raise ServerTimeError(resp, request) + return datetime.fromtimestamp(resp.body["time"]) + + return await self._execute(request, response_handler) + + async def echo(self) -> Result[Json]: + """Return details of the last request (e.g. headers, payload). + + :return: Details of the last request. + :rtype: dict + :raise aioarango.exceptions.ServerEchoError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_admin/echo") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerEchoError(resp, request) + result: Json = resp.body + return result + + return await self._execute(request, response_handler) + + async def shutdown(self) -> Result[bool]: # pragma: no cover + """Initiate server shutdown sequence. + + :return: True if the server was shutdown successfully. + :rtype: bool + :raise aioarango.exceptions.ServerShutdownError: If shutdown fails. + """ + request = Request(method="delete", endpoint="/_admin/shutdown") + + def response_handler(resp: Response) -> bool: + if not resp.is_success: + raise ServerShutdownError(resp, request) + return True + + return await self._execute(request, response_handler) + + async def run_tests(self, tests: Sequence[str]) -> Result[Json]: # pragma: no cover + """Run available unittests on the server. + + :param tests: List of files containing the test suites. + :type tests: [str] + :return: Test results. + :rtype: dict + :raise aioarango.exceptions.ServerRunTestsError: If execution fails. 
+ """ + request = Request(method="post", endpoint="/_admin/test", data={"tests": tests}) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerRunTestsError(resp, request) + result: Json = resp.body + return result + + return await self._execute(request, response_handler) + + async def read_log( + self, + upto: Optional[Union[int, str]] = None, + level: Optional[Union[int, str]] = None, + start: Optional[int] = None, + size: Optional[int] = None, + offset: Optional[int] = None, + search: Optional[str] = None, + sort: Optional[str] = None, + ) -> Result[Json]: + """Read the global log from server. + + :param upto: Return the log entries up to the given level (mutually + exclusive with parameter **level**). Allowed values are "fatal", + "error", "warning", "info" (default) and "debug". + :type upto: int | str + :param level: Return the log entries of only the given level (mutually + exclusive with **upto**). Allowed values are "fatal", "error", + "warning", "info" (default) and "debug". + :type level: int | str + :param start: Return the log entries whose ID is greater or equal to + the given value. + :type start: int + :param size: Restrict the size of the result to the given value. This + can be used for pagination. + :type size: int + :param offset: Number of entries to skip (e.g. for pagination). + :type offset: int + :param search: Return only the log entries containing the given text. + :type search: str + :param sort: Sort the log entries according to the given fashion, which + can be "sort" or "desc". + :type sort: str + :return: Server log entries. + :rtype: dict + :raise aioarango.exceptions.ServerReadLogError: If read fails. + """ + params = dict() + if upto is not None: + params["upto"] = upto + if level is not None: + params["level"] = level + if start is not None: + params["start"] = start + if size is not None: + params["size"] = size + if offset is not None: + params["offset"] = offset + if search is not None: + params["search"] = search + if sort is not None: + params["sort"] = sort + + request = Request(method="get", endpoint="/_admin/log", params=params) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerReadLogError(resp, request) + + result: Json = resp.body + if "totalAmount" in result: + resp.body["total_amount"] = resp.body.pop("totalAmount") + return result + + return await self._execute(request, response_handler) + + async def log_levels(self) -> Result[Json]: + """Return current logging levels. + + :return: Current logging levels. + :rtype: dict + """ + request = Request(method="get", endpoint="/_admin/log/level") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerLogLevelError(resp, request) + result: Json = resp.body + return result + + return await self._execute(request, response_handler) + + async def set_log_levels(self, **kwargs: str) -> Result[Json]: + """Set the logging levels. + + This method takes arbitrary keyword arguments where the keys are the + logger names and the values are the logging levels. For example: + + .. code-block:: python + + aioarango.set_log_levels( + agency='DEBUG', + collector='INFO', + threads='WARNING' + ) + + Keys that are not valid logger names are ignored. + + :return: New logging levels. 
+        :rtype: dict
+        :raise aioarango.exceptions.ServerLogLevelSetError: If update fails.
+        """
+        request = Request(method="put", endpoint="/_admin/log/level", data=kwargs)
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogLevelSetError(resp, request)
+            result: Json = resp.body
+            return result
+
+        return await self._execute(request, response_handler)
+
+    async def reload_routing(self) -> Result[bool]:
+        """Reload the routing information.
+
+        :return: True if routing was reloaded successfully.
+        :rtype: bool
+        :raise aioarango.exceptions.ServerReloadRoutingError: If reload fails.
+        """
+        request = Request(method="post", endpoint="/_admin/routing/reload")
+
+        def response_handler(resp: Response) -> bool:
+            if not resp.is_success:
+                raise ServerReloadRoutingError(resp, request)
+            return True
+
+        return await self._execute(request, response_handler)
+
+    async def metrics(self) -> Result[str]:
+        """Return server metrics in Prometheus format.
+
+        :return: Server metrics in Prometheus format.
+        :rtype: str
+        :raise aioarango.exceptions.ServerMetricsError: If retrieval fails.
+        """
+        request = Request(method="get", endpoint="/_admin/metrics")
+
+        def response_handler(resp: Response) -> str:
+            if resp.is_success:
+                return resp.raw_body
+            raise ServerMetricsError(resp, request)
+
+        return await self._execute(request, response_handler)
+
+    async def jwt_secrets(self) -> Result[Json]:  # pragma: no cover
+        """Return information on currently loaded JWT secrets.
+
+        :return: Information on currently loaded JWT secrets.
+        :rtype: dict
+        :raise aioarango.exceptions.JWTSecretListError: If retrieval fails.
+        """
+        request = Request(method="get", endpoint="/_admin/server/jwt")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise JWTSecretListError(resp, request)
+            result: Json = resp.body["result"]
+            return result
+
+        return await self._execute(request, response_handler)
+
+    async def reload_jwt_secrets(self) -> Result[Json]:  # pragma: no cover
+        """Hot-reload JWT secrets.
+
+        Calling this without payload reloads JWT secrets from disk. Only files
+        specified via arangod startup option ``--server.jwt-secret-keyfile`` or
+        ``--server.jwt-secret-folder`` are used. It is not possible to change
+        the location where files are loaded from without restarting the server.
+
+        :return: Information on reloaded JWT secrets.
+        :rtype: dict
+        :raise aioarango.exceptions.JWTSecretReloadError: If reload fails.
+        """
+        request = Request(method="post", endpoint="/_admin/server/jwt")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise JWTSecretReloadError(resp, request)
+            result: Json = resp.body["result"]
+            return result
+
+        return await self._execute(request, response_handler)
+
+    async def tls(self) -> Result[Json]:
+        """Return TLS data (server key, client-auth CA).
+
+        :return: TLS data.
+        :rtype: dict
+        :raise aioarango.exceptions.ServerTLSError: If retrieval fails.
+        """
+        request = Request(method="get", endpoint="/_admin/server/tls")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerTLSError(resp, request)
+            return format_tls(resp.body["result"])
+
+        return await self._execute(request, response_handler)
+
+    async def reload_tls(self) -> Result[Json]:
+        """Reload TLS data (server key, client-auth CA).
+
+        :return: New TLS data.
+        :rtype: dict
+        :raise aioarango.exceptions.ServerTLSReloadError: If reload fails.
+        """
+        request = Request(method="post", endpoint="/_admin/server/tls")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerTLSReloadError(resp, request)
+            return format_tls(resp.body["result"])
+
+        return await self._execute(request, response_handler)
+
+    async def encryption(self) -> Result[Json]:
+        """Rotate the user-supplied keys for encryption.
+
+        This method is available only for enterprise edition of ArangoDB.
+
+        :return: Information on the new encryption keys.
+        :rtype: dict
+        :raise aioarango.exceptions.ServerEncryptionError: If rotation fails.
+        """
+        request = Request(method="post", endpoint="/_admin/server/encryption")
+
+        def response_handler(resp: Response) -> Json:
+            if resp.is_success:  # pragma: no cover
+                result: Json = resp.body["result"]
+                return result
+            raise ServerEncryptionError(resp, request)
+
+        return await self._execute(request, response_handler)
+
+    #######################
+    # Database Management #
+    #######################
+
+    async def databases(self) -> Result[List[str]]:
+        """Return the names of all databases.
+
+        :return: Database names.
+        :rtype: [str]
+        :raise aioarango.exceptions.DatabaseListError: If retrieval fails.
+        """
+        request = Request(method="get", endpoint="/_api/database")
+
+        def response_handler(resp: Response) -> List[str]:
+            if not resp.is_success:
+                raise DatabaseListError(resp, request)
+            result: List[str] = resp.body["result"]
+            return result
+
+        return await self._execute(request, response_handler)
+
+    async def has_database(self, name: str) -> Result[bool]:
+        """Check if a database exists.
+
+        :param name: Database name.
+        :type name: str
+        :return: True if database exists, False otherwise.
+        :rtype: bool
+        :raise aioarango.exceptions.DatabaseListError: If retrieval fails.
+        """
+        request = Request(method="get", endpoint="/_api/database")
+
+        def response_handler(resp: Response) -> bool:
+            if not resp.is_success:
+                raise DatabaseListError(resp, request)
+            return name in resp.body["result"]
+
+        return await self._execute(request, response_handler)
+
+    async def create_database(
+        self,
+        name: str,
+        users: Optional[Sequence[Json]] = None,
+        replication_factor: Union[int, str, None] = None,
+        write_concern: Optional[int] = None,
+        sharding: Optional[str] = None,
+    ) -> Result[bool]:
+        """Create a new database.
+
+        :param name: Database name.
+        :type name: str
+        :param users: List of users with access to the new database, where each
+            user is a dictionary with fields "username", "password", "active"
+            and "extra" (see below for example). If not set, only the admin and
+            current user are granted access.
+        :type users: [dict]
+        :param replication_factor: Default replication factor for collections
+            created in this database. Special values include "satellite" which
+            replicates the collection to every DBServer, and 1 which disables
+            replication. Used for clusters only.
+        :type replication_factor: int | str
+        :param write_concern: Default write concern for collections created in
+            this database. Determines how many copies of each shard are
+            required to be in sync on different DBServers. If there are fewer
+            than this many copies in the cluster, a shard will refuse to write.
+            Writes to shards with enough up-to-date copies will succeed at the
+            same time, however. The value of this parameter cannot be larger
+            than the value of **replication_factor**. Used for clusters only.
+        :type write_concern: int
+        :param sharding: Sharding method used for new collections in this
+            database. Allowed values are: "", "flexible" and "single". The
+            first two are equivalent. Used for clusters only.
+        :type sharding: str
+        :return: True if database was created successfully.
+        :rtype: bool
+        :raise aioarango.exceptions.DatabaseCreateError: If create fails.
+
+        Here is an example entry for parameter **users**:
+
+        .. code-block:: python
+
+            {
+                'username': 'john',
+                'password': 'password',
+                'active': True,
+                'extra': {'Department': 'IT'}
+            }
+        """
+        data: Json = {"name": name}
+
+        options: Json = {}
+        if replication_factor is not None:
+            options["replicationFactor"] = replication_factor
+        if write_concern is not None:
+            options["writeConcern"] = write_concern
+        if sharding is not None:
+            options["sharding"] = sharding
+        if options:
+            data["options"] = options
+
+        if users is not None:
+            data["users"] = [
+                {
+                    "username": user["username"],
+                    "passwd": user["password"],
+                    "active": user.get("active", True),
+                    "extra": user.get("extra", {}),
+                }
+                for user in users
+            ]
+
+        request = Request(method="post", endpoint="/_api/database", data=data)
+
+        def response_handler(resp: Response) -> bool:
+            if not resp.is_success:
+                raise DatabaseCreateError(resp, request)
+            return True
+
+        return await self._execute(request, response_handler)
+
+    async def delete_database(self, name: str, ignore_missing: bool = False) -> Result[bool]:
+        """Delete the database.
+
+        :param name: Database name.
+        :type name: str
+        :param ignore_missing: Do not raise an exception on missing database.
+        :type ignore_missing: bool
+        :return: True if database was deleted successfully, False if database
+            was not found and **ignore_missing** was set to True.
+        :rtype: bool
+        :raise aioarango.exceptions.DatabaseDeleteError: If delete fails.
+        """
+        request = Request(method="delete", endpoint=f"/_api/database/{name}")
+
+        def response_handler(resp: Response) -> bool:
+            if resp.error_code == 1228 and ignore_missing:
+                return False
+            if not resp.is_success:
+                raise DatabaseDeleteError(resp, request)
+            return True
+
+        return await self._execute(request, response_handler)
+
+    #########################
+    # Collection Management #
+    #########################
+
+    def collection(self, name: str) -> StandardCollection:
+        """Return the standard collection API wrapper.
+
+        :param name: Collection name.
+        :type name: str
+        :return: Standard collection API wrapper.
+        :rtype: aioarango.collection.StandardCollection
+        """
+        return StandardCollection(self._conn, self._executor, name)
+
+    async def has_collection(self, name: str) -> Result[bool]:
+        """Check if collection exists in the database.
+
+        :param name: Collection name.
+        :type name: str
+        :return: True if collection exists, False otherwise.
+        :rtype: bool
+        :raise aioarango.exceptions.CollectionListError: If retrieval fails.
+        """
+        request = Request(method="get", endpoint="/_api/collection")
+
+        def response_handler(resp: Response) -> bool:
+            if not resp.is_success:
+                raise CollectionListError(resp, request)
+            return any(col["name"] == name for col in resp.body["result"])
+
+        return await self._execute(request, response_handler)
+
+    async def collections(self) -> Result[Jsons]:
+        """Return the collections in the database.
+
+        :return: Collections in the database and their details.
+        :rtype: [dict]
+        :raise aioarango.exceptions.CollectionListError: If retrieval fails.
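+
+        Example (a sketch; assumes ``db`` is this database's API wrapper):
+
+        .. code-block:: python
+
+            for col in await db.collections():
+                print(col['name'], col['type'], col['status'])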
+ """ + request = Request(method="get", endpoint="/_api/collection") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise CollectionListError(resp, request) + return [ + { + "id": col["id"], + "name": col["name"], + "system": col["isSystem"], + "type": StandardCollection.types[col["type"]], + "status": StandardCollection.statuses[col["status"]], + } + for col in resp.body["result"] + ] + + return await self._execute(request, response_handler) + + async def create_collection( + self, + name: str, + sync: bool = False, + system: bool = False, + edge: bool = False, + user_keys: bool = True, + key_increment: Optional[int] = None, + key_offset: Optional[int] = None, + key_generator: str = "traditional", + shard_fields: Optional[Sequence[str]] = None, + shard_count: Optional[int] = None, + replication_factor: Optional[int] = None, + shard_like: Optional[str] = None, + sync_replication: Optional[bool] = None, + enforce_replication_factor: Optional[bool] = None, + sharding_strategy: Optional[str] = None, + smart_join_attribute: Optional[str] = None, + write_concern: Optional[int] = None, + schema: Optional[Json] = None, + ) -> Result[StandardCollection]: + """Create a new collection. + + :param name: Collection name. + :type name: str + :param sync: If set to True, document operations via the collection + will block until synchronized to disk by default. + :type sync: bool | None + :param system: If set to True, a system collection is created. The + collection name must have leading underscore "_" character. + :type system: bool + :param edge: If set to True, an edge collection is created. + :type edge: bool + :param key_generator: Used for generating document keys. Allowed values + are "traditional" or "autoincrement". + :type key_generator: str + :param user_keys: If set to True, users are allowed to supply document + keys. If set to False, the key generator is solely responsible for + supplying the key values. + :type user_keys: bool + :param key_increment: Key increment value. Applies only when value of + **key_generator** is set to "autoincrement". + :type key_increment: int + :param key_offset: Key offset value. Applies only when value of + **key_generator** is set to "autoincrement". + :type key_offset: int + :param shard_fields: Field(s) used to determine the target shard. + :type shard_fields: [str] + :param shard_count: Number of shards to create. + :type shard_count: int + :param replication_factor: Number of copies of each shard on different + servers in a cluster. Allowed values are 1 (only one copy is kept + and no synchronous replication), and n (n-1 replicas are kept and + any two copies are replicated across servers synchronously, meaning + every write to the master is copied to all slaves before operation + is reported successful). + :type replication_factor: int + :param shard_like: Name of prototype collection whose sharding + specifics are imitated. Prototype collections cannot be dropped + before imitating collections. Applies to enterprise version of + ArangoDB only. + :type shard_like: str + :param sync_replication: If set to True, server reports success only + when collection is created in all replicas. You can set this to + False for faster server response, and if full replication is not a + concern. + :type sync_replication: bool + :param enforce_replication_factor: Check if there are enough replicas + available at creation time, or halt the operation. + :type enforce_replication_factor: bool + :param sharding_strategy: Sharding strategy. 
Available for ArangoDB
+            version 3.4 and up only. Possible values are "community-compat",
+            "enterprise-compat", "enterprise-smart-edge-compat", "hash" and
+            "enterprise-hash-smart-edge". Refer to ArangoDB documentation for
+            more details on each value.
+        :type sharding_strategy: str
+        :param smart_join_attribute: Attribute of the collection which must
+            contain the shard key value of the smart join collection. The shard
+            key for the documents must contain the value of this attribute,
+            followed by a colon ":" and the primary key of the document.
+            Requires parameter **shard_like** to be set to the name of another
+            collection, and parameter **shard_fields** to be set to a single
+            shard key attribute, with another colon ":" at the end. Available
+            only for enterprise version of ArangoDB.
+        :type smart_join_attribute: str
+        :param write_concern: Write concern for the collection. Determines how
+            many copies of each shard are required to be in sync on different
+            DBServers. If there are fewer than this many copies in the cluster,
+            a shard will refuse to write. Writes to shards with enough
+            up-to-date copies will succeed at the same time. The value of this
+            parameter cannot be larger than that of **replication_factor**.
+            Default value is 1. Used for clusters only.
+        :type write_concern: int
+        :param schema: Optional dict specifying the collection level schema
+            for documents. See ArangoDB documentation for more information on
+            document schema validation.
+        :type schema: dict
+        :return: Standard collection API wrapper.
+        :rtype: aioarango.collection.StandardCollection
+        :raise aioarango.exceptions.CollectionCreateError: If create fails.
+        """
+        key_options: Json = {"type": key_generator, "allowUserKeys": user_keys}
+        if key_increment is not None:
+            key_options["increment"] = key_increment
+        if key_offset is not None:
+            key_options["offset"] = key_offset
+
+        data: Json = {
+            "name": name,
+            "waitForSync": sync,
+            "isSystem": system,
+            "keyOptions": key_options,
+            "type": 3 if edge else 2,
+        }
+        if shard_count is not None:
+            data["numberOfShards"] = shard_count
+        if shard_fields is not None:
+            data["shardKeys"] = shard_fields
+        if replication_factor is not None:
+            data["replicationFactor"] = replication_factor
+        if shard_like is not None:
+            data["distributeShardsLike"] = shard_like
+        if sharding_strategy is not None:
+            data["shardingStrategy"] = sharding_strategy
+        if smart_join_attribute is not None:
+            data["smartJoinAttribute"] = smart_join_attribute
+        if write_concern is not None:
+            data["writeConcern"] = write_concern
+        if schema is not None:
+            data["schema"] = schema
+
+        params: Params = {}
+        if sync_replication is not None:
+            params["waitForSyncReplication"] = sync_replication
+        if enforce_replication_factor is not None:
+            params["enforceReplicationFactor"] = enforce_replication_factor
+
+        request = Request(
+            method="post", endpoint="/_api/collection", params=params, data=data
+        )
+
+        def response_handler(resp: Response) -> StandardCollection:
+            if resp.is_success:
+                return self.collection(name)
+            raise CollectionCreateError(resp, request)
+
+        return await self._execute(request, response_handler)
+
+    async def delete_collection(
+        self, name: str, ignore_missing: bool = False, system: Optional[bool] = None
+    ) -> Result[bool]:
+        """Delete the collection.
+
+        :param name: Collection name.
+        :type name: str
+        :param ignore_missing: Do not raise an exception on missing collection.
+        :type ignore_missing: bool
+        :param system: Whether the collection is a system collection.
+        :type system: bool
+        :return: True if collection was deleted successfully, False if
+            collection was not found and **ignore_missing** was set to True.
+        :rtype: bool
+        :raise aioarango.exceptions.CollectionDeleteError: If delete fails.
+        """
+        params: Params = {}
+        if system is not None:
+            params["isSystem"] = system
+
+        request = Request(
+            method="delete", endpoint=f"/_api/collection/{name}", params=params
+        )
+
+        def response_handler(resp: Response) -> bool:
+            if resp.error_code == 1203 and ignore_missing:
+                return False
+            if not resp.is_success:
+                raise CollectionDeleteError(resp, request)
+            return True
+
+        return await self._execute(request, response_handler)
+
+    ####################
+    # Graph Management #
+    ####################
+
+    def graph(self, name: str) -> Graph:
+        """Return the graph API wrapper.
+
+        :param name: Graph name.
+        :type name: str
+        :return: Graph API wrapper.
+        :rtype: aioarango.graph.Graph
+        """
+        return Graph(self._conn, self._executor, name)
+
+    async def has_graph(self, name: str) -> Result[bool]:
+        """Check if a graph exists in the database.
+
+        :param name: Graph name.
+        :type name: str
+        :return: True if graph exists, False otherwise.
+        :rtype: bool
+        :raise aioarango.exceptions.GraphListError: If check fails.
+        """
+        request = Request(method="get", endpoint="/_api/gharial")
+
+        def response_handler(resp: Response) -> bool:
+            if not resp.is_success:
+                raise GraphListError(resp, request)
+            return any(name == graph["_key"] for graph in resp.body["graphs"])
+
+        return await self._execute(request, response_handler)
+
+    async def graphs(self) -> Result[Jsons]:
+        """List all graphs in the database.
+
+        :return: Graphs in the database.
+        :rtype: [dict]
+        :raise aioarango.exceptions.GraphListError: If retrieval fails.
+        """
+        request = Request(method="get", endpoint="/_api/gharial")
+
+        def response_handler(resp: Response) -> Jsons:
+            if not resp.is_success:
+                raise GraphListError(resp, request)
+            return [
+                {
+                    "id": body["_id"],
+                    "name": body["_key"],
+                    "revision": body["_rev"],
+                    "orphan_collections": body["orphanCollections"],
+                    "edge_definitions": [
+                        {
+                            "edge_collection": definition["collection"],
+                            "from_vertex_collections": definition["from"],
+                            "to_vertex_collections": definition["to"],
+                        }
+                        for definition in body["edgeDefinitions"]
+                    ],
+                    "shard_count": body.get("numberOfShards"),
+                    "replication_factor": body.get("replicationFactor"),
+                }
+                for body in resp.body["graphs"]
+            ]
+
+        return await self._execute(request, response_handler)
+
+    async def create_graph(
+        self,
+        name: str,
+        edge_definitions: Optional[Sequence[Json]] = None,
+        orphan_collections: Optional[Sequence[str]] = None,
+        smart: Optional[bool] = None,
+        smart_field: Optional[str] = None,
+        shard_count: Optional[int] = None,
+    ) -> Result[Graph]:
+        """Create a new graph.
+
+        :param name: Graph name.
+        :type name: str
+        :param edge_definitions: List of edge definitions, where each edge
+            definition entry is a dictionary with fields "edge_collection",
+            "from_vertex_collections" and "to_vertex_collections" (see below
+            for example).
+        :type edge_definitions: [dict] | None
+        :param orphan_collections: Names of additional vertex collections that
+            are not in edge definitions.
+        :type orphan_collections: [str] | None
+        :param smart: If set to True, sharding is enabled (see parameter
+            **smart_field** below). Applies only to enterprise version of
+            ArangoDB.
+        :type smart: bool | None
+        :param smart_field: Document field used to shard the vertices of the
+            graph. 
To use this, parameter **smart** must be set to True and + every vertex in the graph must have the smart field. Applies only + to enterprise version of ArangoDB. + :type smart_field: str | None + :param shard_count: Number of shards used for every collection in the + graph. To use this, parameter **smart** must be set to True and + every vertex in the graph must have the smart field. This number + cannot be modified later once set. Applies only to enterprise + version of ArangoDB. + :type shard_count: int | None + :return: Graph API wrapper. + :rtype: aioarango.graph.Graph + :raise aioarango.exceptions.GraphCreateError: If create fails. + + Here is an example entry for parameter **edge_definitions**: + + .. code-block:: python + + { + 'edge_collection': 'teach', + 'from_vertex_collections': ['teachers'], + 'to_vertex_collections': ['lectures'] + } + """ + data: Json = {"name": name, "options": dict()} + if edge_definitions is not None: + data["edgeDefinitions"] = [ + { + "collection": definition["edge_collection"], + "from": definition["from_vertex_collections"], + "to": definition["to_vertex_collections"], + } + for definition in edge_definitions + ] + if orphan_collections is not None: + data["orphanCollections"] = orphan_collections + if smart is not None: # pragma: no cover + data["isSmart"] = smart + if smart_field is not None: # pragma: no cover + data["options"]["smartGraphAttribute"] = smart_field + if shard_count is not None: # pragma: no cover + data["options"]["numberOfShards"] = shard_count + + request = Request(method="post", endpoint="/_api/gharial", data=data) + + def response_handler(resp: Response) -> Graph: + if resp.is_success: + return Graph(self._conn, self._executor, name) + raise GraphCreateError(resp, request) + + return await self._execute(request, response_handler) + + async def delete_graph( + self, + name: str, + ignore_missing: bool = False, + drop_collections: Optional[bool] = None, + ) -> Result[bool]: + """Drop the graph of the given name from the database. + + :param name: Graph name. + :type name: str + :param ignore_missing: Do not raise an exception on missing graph. + :type ignore_missing: bool + :param drop_collections: Drop the collections of the graph also. This + is only if they are not in use by other graphs. + :type drop_collections: bool | None + :return: True if graph was deleted successfully, False if graph was not + found and **ignore_missing** was set to True. + :rtype: bool + :raise aioarango.exceptions.GraphDeleteError: If delete fails. + """ + params: Params = {} + if drop_collections is not None: + params["dropCollections"] = drop_collections + + request = Request( + method="delete", endpoint=f"/_api/gharial/{name}", params=params + ) + + def response_handler(resp: Response) -> bool: + if resp.error_code == 1924 and ignore_missing: + return False + if not resp.is_success: + raise GraphDeleteError(resp, request) + return True + + return await self._execute(request, response_handler) + + ####################### + # Document Management # + ####################### + + async def has_document( + self, document: Json, rev: Optional[str] = None, check_rev: bool = True + ) -> Result[bool]: + """Check if a document exists. + + :param document: Document ID or body with "_id" field. + :type document: str | dict + :param rev: Expected document revision. Overrides value of "_rev" field + in **document** if present. 
+ :type rev: str | None + :param check_rev: If set to True, revision of **document** (if given) + is compared against the revision of target document. + :type check_rev: bool + :return: True if document exists, False otherwise. + :rtype: bool + :raise aioarango.exceptions.DocumentInError: If check fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + return await self._get_col_by_doc(document).has( + document=document, rev=rev, check_rev=check_rev + ) + + async def document( + self, document: Json, rev: Optional[str] = None, check_rev: bool = True + ) -> Result[Optional[Json]]: + """Return a document. + + :param document: Document ID or body with "_id" field. + :type document: str | dict + :param rev: Expected document revision. Overrides the value of "_rev" + field in **document** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **document** (if given) + is compared against the revision of target document. + :type check_rev: bool + :return: Document, or None if not found. + :rtype: dict | None + :raise aioarango.exceptions.DocumentGetError: If retrieval fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + return await self._get_col_by_doc(document).get( + document=document, rev=rev, check_rev=check_rev + ) + + async def insert_document( + self, + collection: str, + document: Json, + return_new: bool = False, + sync: Optional[bool] = None, + silent: bool = False, + overwrite: bool = False, + return_old: bool = False, + overwrite_mode: Optional[str] = None, + keep_none: Optional[bool] = None, + merge: Optional[bool] = None, + ) -> Result[Union[bool, Json]]: + """Insert a new document. + + :param collection: Collection name. + :type collection: str + :param document: Document to insert. If it contains the "_key" or "_id" + field, the value is used as the key of the new document (otherwise + it is auto-generated). Any "_rev" field is ignored. + :type document: dict + :param return_new: Include body of the new document in the returned + metadata. Ignored if parameter **silent** is set to True. + :type return_new: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :param overwrite: If set to True, operation does not fail on duplicate + key and the existing document is replaced. + :type overwrite: bool + :param return_old: Include body of the old document if replaced. + Applies only when value of **overwrite** is set to True. + :type return_old: bool + :param overwrite_mode: Overwrite behavior used when the document key + exists already. Allowed values are "replace" (replace-insert) or + "update" (update-insert). Implicitly sets the value of parameter + **overwrite**. + :type overwrite_mode: str | None + :param keep_none: If set to True, fields with value None are retained + in the document. Otherwise, they are removed completely. Applies + only when **overwrite_mode** is set to "update" (update-insert). + :type keep_none: bool | None + :param merge: If set to True (default), sub-dictionaries are merged + instead of the new one overwriting the old one. Applies only when + **overwrite_mode** is set to "update" (update-insert). + :type merge: bool | None + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. 
+ :rtype: bool | dict + :raise aioarango.exceptions.DocumentInsertError: If insert fails. + """ + return await self.collection(collection).insert( + document=document, + return_new=return_new, + sync=sync, + silent=silent, + overwrite=overwrite, + return_old=return_old, + overwrite_mode=overwrite_mode, + keep_none=keep_none, + merge=merge, + ) + + async def update_document( + self, + document: Json, + check_rev: bool = True, + merge: bool = True, + keep_none: bool = True, + return_new: bool = False, + return_old: bool = False, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, Json]]: + """Update a document. + + :param document: Partial or full document with the updated values. It + must contain the "_id" field. + :type document: dict + :param check_rev: If set to True, revision of **document** (if given) + is compared against the revision of target document. + :type check_rev: bool + :param merge: If set to True, sub-dictionaries are merged instead of + the new one overwriting the old one. + :type merge: bool | None + :param keep_none: If set to True, fields with value None are retained + in the document. Otherwise, they are removed completely. + :type keep_none: bool | None + :param return_new: Include body of the new document in the result. + :type return_new: bool + :param return_old: Include body of the old document in the result. + :type return_old: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentUpdateError: If update fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + return await self._get_col_by_doc(document).update( + document=document, + check_rev=check_rev, + merge=merge, + keep_none=keep_none, + return_new=return_new, + return_old=return_old, + sync=sync, + silent=silent, + ) + + async def replace_document( + self, + document: Json, + check_rev: bool = True, + return_new: bool = False, + return_old: bool = False, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, Json]]: + """Replace a document. + + :param document: New document to replace the old one with. It must + contain the "_id" field. Edge document must also have "_from" and + "_to" fields. + :type document: dict + :param check_rev: If set to True, revision of **document** (if given) + is compared against the revision of target document. + :type check_rev: bool + :param return_new: Include body of the new document in the result. + :type return_new: bool + :param return_old: Include body of the old document in the result. + :type return_old: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentReplaceError: If replace fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. 
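+
+        Example (a sketch; assumes a ``students`` collection holding a
+        document with key ``john``):
+
+        .. code-block:: python
+
+            metadata = await db.replace_document(
+                {'_id': 'students/john', 'name': 'John', 'gpa': 3.9}
+            )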
+ """ + return await self._get_col_by_doc(document).replace( + document=document, + check_rev=check_rev, + return_new=return_new, + return_old=return_old, + sync=sync, + silent=silent, + ) + + async def delete_document( + self, + document: Union[str, Json], + rev: Optional[str] = None, + check_rev: bool = True, + ignore_missing: bool = False, + return_old: bool = False, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, Json]]: + """Delete a document. + + :param document: Document ID, key or body. Document body must contain + the "_id" field. + :type document: str | dict + :param rev: Expected document revision. Overrides the value of "_rev" + field in **document** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **document** (if given) + is compared against the revision of target document. + :type check_rev: bool + :param ignore_missing: Do not raise an exception on missing document. + This parameter has no effect in transactions where an exception is + always raised on failures. + :type ignore_missing: bool + :param return_old: Include body of the old document in the result. + :type return_old: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :return: Document metadata (e.g. document key, revision), or True if + parameter **silent** was set to True, or False if document was not + found and **ignore_missing** was set to True (does not apply in + transactions). + :rtype: bool | dict + :raise aioarango.exceptions.DocumentDeleteError: If delete fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + return await self._get_col_by_doc(document).delete( + document=document, + rev=rev, + check_rev=check_rev, + ignore_missing=ignore_missing, + return_old=return_old, + sync=sync, + silent=silent, + ) + + ################### + # Task Management # + ################### + + async def tasks(self) -> Result[Jsons]: + """Return all currently active server tasks. + + :return: Currently active server tasks. + :rtype: [dict] + :raise aioarango.exceptions.TaskListError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_api/tasks") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise TaskListError(resp, request) + result: Jsons = resp.body + return result + + return await self._execute(request, response_handler) + + async def task(self, task_id: str) -> Result[Json]: + """Return the details of an active server task. + + :param task_id: Server task ID. + :type task_id: str + :return: Server task details. + :rtype: dict + :raise aioarango.exceptions.TaskGetError: If retrieval fails. + """ + request = Request(method="get", endpoint=f"/_api/tasks/{task_id}") + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_body(resp.body) + raise TaskGetError(resp, request) + + return await self._execute(request, response_handler) + + async def create_task( + self, + name: str, + command: str, + params: Optional[Json] = None, + period: Optional[int] = None, + offset: Optional[int] = None, + task_id: Optional[str] = None, + ) -> Result[Json]: + """Create a new server task. + + :param name: Name of the server task. + :type name: str + :param command: Javascript command to execute. + :type command: str + :param params: Optional parameters passed into the Javascript command. 
+ :type params: dict | None + :param period: Number of seconds to wait between executions. If set + to 0, the new task will be "timed", meaning it will execute only + once and be deleted afterwards. + :type period: int | None + :param offset: Initial delay before execution in seconds. + :type offset: int | None + :param task_id: Pre-defined ID for the new server task. + :type task_id: str | None + :return: Details of the new task. + :rtype: dict + :raise aioarango.exceptions.TaskCreateError: If create fails. + """ + data: Json = {"name": name, "command": command} + if params is not None: + data["params"] = params + if task_id is not None: + data["id"] = task_id + if period is not None: + data["period"] = period + if offset is not None: + data["offset"] = offset + + if task_id is None: + task_id = "" + + request = Request(method="post", endpoint=f"/_api/tasks/{task_id}", data=data) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_body(resp.body) + raise TaskCreateError(resp, request) + + return await self._execute(request, response_handler) + + async def delete_task(self, task_id: str, ignore_missing: bool = False) -> Result[bool]: + """Delete a server task. + + :param task_id: Server task ID. + :type task_id: str + :param ignore_missing: Do not raise an exception on missing task. + :type ignore_missing: bool + :return: True if task was successfully deleted, False if task was not + found and **ignore_missing** was set to True. + :rtype: bool + :raise aioarango.exceptions.TaskDeleteError: If delete fails. + """ + request = Request(method="delete", endpoint=f"/_api/tasks/{task_id}") + + def response_handler(resp: Response) -> bool: + if resp.error_code == 1852 and ignore_missing: + return False + if not resp.is_success: + raise TaskDeleteError(resp, request) + return True + + return await self._execute(request, response_handler) + + ################### + # User Management # + ################### + + async def has_user(self, username: str) -> Result[bool]: + """Check if user exists. + + :param username: Username. + :type username: str + :return: True if user exists, False otherwise. + :rtype: bool + """ + request = Request(method="get", endpoint="/_api/user") + + def response_handler(resp: Response) -> bool: + if not resp.is_success: + raise UserListError(resp, request) + return any(user["user"] == username for user in resp.body["result"]) + + return await self._execute(request, response_handler) + + async def users(self) -> Result[Jsons]: + """Return all user details. + + :return: List of user details. + :rtype: [dict] + :raise aioarango.exceptions.UserListError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_api/user") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise UserListError(resp, request) + return [ + { + "username": record["user"], + "active": record["active"], + "extra": record["extra"], + } + for record in resp.body["result"] + ] + + return await self._execute(request, response_handler) + + async def user(self, username: str) -> Result[Json]: + """Return user details. + + :param username: Username. + :type username: str + :return: User details. + :rtype: dict + :raise aioarango.exceptions.UserGetError: If retrieval fails. 
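+
+        Example (a sketch; assumes a user named ``john`` exists):
+
+        .. code-block:: python
+
+            info = await db.user('john')
+            print(info['username'], info['active'])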
+ """ + request = Request(method="get", endpoint=f"/_api/user/{username}") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise UserGetError(resp, request) + return { + "username": resp.body["user"], + "active": resp.body["active"], + "extra": resp.body["extra"], + } + + return await self._execute(request, response_handler) + + async def create_user( + self, + username: str, + password: Optional[str] = None, + active: Optional[bool] = None, + extra: Optional[Json] = None, + ) -> Result[Json]: + """Create a new user. + + :param username: Username. + :type username: str + :param password: Password. + :type password: str | None + :param active: True if user is active, False otherwise. + :type active: bool | None + :param extra: Additional data for the user. + :type extra: dict | None + :return: New user details. + :rtype: dict + :raise aioarango.exceptions.UserCreateError: If create fails. + """ + data: Json = {"user": username, "passwd": password, "active": active} + if extra is not None: + data["extra"] = extra + + request = Request(method="post", endpoint="/_api/user", data=data) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise UserCreateError(resp, request) + return { + "username": resp.body["user"], + "active": resp.body["active"], + "extra": resp.body["extra"], + } + + return await self._execute(request, response_handler) + + async def update_user( + self, + username: str, + password: Optional[str] = None, + active: Optional[bool] = None, + extra: Optional[Json] = None, + ) -> Result[Json]: + """Update a user. + + :param username: Username. + :type username: str + :param password: New password. + :type password: str | None + :param active: Whether the user is active. + :type active: bool | None + :param extra: Additional data for the user. + :type extra: dict | None + :return: New user details. + :rtype: dict + :raise aioarango.exceptions.UserUpdateError: If update fails. + """ + data: Json = {} + if password is not None: + data["passwd"] = password + if active is not None: + data["active"] = active + if extra is not None: + data["extra"] = extra + + request = Request( + method="patch", + endpoint=f"/_api/user/{username}", + data=data, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise UserUpdateError(resp, request) + return { + "username": resp.body["user"], + "active": resp.body["active"], + "extra": resp.body["extra"], + } + + return await self._execute(request, response_handler) + + async def replace_user( + self, + username: str, + password: str, + active: Optional[bool] = None, + extra: Optional[Json] = None, + ) -> Result[Json]: + """Replace a user. + + :param username: Username. + :type username: str + :param password: New password. + :type password: str + :param active: Whether the user is active. + :type active: bool | None + :param extra: Additional data for the user. + :type extra: dict | None + :return: New user details. + :rtype: dict + :raise aioarango.exceptions.UserReplaceError: If replace fails. 
+ """ + data: Json = {"user": username, "passwd": password} + if active is not None: + data["active"] = active + if extra is not None: + data["extra"] = extra + + request = Request(method="put", endpoint=f"/_api/user/{username}", data=data) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return { + "username": resp.body["user"], + "active": resp.body["active"], + "extra": resp.body["extra"], + } + raise UserReplaceError(resp, request) + + return await self._execute(request, response_handler) + + async def delete_user(self, username: str, ignore_missing: bool = False) -> Result[bool]: + """Delete a user. + + :param username: Username. + :type username: str + :param ignore_missing: Do not raise an exception on missing user. + :type ignore_missing: bool + :return: True if user was deleted successfully, False if user was not + found and **ignore_missing** was set to True. + :rtype: bool + :raise aioarango.exceptions.UserDeleteError: If delete fails. + """ + request = Request(method="delete", endpoint=f"/_api/user/{username}") + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + elif resp.status_code == 404 and ignore_missing: + return False + raise UserDeleteError(resp, request) + + return await self._execute(request, response_handler) + + ######################### + # Permission Management # + ######################### + + async def permissions(self, username: str) -> Result[Json]: + """Return user permissions for all databases and collections. + + :param username: Username. + :type username: str + :return: User permissions for all databases and collections. + :rtype: dict + :raise aioarango.exceptions.PermissionListError: If retrieval fails. + """ + request = Request( + method="get", + endpoint=f"/_api/user/{username}/database", + params={"full": True}, + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + result: Json = resp.body["result"] + return result + raise PermissionListError(resp, request) + + return await self._execute(request, response_handler) + + async def permission( + self, username: str, database: str, collection: Optional[str] = None + ) -> Result[str]: + """Return user permission for a specific database or collection. + + :param username: Username. + :type username: str + :param database: Database name. + :type database: str + :param collection: Collection name. + :type collection: str | None + :return: Permission for given database or collection. + :rtype: str + :raise aioarango.exceptions.PermissionGetError: If retrieval fails. + """ + endpoint = f"/_api/user/{username}/database/{database}" + if collection is not None: + endpoint += "/" + collection + request = Request(method="get", endpoint=endpoint) + + def response_handler(resp: Response) -> str: + if resp.is_success: + return str(resp.body["result"]) + raise PermissionGetError(resp, request) + + return await self._execute(request, response_handler) + + async def update_permission( + self, + username: str, + permission: str, + database: str, + collection: Optional[str] = None, + ) -> Result[bool]: + """Update user permission for a specific database or collection. + + :param username: Username. + :type username: str + :param permission: Allowed values are "rw" (read and write), "ro" + (read only) or "none" (no access). + :type permission: str + :param database: Database name. + :type database: str + :param collection: Collection name. + :type collection: str | None + :return: True if access was granted successfully. 
+        :rtype: bool
+        :raise aioarango.exceptions.PermissionUpdateError: If update fails.
+        """
+        endpoint = f"/_api/user/{username}/database/{database}"
+        if collection is not None:
+            endpoint += "/" + collection
+
+        request = Request(method="put", endpoint=endpoint, data={"grant": permission})
+
+        def response_handler(resp: Response) -> bool:
+            if resp.is_success:
+                return True
+            raise PermissionUpdateError(resp, request)
+
+        return await self._execute(request, response_handler)
+
+    async def reset_permission(
+        self, username: str, database: str, collection: Optional[str] = None
+    ) -> Result[bool]:
+        """Reset user permission for a specific database or collection.
+
+        :param username: Username.
+        :type username: str
+        :param database: Database name.
+        :type database: str
+        :param collection: Collection name.
+        :type collection: str | None
+        :return: True if permission was reset successfully.
+        :rtype: bool
+        :raise aioarango.exceptions.PermissionResetError: If reset fails.
+        """
+        endpoint = f"/_api/user/{username}/database/{database}"
+        if collection is not None:
+            endpoint += "/" + collection
+
+        request = Request(method="delete", endpoint=endpoint)
+
+        def response_handler(resp: Response) -> bool:
+            if resp.is_success:
+                return True
+            raise PermissionResetError(resp, request)
+
+        return await self._execute(request, response_handler)
+
+    ########################
+    # Async Job Management #
+    ########################
+
+    async def async_jobs(self, status: str, count: Optional[int] = None) -> Result[List[str]]:
+        """Return IDs of async jobs with given status.
+
+        :param status: Job status (e.g. "pending", "done").
+        :type status: str
+        :param count: Max number of job IDs to return.
+        :type count: int | None
+        :return: List of job IDs.
+        :rtype: [str]
+        :raise aioarango.exceptions.AsyncJobListError: If retrieval fails.
+        """
+        params: Params = {}
+        if count is not None:
+            params["count"] = count
+
+        request = Request(method="get", endpoint=f"/_api/job/{status}", params=params)
+
+        def response_handler(resp: Response) -> List[str]:
+            if resp.is_success:
+                result: List[str] = resp.body
+                return result
+            raise AsyncJobListError(resp, request)
+
+        return await self._execute(request, response_handler)
+
+    async def clear_async_jobs(self, threshold: Optional[int] = None) -> Result[bool]:
+        """Clear async job results from the server.
+
+        Async jobs that are still queued or running are not stopped.
+
+        :param threshold: If specified, only the job results created prior to
+            the threshold (a unix timestamp) are deleted. Otherwise, all job
+            results are deleted.
+        :type threshold: int | None
+        :return: True if job results were cleared successfully.
+        :rtype: bool
+        :raise aioarango.exceptions.AsyncJobClearError: If operation fails.
+        """
+        if threshold is None:
+            request = Request(method="delete", endpoint="/_api/job/all")
+        else:
+            request = Request(
+                method="delete",
+                endpoint="/_api/job/expired",
+                params={"stamp": threshold},
+            )
+
+        def response_handler(resp: Response) -> bool:
+            if resp.is_success:
+                return True
+            raise AsyncJobClearError(resp, request)
+
+        return await self._execute(request, response_handler)
+
+    ###################
+    # View Management #
+    ###################
+
+    async def views(self) -> Result[Jsons]:
+        """Return list of views and their summaries.
+
+        :return: List of views.
+        :rtype: [dict]
+        :raise aioarango.exceptions.ViewListError: If retrieval fails.
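+
+        Example (a sketch; assumes ``db`` is this database's API wrapper):
+
+        .. code-block:: python
+
+            for view in await db.views():
+                print(view['name'], view['type'])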
+ """ + request = Request(method="get", endpoint="/_api/view") + + def response_handler(resp: Response) -> Jsons: + if resp.is_success: + return [format_view(view) for view in resp.body["result"]] + raise ViewListError(resp, request) + + return await self._execute(request, response_handler) + + async def view(self, name: str) -> Result[Json]: + """Return view details. + + :return: View details. + :rtype: dict + :raise aioarango.exceptions.ViewGetError: If retrieval fails. + """ + request = Request(method="get", endpoint=f"/_api/view/{name}/properties") + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_view(resp.body) + raise ViewGetError(resp, request) + + return await self._execute(request, response_handler) + + async def create_view( + self, name: str, view_type: str, properties: Optional[Json] = None + ) -> Result[Json]: + """Create a view. + + :param name: View name. + :type name: str + :param view_type: View type (e.g. "arangosearch"). + :type view_type: str + :param properties: View properties. For more information see + https://www.arangodb.com/docs/stable/http/views-arangosearch.html + :type properties: dict + :return: View details. + :rtype: dict + :raise aioarango.exceptions.ViewCreateError: If create fails. + """ + data: Json = {"name": name, "type": view_type} + + if properties is not None: + data.update(properties) + + request = Request(method="post", endpoint="/_api/view", data=data) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_view(resp.body) + raise ViewCreateError(resp, request) + + return await self._execute(request, response_handler) + + async def update_view(self, name: str, properties: Json) -> Result[Json]: + """Update a view. + + :param name: View name. + :type name: str + :param properties: View properties. For more information see + https://www.arangodb.com/docs/stable/http/views-arangosearch.html + :type properties: dict + :return: View details. + :rtype: dict + :raise aioarango.exceptions.ViewUpdateError: If update fails. + """ + request = Request( + method="patch", + endpoint=f"/_api/view/{name}/properties", + data=properties, + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_view(resp.body) + raise ViewUpdateError(resp, request) + + return await self._execute(request, response_handler) + + async def replace_view(self, name: str, properties: Json) -> Result[Json]: + """Replace a view. + + :param name: View name. + :type name: str + :param properties: View properties. For more information see + https://www.arangodb.com/docs/stable/http/views-arangosearch.html + :type properties: dict + :return: View details. + :rtype: dict + :raise aioarango.exceptions.ViewReplaceError: If replace fails. + """ + request = Request( + method="put", + endpoint=f"/_api/view/{name}/properties", + data=properties, + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_view(resp.body) + raise ViewReplaceError(resp, request) + + return await self._execute(request, response_handler) + + async def delete_view(self, name: str, ignore_missing: bool = False) -> Result[bool]: + """Delete a view. + + :param name: View name. + :type name: str + :param ignore_missing: Do not raise an exception on missing view. + :type ignore_missing: bool + :return: True if view was deleted successfully, False if view was not + found and **ignore_missing** was set to True. + :rtype: bool + :raise aioarango.exceptions.ViewDeleteError: If delete fails. 
+ """ + request = Request(method="delete", endpoint=f"/_api/view/{name}") + + def response_handler(resp: Response) -> bool: + if resp.error_code == 1203 and ignore_missing: + return False + if resp.is_success: + return True + raise ViewDeleteError(resp, request) + + return await self._execute(request, response_handler) + + async def rename_view(self, name: str, new_name: str) -> Result[bool]: + """Rename a view. + + :param name: View name. + :type name: str + :param new_name: New view name. + :type new_name: str + :return: True if view was renamed successfully. + :rtype: bool + :raise aioarango.exceptions.ViewRenameError: If delete fails. + """ + request = Request( + method="put", + endpoint=f"/_api/view/{name}/rename", + data={"name": new_name}, + ) + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + raise ViewRenameError(resp, request) + + return await self._execute(request, response_handler) + + ################################ + # ArangoSearch View Management # + ################################ + + async def create_arangosearch_view( + self, name: str, properties: Optional[Json] = None + ) -> Result[Json]: + """Create an ArangoSearch view. + + :param name: View name. + :type name: str + :param properties: View properties. For more information see + https://www.arangodb.com/docs/stable/http/views-arangosearch.html + :type properties: dict | None + :return: View details. + :rtype: dict + :raise aioarango.exceptions.ViewCreateError: If create fails. + """ + data: Json = {"name": name, "type": "arangosearch"} + + if properties is not None: + data.update(properties) + + request = Request(method="post", endpoint="/_api/view#ArangoSearch", data=data) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_view(resp.body) + raise ViewCreateError(resp, request) + + return await self._execute(request, response_handler) + + async def update_arangosearch_view(self, name: str, properties: Json) -> Result[Json]: + """Update an ArangoSearch view. + + :param name: View name. + :type name: str + :param properties: View properties. For more information see + https://www.arangodb.com/docs/stable/http/views-arangosearch.html + :type properties: dict + :return: View details. + :rtype: dict + :raise aioarango.exceptions.ViewUpdateError: If update fails. + """ + request = Request( + method="patch", + endpoint=f"/_api/view/{name}/properties#ArangoSearch", + data=properties, + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_view(resp.body) + raise ViewUpdateError(resp, request) + + return await self._execute(request, response_handler) + + async def replace_arangosearch_view(self, name: str, properties: Json) -> Result[Json]: + """Replace an ArangoSearch view. + + :param name: View name. + :type name: str + :param properties: View properties. For more information see + https://www.arangodb.com/docs/stable/http/views-arangosearch.html + :type properties: dict + :return: View details. + :rtype: dict + :raise aioarango.exceptions.ViewReplaceError: If replace fails. 
+ """ + request = Request( + method="put", + endpoint=f"/_api/view/{name}/properties#ArangoSearch", + data=properties, + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_view(resp.body) + raise ViewReplaceError(resp, request) + + return await self._execute(request, response_handler) + + ####################### + # Analyzer Management # + ####################### + + async def analyzers(self) -> Result[Jsons]: + """Return list of analyzers. + + :return: List of analyzers. + :rtype: [dict] + :raise aioarango.exceptions.AnalyzerListError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_api/analyzer") + + def response_handler(resp: Response) -> Jsons: + if resp.is_success: + result: Jsons = resp.body["result"] + return result + raise AnalyzerListError(resp, request) + + return await self._execute(request, response_handler) + + async def analyzer(self, name: str) -> Result[Json]: + """Return analyzer details. + + :param name: Analyzer name. + :type name: str + :return: Analyzer details. + :rtype: dict + :raise aioarango.exceptions.AnalyzerGetError: If retrieval fails. + """ + request = Request(method="get", endpoint=f"/_api/analyzer/{name}") + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_body(resp.body) + raise AnalyzerGetError(resp, request) + + return await self._execute(request, response_handler) + + async def create_analyzer( + self, + name: str, + analyzer_type: str, + properties: Optional[Json] = None, + features: Optional[Sequence[str]] = None, + ) -> Result[Json]: + """Create an analyzer. + + :param name: Analyzer name. + :type name: str + :param analyzer_type: Analyzer type. + :type analyzer_type: str + :param properties: Analyzer properties. + :type properties: dict | None + :param features: Analyzer features. + :type features: list | None + :return: Analyzer details. + :rtype: dict + :raise aioarango.exceptions.AnalyzerCreateError: If create fails. + """ + data: Json = {"name": name, "type": analyzer_type} + + if properties is not None: + data["properties"] = properties + + if features is not None: + data["features"] = features + + request = Request(method="post", endpoint="/_api/analyzer", data=data) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + result: Json = resp.body + return result + raise AnalyzerCreateError(resp, request) + + return await self._execute(request, response_handler) + + async def delete_analyzer( + self, name: str, force: bool = False, ignore_missing: bool = False + ) -> Result[bool]: + """Delete an analyzer. + + :param name: Analyzer name. + :type name: str + :param force: Remove the analyzer configuration even if in use. + :type force: bool + :param ignore_missing: Do not raise an exception on missing analyzer. + :type ignore_missing: bool + :return: True if analyzer was deleted successfully, False if analyzer + was not found and **ignore_missing** was set to True. + :rtype: bool + :raise aioarango.exceptions.AnalyzerDeleteError: If delete fails. 
+ """ + request = Request( + method="delete", + endpoint=f"/_api/analyzer/{name}", + params={"force": force}, + ) + + def response_handler(resp: Response) -> bool: + if resp.error_code in {1202, 404} and ignore_missing: + return False + if resp.is_success: + return True + raise AnalyzerDeleteError(resp, request) + + return await self._execute(request, response_handler) + + +class StandardDatabase(Database): + """Standard database API wrapper.""" + + def __init__(self, connection: Connection) -> None: + super().__init__(connection=connection, executor=DefaultApiExecutor(connection)) + + def __repr__(self) -> str: + return f"" diff --git a/aioarango/errno.py b/aioarango/errno.py new file mode 100644 index 000000000..3c256eaa0 --- /dev/null +++ b/aioarango/errno.py @@ -0,0 +1,1184 @@ +################## +# General Errors # +################## + +# No error occurred. +NO_ERROR = 0 + +# General error occurred. +FAILED = 1 + +# Operating system error occurred. +SYS_ERROR = 2 + +# Out of memory. +OUT_OF_MEMORY = 3 + +# Internal error occurred. +INTERNAL = 4 + +# Illegal number representation given. +ILLEGAL_NUMBER = 5 + +# Numeric overflow occurred. +NUMERIC_OVERFLOW = 6 + +# Unknown option supplied by user. +ILLEGAL_OPTION = 7 + +# Detected PID without living process. +DEAD_PID = 8 + +# Feature not implemented. +NOT_IMPLEMENTED = 9 + +# Bad parameter. +BAD_PARAMETER = 10 + +# Missing permission. +FORBIDDEN = 11 + +# Out of memory (mmap). +OUT_OF_MEMORY_MMAP = 12 + +# Corrupt CSV line. +CORRUPTED_CSV = 13 + +# File not found. +FILE_NOT_FOUND = 14 + +# Cannot write to file. +CANNOT_WRITE_FILE = 15 + +# Cannot overwrite file. +CANNOT_OVERWRITE_FILE = 16 + +# Type error occurred. +TYPE_ERROR = 17 + +# Timed out waiting for a lock. +LOCK_TIMEOUT = 18 + +# Cannot create a directory. +CANNOT_CREATE_DIRECTORY = 19 + +# Cannot create a temporary file. +CANNOT_CREATE_TEMP_FILE = 20 + +# Request cancelled by user. +REQUEST_CANCELED = 21 + +# Raised for debugging. +DEBUG = 22 + +# Invalid IP address. +IP_ADDRESS_INVALID = 25 + +# File exists already. +FILE_EXISTS = 27 + +# Locked resource or operation. +LOCKED = 28 + +# Deadlock detected when accessing collections. +DEADLOCK = 29 + +# Call failed as server shutdown is in progress. +SHUTTING_DOWN = 30 + +# Feature only for enterprise version of ArangoDB. +ONLY_ENTERPRISE = 31 + +# Resource usage exceeded maximum value. +RESOURCE_LIMIT = 32 + +# ICU operation failed. +ICU_ERROR = 33 + +# Cannot read a file. +CANNOT_READ_FILE = 34 + +# Incompatible version of ArangoDB. +INCOMPATIBLE_VERSION = 35 + +# Requested resource disabled. +DISABLED = 36 + +########################### +# HTTP Error Status Codes # +########################### + +# Bad HTTP parameter. +HTTP_BAD_PARAMETER = 400 + +# User unauthorized. +HTTP_UNAUTHORIZED = 401 + +# Operation forbidden. +HTTP_FORBIDDEN = 403 + +# Unknown URI. +HTTP_NOT_FOUND = 404 + +# HTTP method unknown. +HTTP_METHOD_NOT_ALLOWED = 405 + +# HTTP content type not supported. +HTTP_NOT_ACCEPTABLE = 406 + +# Precondition not met. +HTTP_PRECONDITION_FAILED = 412 + +# Internal server error occurred. +HTTP_SERVER_ERROR = 500 + +# Service temporarily unavailable. +HTTP_SERVICE_UNAVAILABLE = 503 + +# Service contacted by ArangoDB did not respond in time. +HTTP_GATEWAY_TIMEOUT = 504 + +########################## +# HTTP Processing Errors # +########################## + +# Corrupted JSON string. +HTTP_CORRUPTED_JSON = 600 + +# URL contains superfluous suffices. 
+HTTP_SUPERFLUOUS_SUFFICES = 601 + +#################################### +# Internal ArangoDB Storage Errors # +#################################### + +# Datafile in illegal state. +ILLEGAL_STATE = 1000 + +# User attempted to write to a sealed datafile. +DATAFILE_SEALED = 1002 + +# Read-only datafile or collection. +READ_ONLY = 1004 + +# Duplicate identifier detected. +DUPLICATE_IDENTIFIER = 1005 + +# Datafile unreadable. +DATAFILE_UNREADABLE = 1006 + +# Datafile empty. +DATAFILE_EMPTY = 1007 + +# Error occurred during WAL log file recovery. +RECOVERY = 1008 + +# Required datafile statistics object not found. +DATAFILE_STATISTICS_NOT_FOUND = 1009 + +#################################### +# External ArangoDB Storage Errors # +#################################### + +# Datafile corrupted. +CORRUPTED_DATAFILE = 1100 + +# Parameter file corrupted or cannot be read. +ILLEGAL_PARAMETER_FILE = 1101 + +# Collection contains one or more corrupted datafiles. +CORRUPTED_COLLECTION = 1102 + +# System call mmap failed. +MMAP_FAILED = 1103 + +# Filesystem full. +FILESYSTEM_FULL = 1104 + +# Cannot create journal. +NO_JOURNAL = 1105 + +# Datafile of the same name already exists. +DATAFILE_ALREADY_EXISTS = 1106 + +# Database directory locked by another process. +DATADIR_LOCKED = 1107 + +# Directory of the same name already exists. +COLLECTION_DIRECTORY_ALREADY_EXISTS = 1108 + +# System call msync failed. +MSYNC_FAILED = 1109 + +# Cannot lock the database directory on startup. +DATADIR_UNLOCKABLE = 1110 + +# Server waited too long for the datafile to be synced to disk. +SYNC_TIMEOUT = 1111 + +################################### +# General ArangoDB Storage Errors # +################################### + +# Conflict detected while updating or deleting a document. +CONFLICT = 1200 + +# Database directory invalid. +DATADIR_INVALID = 1201 + +# Unknown document identifier or handle. +DOCUMENT_NOT_FOUND = 1202 + +# Collection with given identifier or name unknown. +DATA_SOURCE_NOT_FOUND = 1203 + +# Missing collection parameter. +COLLECTION_PARAMETER_MISSING = 1204 + +# Invalid document handle. +DOCUMENT_HANDLE_BAD = 1205 + +# Maximal journal size too small. +MAXIMAL_SIZE_TOO_SMALL = 1206 + +# Duplicate name detected. +DUPLICATE_NAME = 1207 + +# Illegal name detected. +ILLEGAL_NAME = 1208 + +# No suitable index for query. +NO_INDEX = 1209 + +# Unique constraint violation. +UNIQUE_CONSTRAINT_VIOLATED = 1210 + +# Index with unknown identifier. +INDEX_NOT_FOUND = 1212 + +# Cross-collection requested. +CROSS_COLLECTION_REQUEST = 1213 + +# Index handle corrupted. +INDEX_HANDLE_BAD = 1214 + +# Document too large to fit into any datafile. +DOCUMENT_TOO_LARGE = 1216 + +# Collection must be unloaded. +COLLECTION_NOT_UNLOADED = 1217 + +# Invalid collection type. +COLLECTION_TYPE_INVALID = 1218 + +# Failed to parse an attribute name definition. +ATTRIBUTE_PARSER_FAILED = 1220 + +# Corrupted document key. +DOCUMENT_KEY_BAD = 1221 + +# User-defined document key supplied for collections with auto key generation. +DOCUMENT_KEY_UNEXPECTED = 1222 + +# Database directory not writable for current user. +DATADIR_NOT_WRITABLE = 1224 + +# Key generator out of keys. +OUT_OF_KEYS = 1225 + +# Document key missing. +DOCUMENT_KEY_MISSING = 1226 + +# There was an attempt to create a document of invalid type. +DOCUMENT_TYPE_INVALID = 1227 + +# Non-existing database accessed. +DATABASE_NOT_FOUND = 1228 + +# Invalid database used. +DATABASE_NAME_INVALID = 1229 + +# Operation requested in non-system database. 
+USE_SYSTEM_DATABASE = 1230
+
+# Invalid key generator.
+INVALID_KEY_GENERATOR = 1232
+
+# Undefined or invalid "_from" or "_to" values in an edge document.
+INVALID_EDGE_ATTRIBUTE = 1233
+
+# Cannot create index.
+INDEX_CREATION_FAILED = 1235
+
+# Server is write-throttled and a write operation waited too long.
+WRITE_THROTTLE_TIMEOUT = 1236
+
+# Collection type mismatch.
+COLLECTION_TYPE_MISMATCH = 1237
+
+# Collection accessed but not yet loaded.
+COLLECTION_NOT_LOADED = 1238
+
+# Document revision corrupt or missing.
+DOCUMENT_REV_BAD = 1239
+
+# Read cannot be completed by storage engine.
+INCOMPLETE_READ = 1240
+
+###################################
+# Checked ArangoDB Storage Errors #
+###################################
+
+# Datafile full.
+DATAFILE_FULL = 1300
+
+# Server database directory empty.
+EMPTY_DATADIR = 1301
+
+# Operation needs to be retried.
+TRY_AGAIN = 1302
+
+# Storage engine busy.
+BUSY = 1303
+
+# Datafile merge in progress and the operation cannot be completed.
+MERGE_IN_PROGRESS = 1304
+
+# Storage engine encountered an I/O error.
+IO_ERROR = 1305
+
+###############################
+# ArangoDB Replication Errors #
+###############################
+
+# Replication applier received no (or incomplete) response from master.
+REPLICATION_NO_RESPONSE = 1400
+
+# Replication applier received an invalid response from master.
+REPLICATION_INVALID_RESPONSE = 1401
+
+# Replication applier received a server error from master.
+REPLICATION_MASTER_ERROR = 1402
+
+# Replication applier tried to connect to master with incompatible version.
+REPLICATION_MASTER_INCOMPATIBLE = 1403
+
+# Replication applier connected to a different master than before.
+REPLICATION_MASTER_CHANGE = 1404
+
+# Replication applier was asked to connect to itself for replication.
+REPLICATION_LOOP = 1405
+
+# Unexpected marker found in replication log stream.
+REPLICATION_UNEXPECTED_MARKER = 1406
+
+# Found invalid replication applier state file.
+REPLICATION_INVALID_APPLIER_STATE = 1407
+
+# Found unexpected transaction ID.
+REPLICATION_UNEXPECTED_TRANSACTION = 1408
+
+# Invalid replication applier configuration.
+REPLICATION_INVALID_APPLIER_CONFIGURATION = 1410
+
+# Operation attempted while replication applier is running.
+REPLICATION_RUNNING = 1411
+
+# Replication applier stopped by user.
+REPLICATION_APPLIER_STOPPED = 1412
+
+# Replication applier started without a known start tick value.
+REPLICATION_NO_START_TICK = 1413
+
+# Replication applier fetched data using a start tick that is not present on
+# the master anymore.
+REPLICATION_START_TICK_NOT_PRESENT = 1414
+
+# Newborn follower submits a wrong checksum.
+REPLICATION_WRONG_CHECKSUM = 1416
+
+# Shard is not empty and follower tries a shortcut.
+REPLICATION_SHARD_NONEMPTY = 1417
+
+###########################
+# ArangoDB Cluster Errors #
+###########################
+
+# Raised on some occasions when one server gets a request from another.
+CLUSTER_SERVER_UNKNOWN = 1449
+
+# Coordinator cannot create a collection as the collection ID already exists.
+CLUSTER_COLLECTION_ID_EXISTS = 1453
+
+# Coordinator cannot create an entry for a new collection in Plan hierarchy.
+CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN = 1454
+
+# Coordinator sees DBServer issues when creating shards for a new collection.
+CLUSTER_COULD_NOT_CREATE_COLLECTION = 1456
+
+# Coordinator runs into a timeout for some cluster wide operation.
+CLUSTER_TIMEOUT = 1457
+
+# Coordinator cannot remove an entry for a collection in Plan hierarchy.
+CLUSTER_COULD_NOT_REMOVE_COLLECTION_IN_PLAN = 1458 + +# Coordinator cannot remove an entry for a collection in Current hierarchy. +CLUSTER_COULD_NOT_REMOVE_COLLECTION_IN_CURRENT = 1459 + +# Coordinator cannot create an entry for a new database in the Plan hierarchy. +CLUSTER_COULD_NOT_CREATE_DATABASE_IN_PLAN = 1460 + +# Coordinator sees DBServer issues when creating databases for a new cluster. +CLUSTER_COULD_NOT_CREATE_DATABASE = 1461 + +# Coordinator cannot remove an entry for a database in the Plan hierarchy. +CLUSTER_COULD_NOT_REMOVE_DATABASE_IN_PLAN = 1462 + +# Coordinator cannot remove an entry for a database in the Current hierarchy. +CLUSTER_COULD_NOT_REMOVE_DATABASE_IN_CURRENT = 1463 + +# Coordinator cannot determine the shard responsible for a given document. +CLUSTER_SHARD_GONE = 1464 + +# Coordinator loses HTTP connection to a DBServer while transferring data. +CLUSTER_CONNECTION_LOST = 1465 + +# "_key" attribute specified in sharded collection which uses not only "_key" +# as sharding attribute. +CLUSTER_MUST_NOT_SPECIFY_KEY = 1466 + +# Coordinator gets conflicting results from different shards. +CLUSTER_GOT_CONTRADICTING_ANSWERS = 1467 + +# Coordinator tries to find out the shard responsible for a partial document. +CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN = 1468 + +# Not allowed to update the value of a shard attribute. +CLUSTER_MUST_NOT_CHANGE_SHARDING_ATTRIBUTES = 1469 + +# Operation not supported in sharded collection. +CLUSTER_UNSUPPORTED = 1470 + +# Operation is coordinator-only. +CLUSTER_ONLY_ON_COORDINATOR = 1471 + +# Coordinator or DBServer cannot read the Plan. +CLUSTER_READING_PLAN_AGENCY = 1472 + +# Coordinator cannot truncate all shards of a cluster collection. +CLUSTER_COULD_NOT_TRUNCATE_COLLECTION = 1473 + +# Internal communication of the cluster for AQL produces an error. +CLUSTER_AQL_COMMUNICATION = 1474 + +# Operation is DBServer-only. +CLUSTER_ONLY_ON_DBSERVER = 1477 + +# Cannot reach a required DBServer. +CLUSTER_BACKEND_UNAVAILABLE = 1478 + +# Required collection out of sync during AQL execution. +CLUSTER_AQL_COLLECTION_OUT_OF_SYNC = 1481 + +# Coordinator cannot create an entry for a new index in Plan hierarchy. +CLUSTER_COULD_NOT_CREATE_INDEX_IN_PLAN = 1482 + +# Coordinator cannot remove an index from Plan hierarchy. +CLUSTER_COULD_NOT_DROP_INDEX_IN_PLAN = 1483 + +# One tries to create a collection with "shards_like" attribute which points +# to another collection that also has one. +CLUSTER_CHAIN_OF_DISTRIBUTESHARDSLIKE = 1484 + +# One tries to drop a collection to which another collection points with its +# "shard_like" attribute. +CLUSTER_MUST_NOT_DROP_COLL_OTHER_DISTRIBUTESHARDSLIKE = 1485 + +# One tries to create a collection which points to an unknown collection in its +# "shard_like" attribute. +CLUSTER_UNKNOWN_DISTRIBUTESHARDSLIKE = 1486 + +# One tries to create a collection with a "replication_factor" greater than the +# available number of DBServers. +CLUSTER_INSUFFICIENT_DBSERVERS = 1487 + +# Cannot drop follower. +CLUSTER_COULD_NOT_DROP_FOLLOWER = 1488 + +# Replication operation refused by a shard leader. +CLUSTER_SHARD_LEADER_REFUSES_REPLICATION = 1489 + +# Non-replication operation refused by a shard follower. +CLUSTER_SHARD_FOLLOWER_REFUSES_OPERATION = 1490 + +# Shard leader resigned in the meantime. +CLUSTER_SHARD_LEADER_RESIGNED = 1491 + +# Agency operation failed after various retries. +CLUSTER_AGENCY_COMMUNICATION_FAILED = 1492 + +# Servers currently competing for leadership. 
+CLUSTER_LEADERSHIP_CHALLENGE_ONGOING = 1495
+
+# Operation sent to a non-leading server.
+CLUSTER_NOT_LEADER = 1496
+
+# Coordinator cannot create an entry for a new view in Plan hierarchy.
+CLUSTER_COULD_NOT_CREATE_VIEW_IN_PLAN = 1497
+
+# Coordinator tries to create a view and the ID already exists.
+CLUSTER_VIEW_ID_EXISTS = 1498
+
+# Coordinator cannot drop a collection entry in Plan hierarchy.
+CLUSTER_COULD_NOT_DROP_COLLECTION = 1499
+
+#########################
+# ArangoDB Query Errors #
+#########################
+
+# Running query killed by an explicit admin command.
+QUERY_KILLED = 1500
+
+# Parsed query syntactically invalid.
+QUERY_PARSE = 1501
+
+# Empty query specified.
+QUERY_EMPTY = 1502
+
+# Runtime error caused by query.
+QUERY_SCRIPT = 1503
+
+# Number out of range.
+QUERY_NUMBER_OUT_OF_RANGE = 1504
+
+# Geo index coordinate invalid or out of range.
+QUERY_INVALID_GEO_VALUE = 1505
+
+# Invalid variable name.
+QUERY_VARIABLE_NAME_INVALID = 1510
+
+# Variable redeclared in a query.
+QUERY_VARIABLE_REDECLARED = 1511
+
+# Variable name unknown or undefined.
+QUERY_VARIABLE_NAME_UNKNOWN = 1512
+
+# Cannot acquire lock on collection.
+QUERY_COLLECTION_LOCK_FAILED = 1521
+
+# Too many collections or shards in a query.
+QUERY_TOO_MANY_COLLECTIONS = 1522
+
+# Document attribute redeclared.
+QUERY_DOCUMENT_ATTRIBUTE_REDECLARED = 1530
+
+# Undefined function called.
+QUERY_FUNCTION_NAME_UNKNOWN = 1540
+
+# Argument number mismatch.
+QUERY_FUNCTION_ARGUMENT_NUMBER_MISMATCH = 1541
+
+# Argument type mismatch.
+QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH = 1542
+
+# Invalid regex.
+QUERY_INVALID_REGEX = 1543
+
+# Invalid bind parameters.
+QUERY_BIND_PARAMETERS_INVALID = 1550
+
+# Bind parameter missing.
+QUERY_BIND_PARAMETER_MISSING = 1551
+
+# Bind parameter undeclared.
+QUERY_BIND_PARAMETER_UNDECLARED = 1552
+
+# Invalid bind parameter value or type.
+QUERY_BIND_PARAMETER_TYPE = 1553
+
+# Non-boolean value used in logical operation.
+QUERY_INVALID_LOGICAL_VALUE = 1560
+
+# Non-numeric value used in arithmetic operation.
+QUERY_INVALID_ARITHMETIC_VALUE = 1561
+
+# Divide by zero.
+QUERY_DIVISION_BY_ZERO = 1562
+
+# Non-list operand used when expecting a list operand.
+QUERY_ARRAY_EXPECTED = 1563
+
+# Function "FAIL()" called inside a query.
+QUERY_FAIL_CALLED = 1569
+
+# Geo restriction specified but no suitable geo index found.
+QUERY_GEO_INDEX_MISSING = 1570
+
+# Fulltext query performed on a collection without suitable fulltext index.
+QUERY_FULLTEXT_INDEX_MISSING = 1571
+
+# Cannot convert value to a date.
+QUERY_INVALID_DATE_VALUE = 1572
+
+# Query contains more than one data-modifying operation.
+QUERY_MULTI_MODIFY = 1573
+
+# Query contains an invalid aggregate expression.
+QUERY_INVALID_AGGREGATE_EXPRESSION = 1574
+
+# Query contains options that cannot be resolved at query compile time.
+QUERY_COMPILE_TIME_OPTIONS = 1575
+
+# Query contains an invalid options specification.
+QUERY_EXCEPTION_OPTIONS = 1576
+
+# Unusable index hint.
+QUERY_FORCED_INDEX_HINT_UNUSABLE = 1577
+
+# Dynamic function not allowed.
+QUERY_DISALLOWED_DYNAMIC_CALL = 1578
+
+# Collection data accessed after modification.
+QUERY_ACCESS_AFTER_MODIFICATION = 1579
+
+############################
+# AQL User Function Errors #
+############################
+
+# User function registered with invalid name.
+QUERY_FUNCTION_INVALID_NAME = 1580
+
+# User function registered with invalid code.
+QUERY_FUNCTION_INVALID_CODE = 1581
+
+# User function not found.
+QUERY_FUNCTION_NOT_FOUND = 1582
+
+# Runtime exception raised by query function.
+QUERY_FUNCTION_RUNTIME_ERROR = 1583
+
+#############################
+# AQL Query Registry Errors #
+#############################
+
+# Query received an invalid JSON.
+QUERY_BAD_JSON_PLAN = 1590
+
+# Query ID not found.
+QUERY_NOT_FOUND = 1591
+
+# User provided expression does not evaluate to true.
+QUERY_USER_ASSERT = 1593
+
+# Warning issued when user provided expression does not evaluate to true.
+QUERY_USER_WARN = 1594
+
+##########################
+# ArangoDB Cursor Errors #
+##########################
+
+# Cursor ID not found.
+CURSOR_NOT_FOUND = 1600
+
+# Concurrent request still using the cursor.
+CURSOR_BUSY = 1601
+
+##############################
+# ArangoDB Validation Errors #
+##############################
+
+# Document does not pass schema validation.
+VALIDATION_FAILED = 1620
+
+# Schema description is invalid.
+VALIDATION_BAD_PARAMETER = 1621
+
+###############################
+# ArangoDB Transaction Errors #
+###############################
+
+# Wrong usage of transactions. This is an internal error.
+TRANSACTION_INTERNAL = 1650
+
+# Nested transactions.
+TRANSACTION_NESTED = 1651
+
+# Unregistered collection used in transaction.
+TRANSACTION_UNREGISTERED_COLLECTION = 1652
+
+# Disallowed operation in transaction.
+TRANSACTION_DISALLOWED_OPERATION = 1653
+
+# Transaction aborted.
+TRANSACTION_ABORTED = 1654
+
+# Transaction not found.
+TRANSACTION_NOT_FOUND = 1655
+
+##########################
+# User Management Errors #
+##########################
+
+# Invalid username.
+USER_INVALID_NAME = 1700
+
+# Username already exists.
+USER_DUPLICATE = 1702
+
+# User not found.
+USER_NOT_FOUND = 1703
+
+# User authenticated by an external server.
+USER_EXTERNAL = 1705
+
+######################################
+# Service Management Errors (Legacy) #
+######################################
+
+# Cannot download service from central repository.
+SERVICE_DOWNLOAD_FAILED = 1752
+
+# Service upload from the client to the ArangoDB server failed.
+SERVICE_UPLOAD_FAILED = 1753
+
+###############
+# LDAP Errors #
+###############
+
+# Cannot initialize an LDAP connection.
+LDAP_CANNOT_INIT = 1800
+
+# Cannot set an LDAP option.
+LDAP_CANNOT_SET_OPTION = 1801
+
+# Cannot bind to an LDAP server.
+LDAP_CANNOT_BIND = 1802
+
+# Cannot unbind from an LDAP server.
+LDAP_CANNOT_UNBIND = 1803
+
+# Cannot search the LDAP server.
+LDAP_CANNOT_SEARCH = 1804
+
+# Cannot start a TLS LDAP session.
+LDAP_CANNOT_START_TLS = 1805
+
+# LDAP did not find any objects with the specified search query.
+LDAP_FOUND_NO_OBJECTS = 1806
+
+# LDAP found zero or more than one user.
+LDAP_NOT_ONE_USER_FOUND = 1807
+
+# LDAP user not identified.
+LDAP_USER_NOT_IDENTIFIED = 1808
+
+# Cannot distinguish a valid mode for provided LDAP configuration.
+LDAP_INVALID_MODE = 1820
+
+###############
+# Task Errors #
+###############
+
+# Task created with an invalid ID.
+TASK_INVALID_ID = 1850
+
+# Task created with a duplicate ID.
+TASK_DUPLICATE_ID = 1851
+
+# Task not found.
+TASK_NOT_FOUND = 1852
+
+############################
+# Graph / Traversal Errors #
+############################
+
+# Invalid name passed to the server.
+GRAPH_INVALID_GRAPH = 1901
+
+# Invalid graph name passed to the server.
+GRAPH_COULD_NOT_CREATE_GRAPH = 1902
+
+# Invalid vertex ID passed to the server.
+GRAPH_INVALID_VERTEX = 1903
+
+# Vertex could not be created.
+GRAPH_COULD_NOT_CREATE_VERTEX = 1904
+
+# Vertex could not be changed.
+GRAPH_COULD_NOT_CHANGE_VERTEX = 1905
+
+# Invalid edge ID passed to the server.
+GRAPH_INVALID_EDGE = 1906
+
+# Edge could not be created.
+GRAPH_COULD_NOT_CREATE_EDGE = 1907
+
+# Edge could not be changed.
+GRAPH_COULD_NOT_CHANGE_EDGE = 1908
+
+# Too many iterations in graph traversal.
+GRAPH_TOO_MANY_ITERATIONS = 1909
+
+# Invalid filter result returned in graph traversal.
+GRAPH_INVALID_FILTER_RESULT = 1910
+
+# Edge collection may only be used once in an edge definition.
+GRAPH_COLLECTION_MULTI_USE = 1920
+
+# Collection already used by another graph in a different edge definition.
+GRAPH_COLLECTION_USE_IN_MULTI_GRAPHS = 1921
+
+# Graph name missing.
+GRAPH_CREATE_MISSING_NAME = 1922
+
+# Edge definition malformed (must be a list of dicts).
+GRAPH_CREATE_MALFORMED_EDGE_DEFINITION = 1923
+
+# Graph not found.
+GRAPH_NOT_FOUND = 1924
+
+# Graph name already exists.
+GRAPH_DUPLICATE = 1925
+
+# Vertex collection does not exist or is not part of the graph.
+GRAPH_VERTEX_COL_DOES_NOT_EXIST = 1926
+
+# Collection not a vertex collection.
+GRAPH_WRONG_COLLECTION_TYPE_VERTEX = 1927
+
+# Vertex collection not in orphan collections of the graph.
+GRAPH_NOT_IN_ORPHAN_COLLECTION = 1928
+
+# Collection already used in an edge definition of the graph.
+GRAPH_COLLECTION_USED_IN_EDGE_DEF = 1929
+
+# Edge collection not used in any edge definition of the graph.
+GRAPH_EDGE_COLLECTION_NOT_USED = 1930
+
+# Collection "_graphs" does not exist.
+GRAPH_NO_GRAPH_COLLECTION = 1932
+
+# Invalid example array object string.
+GRAPH_INVALID_EXAMPLE_ARRAY_OBJECT_STRING = 1933
+
+# Invalid example type (must be a list or dict).
+GRAPH_INVALID_EXAMPLE_ARRAY_OBJECT = 1934
+
+# Invalid number of arguments.
+GRAPH_INVALID_NUMBER_OF_ARGUMENTS = 1935
+
+# Invalid parameter type.
+GRAPH_INVALID_PARAMETER = 1936
+
+# Invalid ID.
+GRAPH_INVALID_ID = 1937
+
+# Collection already in orphans of the graph.
+GRAPH_COLLECTION_USED_IN_ORPHANS = 1938
+
+# Edge collection does not exist or is not part of the graph.
+GRAPH_EDGE_COL_DOES_NOT_EXIST = 1939
+
+# Graph has no edge collections.
+GRAPH_EMPTY = 1940
+
+# Invalid data in "_graphs" collection.
+GRAPH_INTERNAL_DATA_CORRUPT = 1941
+
+# Edge collection already defined.
+GRAPH_INTERNAL_EDGE_COLLECTION_ALREADY_SET = 1942
+
+# Orphan list argument malformed. Must be a list of strings.
+GRAPH_CREATE_MALFORMED_ORPHAN_LIST = 1943
+
+# Collection used as a relation exists.
+GRAPH_EDGE_DEFINITION_IS_DOCUMENT = 1944
+
+# Invalid/unknown session ID passed to the server.
+SESSION_UNKNOWN = 1950
+
+# Session expired.
+SESSION_EXPIRED = 1951
+
+########################
+# Simple Client Errors #
+########################
+
+# This error should not happen.
+SIMPLE_CLIENT_UNKNOWN_ERROR = 2000
+
+# Client could not connect to server.
+SIMPLE_CLIENT_COULD_NOT_CONNECT = 2001
+
+# Client could not write data.
+SIMPLE_CLIENT_COULD_NOT_WRITE = 2002
+
+# Client could not read data.
+SIMPLE_CLIENT_COULD_NOT_READ = 2003
+
+# Will be raised if was erlaube?!
+WAS_ERLAUBE = 2019
+
+#######################
+# Communicator Errors #
+#######################
+
+# Communicator request aborted.
+COMMUNICATOR_REQUEST_ABORTED = 2100
+
+# Communicator disabled.
+COMMUNICATOR_DISABLED = 2101
+
+#######################
+# Internal AQL errors #
+#######################
+
+# Internal error during AQL execution.
+INTERNAL_AQL = 2200
+
+# AQL block wrote in too few output registers.
+WROTE_TOO_FEW_OUTPUT_REGISTERS = 2201
+
+# AQL block wrote in too many output registers.
+WROTE_TOO_MANY_OUTPUT_REGISTERS = 2202 + +# AQL block wrote in an output register twice. +WROTE_OUTPUT_REGISTER_TWICE = 2203 + +# AQL block wrote in a register that is not its output. +WROTE_IN_WRONG_REGISTER = 2204 + +# AQL block did not copy its input registers. +INPUT_REGISTERS_NOT_COPIED = 2205 + +########################## +# Foxx Management Errors # +########################## + +# Service manifest file not a well-formed JSON. +MALFORMED_MANIFEST_FILE = 3000 + +# Service manifest contains invalid values. +INVALID_SERVICE_MANIFEST = 3001 + +# Service folder or bundle does not exist on the server. +SERVICE_FILES_MISSING = 3002 + +# Local service bundle does not match the checksum in the database. +SERVICE_FILES_OUTDATED = 3003 + +# Service options contain invalid values. +INVALID_FOXX_OPTIONS = 3004 + +# Service mountpath contains invalid characters. +INVALID_MOUNTPOINT = 3007 + +# No service found at given mountpath. +SERVICE_NOT_FOUND = 3009 + +# Service missing configuration or dependencies. +SERVICE_NEEDS_CONFIGURATION = 3010 + +# Service already exists at given mountpath. +SERVICE_MOUNTPOINT_CONFLICT = 3011 + +# Service directory does not contain a manifest file. +SERVICE_MANIFEST_NOT_FOUND = 3012 + +# Service options are not well-formed JSONs. +SERVICE_OPTIONS_MALFORMED = 3013 + +# Source path does not match a file or directory. +SERVICE_SOURCE_NOT_FOUND = 3014 + +# Source path could not be resolved. +SERVICE_SOURCE_ERROR = 3015 + +# Unknown service script. +SERVICE_UNKNOWN_SCRIPT = 3016 + +# API for managing Foxx services disabled. +SERVICE_API_DISABLED = 3099 + +################################### +# JavaScript Module Loader Errors # +################################### + +# Cannot resolve module path. +MODULE_NOT_FOUND = 3100 + +# Module could not be parsed because of a syntax error. +MODULE_SYNTAX_ERROR = 3101 + +# Failed to invoke the module in its context. +MODULE_FAILURE = 3103 + +##################### +# Enterprise Errors # +##################### + +# Requested collection needs to be smart. +NO_SMART_COLLECTION = 4000 + +# Given document does not have the smart graph attribute set. +NO_SMART_GRAPH_ATTRIBUTE = 4001 + +# Smart collection cannot be dropped. +CANNOT_DROP_SMART_COLLECTION = 4002 + +# "_key" not prefixed with the value of the smart graph attribute. +KEY_MUST_BE_PREFIXED_WITH_SMART_GRAPH_ATTRIBUTE = 4003 + +# Given smart graph attribute is illegal and cannot be used for sharding. +ILLEGAL_SMART_GRAPH_ATTRIBUTE = 4004 + +# Smart graph attribute of collection does not match the attribute of graph. +SMART_GRAPH_ATTRIBUTE_MISMATCH = 4005 + +# Invalid smart join attribute declaration. +INVALID_SMART_JOIN_ATTRIBUTE = 4006 + +# Key must be prefixed with smart join attribute. +KEY_MUST_BE_PREFIXED_WITH_SMART_JOIN_ATTRIBUTE = 4007 + +# Document lacks required smart join attribute. +NO_SMART_JOIN_ATTRIBUTE = 4008 + +# Cannot update the value of the smart join attribute. +CLUSTER_MUST_NOT_CHANGE_SMART_JOIN_ATTRIBUTE = 4009 + +######################### +# Cluster Repair Errors # +######################### + +# General error during cluster repairs. +CLUSTER_REPAIRS_FAILED = 5000 + +# Cluster repairs not healthy enough. +CLUSTER_REPAIRS_NOT_ENOUGH_HEALTHY = 5001 + +# Raised on various inconsistencies regarding the replication factor. +CLUSTER_REPAIRS_REPLICATION_FACTOR_VIOLATED = 5002 + +# Repaired collection has some shards without DBServers. 
+CLUSTER_REPAIRS_NO_DBSERVERS = 5003 + +# Shard in collection and its prototype in the corresponding "shard_like" +# collection have mismatching leaders. +CLUSTER_REPAIRS_MISMATCHING_LEADERS = 5004 + +# Shard in collection and its prototype in the corresponding "shard_like" +# collection don't have the same followers. +CLUSTER_REPAIRS_MISMATCHING_FOLLOWERS = 5005 + +# Repaired collection does not have "shard_like" as expected. +CLUSTER_REPAIRS_INCONSISTENT_ATTRIBUTES = 5006 + +# Collection and its "shard_like" prototype have unequal number of DBServers. +CLUSTER_REPAIRS_MISMATCHING_SHARDS = 5007 + +# Move shard job failed during cluster repairs. +CLUSTER_REPAIRS_JOB_FAILED = 5008 + +# Move shard job disappeared before finishing. +CLUSTER_REPAIRS_JOB_DISAPPEARED = 5009 + +# Agency transaction failed during either sending or executing it. +CLUSTER_REPAIRS_OPERATION_FAILED = 5010 + +################# +# Agency Errors # +################# + +# Inform message must be an object. +AGENCY_INFORM_MUST_BE_OBJECT = 20011 + +# Inform message must contain a uint parameter 'term'. +AGENCY_INFORM_MUST_CONTAIN_TERM = 20012 + +# Inform message must contain a string parameter 'ID'. +AGENCY_INFORM_MUST_CONTAIN_ID = 20013 + +# Inform message must contain an array 'active'. +AGENCY_INFORM_MUST_CONTAIN_ACTIVE = 20014 + +# Inform message must contain an object 'pool'. +AGENCY_INFORM_MUST_CONTAIN_POOL = 20015 + +# Inform message must contain an object 'min ping'. +AGENCY_INFORM_MUST_CONTAIN_MIN_PING = 20016 + +# Inform message must contain an object 'max ping'. +AGENCY_INFORM_MUST_CONTAIN_MAX_PING = 20017 + +# Inform message must contain an object 'timeoutMult'. +AGENCY_INFORM_MUST_CONTAIN_TIMEOUT_MULT = 20018 + +# Cannot rebuild readDB or the spearHead from replicated log. +AGENCY_CANNOT_REBUILD_DBS = 20021 + +###################### +# Supervision Errors # +###################### + +# General supervision failure. +SUPERVISION_GENERAL_FAILURE = 20501 + +##################### +# Dispatcher Errors # +##################### + +# Queue is full. +QUEUE_FULL = 21003 + +###################### +# Maintenance Errors # +###################### + +# Maintenance action cannot be stopped once started. +ACTION_OPERATION_UNABORTABLE = 6002 + +# This maintenance action is still processing. +ACTION_UNFINISHED = 6003 + +# No such maintenance action exists. +NO_SUCH_ACTION = 6004 + +######################### +# Backup/Restore Errors # +######################### + +# Failed to create hot backup set. +HOT_BACKUP_INTERNAL = 7001 + +# Failed to restore to hot backup set. +HOT_RESTORE_INTERNAL = 7002 + +# The hot backup set cannot be restored on non-matching cluster topology. +BACKUP_TOPOLOGY = 7003 + +# No space left on device. +NO_SPACE_LEFT_ON_DEVICE = 7004 + +# Failed to upload hot backup set to remote target. +FAILED_TO_UPLOAD_BACKUP = 7005 + +# Failed to download hot backup set from remote source. +FAILED_TO_DOWNLOAD_BACKUP = 7006 + +# Cannot find a hot backup set with this ID. +NO_SUCH_HOT_BACKUP = 7007 + +# Invalid remote repository configuration. +REMOTE_REPOSITORY_CONFIG_BAD = 7008 + +# Some of the db servers cannot be reached for transaction locks. +LOCAL_LOCK_FAILED = 7009 + +# Some of the db servers cannot be reached for transaction locks. +LOCAL_LOCK_RETRY = 7010 + +# Conflict of multiple hot backup processes. +HOT_BACKUP_CONFLICT = 7011 + +# One or more db servers could not be reached for hot backup inquiry. 
+HOT_BACKUP_DBSERVERS_AWOL = 7012 diff --git a/aioarango/exceptions.py b/aioarango/exceptions.py new file mode 100644 index 000000000..103d173d4 --- /dev/null +++ b/aioarango/exceptions.py @@ -0,0 +1,963 @@ +from typing import Optional + +from aioarango.request import Request +from aioarango.response import Response + + +class ArangoError(Exception): + """Base class for all exceptions in aioarango.""" + + +class ArangoClientError(ArangoError): + """Base class for errors originating from aioarango client. + + :param msg: Error message. + :type msg: str + + :cvar source: Source of the error (always set to "client"). + :vartype source: str + :ivar message: Error message. + :vartype message: str + """ + + source = "client" + + def __init__(self, msg: str) -> None: + super().__init__(msg) + self.message = msg + self.error_message = None + self.error_code = None + self.url = None + self.response = None + self.request = None + self.http_method = None + self.http_code = None + self.http_headers = None + + +class ArangoServerError(ArangoError): + """Base class for errors originating from ArangoDB server. + + :param resp: HTTP response. + :type resp: aioarango.response.Response + :param msg: Error message override. + :type msg: str + + :cvar source: Source of the error (always set to "server"). + :vartype source: str + :ivar message: Exception message. + :vartype message: str + :ivar url: API URL. + :vartype url: str + :ivar response: HTTP response object. + :vartype response: aioarango.response.Response + :ivar request: HTTP request object. + :vartype request: aioarango.request.Request + :ivar http_method: HTTP method in lowercase (e.g. "post"). + :vartype http_method: str + :ivar http_code: HTTP status code. + :vartype http_code: int + :ivar http_headers: Response headers. + :vartype http_headers: dict + :ivar error_code: Error code from ArangoDB server. + :vartype error_code: int + :ivar error_message: Raw error message from ArangoDB server. 
+    :vartype error_message: str
+    """
+
+    source = "server"
+
+    def __init__(
+        self, resp: Response, request: Request, msg: Optional[str] = None
+    ) -> None:
+        msg = msg or resp.error_message or resp.status_text
+        self.error_message = resp.error_message
+        self.error_code = resp.error_code
+        if self.error_code is not None:
+            msg = f"[HTTP {resp.status_code}][ERR {self.error_code}] {msg}"
+        else:
+            msg = f"[HTTP {resp.status_code}] {msg}"
+            self.error_code = resp.status_code
+        super().__init__(msg)
+        self.message = msg
+        self.url = resp.url
+        self.response = resp
+        self.request = request
+        self.http_method = resp.method
+        self.http_code = resp.status_code
+        self.http_headers = resp.headers
+
+
+##################
+# AQL Exceptions #
+##################
+
+
+class AQLQueryListError(ArangoServerError):
+    """Failed to retrieve running AQL queries."""
+
+
+class AQLQueryExplainError(ArangoServerError):
+    """Failed to parse and explain query."""
+
+
+class AQLQueryValidateError(ArangoServerError):
+    """Failed to parse and validate query."""
+
+
+class AQLQueryExecuteError(ArangoServerError):
+    """Failed to execute query."""
+
+
+class AQLQueryKillError(ArangoServerError):
+    """Failed to kill the query."""
+
+
+class AQLQueryClearError(ArangoServerError):
+    """Failed to clear slow AQL queries."""
+
+
+class AQLQueryTrackingGetError(ArangoServerError):
+    """Failed to retrieve AQL tracking properties."""
+
+
+class AQLQueryTrackingSetError(ArangoServerError):
+    """Failed to configure AQL tracking properties."""
+
+
+class AQLCachePropertiesError(ArangoServerError):
+    """Failed to retrieve query cache properties."""
+
+
+class AQLCacheConfigureError(ArangoServerError):
+    """Failed to configure query cache properties."""
+
+
+class AQLCacheEntriesError(ArangoServerError):
+    """Failed to retrieve AQL cache entries."""
+
+
+class AQLCacheClearError(ArangoServerError):
+    """Failed to clear the query cache."""
+
+
+class AQLFunctionListError(ArangoServerError):
+    """Failed to retrieve AQL user functions."""
+
+
+class AQLFunctionCreateError(ArangoServerError):
+    """Failed to create AQL user function."""
+
+
+class AQLFunctionDeleteError(ArangoServerError):
+    """Failed to delete AQL user function."""
+
+
+##############################
+# Async Execution Exceptions #
+##############################
+
+
+class AsyncExecuteError(ArangoServerError):
+    """Failed to execute async API request."""
+
+
+class AsyncJobListError(ArangoServerError):
+    """Failed to retrieve async jobs."""
+
+
+class AsyncJobCancelError(ArangoServerError):
+    """Failed to cancel async job."""
+
+
+class AsyncJobStatusError(ArangoServerError):
+    """Failed to retrieve async job status."""
+
+
+class AsyncJobResultError(ArangoServerError):
+    """Failed to retrieve async job result."""
+
+
+class AsyncJobClearError(ArangoServerError):
+    """Failed to clear async job results."""
+
+
+##############################
+# Backup Exceptions          #
+##############################
+
+
+class BackupCreateError(ArangoServerError):
+    """Failed to create a backup."""
+
+
+class BackupDeleteError(ArangoServerError):
+    """Failed to delete a backup."""
+
+
+class BackupDownloadError(ArangoServerError):
+    """Failed to download a backup from remote repository."""
+
+
+class BackupGetError(ArangoServerError):
+    """Failed to retrieve backup details."""
+
+
+class BackupRestoreError(ArangoServerError):
+    """Failed to restore from backup."""
+
+
+class BackupUploadError(ArangoServerError):
+    """Failed to upload a backup to remote repository."""
+
+
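
The metadata wired up in ArangoServerError.__init__ above (http_code, error_code, error_message) is what calling code keys on when it wants to treat certain server rejections as benign. A minimal sketch of that pattern, assuming a connected db handle from this client; view_or_none is a hypothetical helper, and the 1203 comparison simply mirrors the ignore_missing branch of delete_view() earlier in this diff:

    from aioarango import errno
    from aioarango.exceptions import ViewGetError

    async def view_or_none(db, name: str):
        # Hypothetical convenience wrapper: return None for a missing view
        # instead of propagating ViewGetError.
        try:
            return await db.view(name)
        except ViewGetError as exc:
            # error_code is populated by ArangoServerError.__init__ above.
            if exc.error_code == errno.DATA_SOURCE_NOT_FOUND:  # 1203
                return None
            raise
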
+############################## +# Batch Execution Exceptions # +############################## + + +class BatchStateError(ArangoClientError): + """The batch object was in a bad state.""" + + +class BatchJobResultError(ArangoClientError): + """Failed to retrieve batch job result.""" + + +class BatchExecuteError(ArangoServerError): + """Failed to execute batch API request.""" + + +######################### +# Collection Exceptions # +######################### + + +class CollectionListError(ArangoServerError): + """Failed to retrieve collections.""" + + +class CollectionPropertiesError(ArangoServerError): + """Failed to retrieve collection properties.""" + + +class CollectionConfigureError(ArangoServerError): + """Failed to configure collection properties.""" + + +class CollectionStatisticsError(ArangoServerError): + """Failed to retrieve collection statistics.""" + + +class CollectionRevisionError(ArangoServerError): + """Failed to retrieve collection revision.""" + + +class CollectionChecksumError(ArangoServerError): + """Failed to retrieve collection checksum.""" + + +class CollectionCreateError(ArangoServerError): + """Failed to create collection.""" + + +class CollectionDeleteError(ArangoServerError): + """Failed to delete collection.""" + + +class CollectionRenameError(ArangoServerError): + """Failed to rename collection.""" + + +class CollectionTruncateError(ArangoServerError): + """Failed to truncate collection.""" + + +class CollectionLoadError(ArangoServerError): + """Failed to load collection.""" + + +class CollectionUnloadError(ArangoServerError): + """Failed to unload collection.""" + + +class CollectionRecalculateCountError(ArangoServerError): + """Failed to recalculate document count.""" + + +class CollectionResponsibleShardError(ArangoServerError): + """Failed to retrieve responsible shard.""" + + +##################### +# Cursor Exceptions # +##################### + + +class CursorStateError(ArangoClientError): + """The cursor object was in a bad state.""" + + +class CursorCountError(ArangoClientError, TypeError): + """The cursor count was not enabled.""" + + +class CursorEmptyError(ArangoClientError): + """The current batch in cursor was empty.""" + + +class CursorNextError(ArangoServerError): + """Failed to retrieve the next result batch from server.""" + + +class CursorCloseError(ArangoServerError): + """Failed to delete the cursor result from server.""" + + +####################### +# Database Exceptions # +####################### + + +class DatabaseListError(ArangoServerError): + """Failed to retrieve databases.""" + + +class DatabasePropertiesError(ArangoServerError): + """Failed to retrieve database properties.""" + + +class DatabaseCreateError(ArangoServerError): + """Failed to create database.""" + + +class DatabaseDeleteError(ArangoServerError): + """Failed to delete database.""" + + +####################### +# Document Exceptions # +####################### + + +class DocumentParseError(ArangoClientError): + """Failed to parse document input.""" + + +class DocumentCountError(ArangoServerError): + """Failed to retrieve document count.""" + + +class DocumentInError(ArangoServerError): + """Failed to check whether document exists.""" + + +class DocumentGetError(ArangoServerError): + """Failed to retrieve document.""" + + +class DocumentKeysError(ArangoServerError): + """Failed to retrieve document keys.""" + + +class DocumentIDsError(ArangoServerError): + """Failed to retrieve document IDs.""" + + +class DocumentInsertError(ArangoServerError): + """Failed to insert 
document.""" + + +class DocumentReplaceError(ArangoServerError): + """Failed to replace document.""" + + +class DocumentUpdateError(ArangoServerError): + """Failed to update document.""" + + +class DocumentDeleteError(ArangoServerError): + """Failed to delete document.""" + + +class DocumentRevisionError(ArangoServerError): + """The expected and actual document revisions mismatched.""" + + +################### +# Foxx Exceptions # +################### + + +class FoxxServiceListError(ArangoServerError): + """Failed to retrieve Foxx services.""" + + +class FoxxServiceGetError(ArangoServerError): + """Failed to retrieve Foxx service metadata.""" + + +class FoxxServiceCreateError(ArangoServerError): + """Failed to create Foxx service.""" + + +class FoxxServiceUpdateError(ArangoServerError): + """Failed to update Foxx service.""" + + +class FoxxServiceReplaceError(ArangoServerError): + """Failed to replace Foxx service.""" + + +class FoxxServiceDeleteError(ArangoServerError): + """Failed to delete Foxx services.""" + + +class FoxxConfigGetError(ArangoServerError): + """Failed to retrieve Foxx service configuration.""" + + +class FoxxConfigUpdateError(ArangoServerError): + """Failed to update Foxx service configuration.""" + + +class FoxxConfigReplaceError(ArangoServerError): + """Failed to replace Foxx service configuration.""" + + +class FoxxDependencyGetError(ArangoServerError): + """Failed to retrieve Foxx service dependencies.""" + + +class FoxxDependencyUpdateError(ArangoServerError): + """Failed to update Foxx service dependencies.""" + + +class FoxxDependencyReplaceError(ArangoServerError): + """Failed to replace Foxx service dependencies.""" + + +class FoxxScriptListError(ArangoServerError): + """Failed to retrieve Foxx service scripts.""" + + +class FoxxScriptRunError(ArangoServerError): + """Failed to run Foxx service script.""" + + +class FoxxTestRunError(ArangoServerError): + """Failed to run Foxx service tests.""" + + +class FoxxDevModeEnableError(ArangoServerError): + """Failed to enable development mode for Foxx service.""" + + +class FoxxDevModeDisableError(ArangoServerError): + """Failed to disable development mode for Foxx service.""" + + +class FoxxReadmeGetError(ArangoServerError): + """Failed to retrieve Foxx service readme.""" + + +class FoxxSwaggerGetError(ArangoServerError): + """Failed to retrieve Foxx service swagger.""" + + +class FoxxDownloadError(ArangoServerError): + """Failed to download Foxx service bundle.""" + + +class FoxxCommitError(ArangoServerError): + """Failed to commit local Foxx service state.""" + + +#################### +# Graph Exceptions # +#################### + + +class GraphListError(ArangoServerError): + """Failed to retrieve graphs.""" + + +class GraphCreateError(ArangoServerError): + """Failed to create the graph.""" + + +class GraphDeleteError(ArangoServerError): + """Failed to delete the graph.""" + + +class GraphPropertiesError(ArangoServerError): + """Failed to retrieve graph properties.""" + + +class GraphTraverseError(ArangoServerError): + """Failed to execute graph traversal.""" + + +class VertexCollectionListError(ArangoServerError): + """Failed to retrieve vertex collections.""" + + +class VertexCollectionCreateError(ArangoServerError): + """Failed to create vertex collection.""" + + +class VertexCollectionDeleteError(ArangoServerError): + """Failed to delete vertex collection.""" + + +class EdgeDefinitionListError(ArangoServerError): + """Failed to retrieve edge definitions.""" + + +class EdgeDefinitionCreateError(ArangoServerError): 
+ """Failed to create edge definition.""" + + +class EdgeDefinitionReplaceError(ArangoServerError): + """Failed to replace edge definition.""" + + +class EdgeDefinitionDeleteError(ArangoServerError): + """Failed to delete edge definition.""" + + +class EdgeListError(ArangoServerError): + """Failed to retrieve edges coming in and out of a vertex.""" + + +#################### +# Index Exceptions # +#################### + + +class IndexListError(ArangoServerError): + """Failed to retrieve collection indexes.""" + + +class IndexCreateError(ArangoServerError): + """Failed to create collection index.""" + + +class IndexDeleteError(ArangoServerError): + """Failed to delete collection index.""" + + +class IndexLoadError(ArangoServerError): + """Failed to load indexes into memory.""" + + +##################### +# Pregel Exceptions # +##################### + + +class PregelJobCreateError(ArangoServerError): + """Failed to create Pregel job.""" + + +class PregelJobGetError(ArangoServerError): + """Failed to retrieve Pregel job details.""" + + +class PregelJobDeleteError(ArangoServerError): + """Failed to delete Pregel job.""" + + +##################### +# Server Exceptions # +##################### + + +class ServerConnectionError(ArangoClientError): + """Failed to connect to ArangoDB server.""" + + +class ServerEngineError(ArangoServerError): + """Failed to retrieve database engine.""" + + +class ServerVersionError(ArangoServerError): + """Failed to retrieve server version.""" + + +class ServerDetailsError(ArangoServerError): + """Failed to retrieve server details.""" + + +class ServerStatusError(ArangoServerError): + """Failed to retrieve server status.""" + + +class ServerTimeError(ArangoServerError): + """Failed to retrieve server system time.""" + + +class ServerEchoError(ArangoServerError): + """Failed to retrieve details on last request.""" + + +class ServerShutdownError(ArangoServerError): + """Failed to initiate shutdown sequence.""" + + +class ServerRunTestsError(ArangoServerError): + """Failed to execute server tests.""" + + +class ServerRequiredDBVersionError(ArangoServerError): + """Failed to retrieve server target version.""" + + +class ServerReadLogError(ArangoServerError): + """Failed to retrieve global log.""" + + +class ServerLogLevelError(ArangoServerError): + """Failed to retrieve server log levels.""" + + +class ServerLogLevelSetError(ArangoServerError): + """Failed to set server log levels.""" + + +class ServerReloadRoutingError(ArangoServerError): + """Failed to reload routing details.""" + + +class ServerStatisticsError(ArangoServerError): + """Failed to retrieve server statistics.""" + + +class ServerMetricsError(ArangoServerError): + """Failed to retrieve server metrics.""" + + +class ServerRoleError(ArangoServerError): + """Failed to retrieve server role in a cluster.""" + + +class ServerTLSError(ArangoServerError): + """Failed to retrieve TLS data.""" + + +class ServerTLSReloadError(ArangoServerError): + """Failed to reload TLS.""" + + +class ServerEncryptionError(ArangoServerError): + """Failed to reload user-defined encryption keys.""" + + +##################### +# Task Exceptions # +##################### + + +class TaskListError(ArangoServerError): + """Failed to retrieve server tasks.""" + + +class TaskGetError(ArangoServerError): + """Failed to retrieve server task details.""" + + +class TaskCreateError(ArangoServerError): + """Failed to create server task.""" + + +class TaskDeleteError(ArangoServerError): + """Failed to delete server task.""" + + 
+##########################
+# Transaction Exceptions #
+##########################
+
+
+class TransactionExecuteError(ArangoServerError):
+    """Failed to execute raw transaction."""
+
+
+class TransactionInitError(ArangoServerError):
+    """Failed to initialize transaction."""
+
+
+class TransactionStatusError(ArangoServerError):
+    """Failed to retrieve transaction status."""
+
+
+class TransactionCommitError(ArangoServerError):
+    """Failed to commit transaction."""
+
+
+class TransactionAbortError(ArangoServerError):
+    """Failed to abort transaction."""
+
+
+###################
+# User Exceptions #
+###################
+
+
+class UserListError(ArangoServerError):
+    """Failed to retrieve users."""
+
+
+class UserGetError(ArangoServerError):
+    """Failed to retrieve user details."""
+
+
+class UserCreateError(ArangoServerError):
+    """Failed to create user."""
+
+
+class UserUpdateError(ArangoServerError):
+    """Failed to update user."""
+
+
+class UserReplaceError(ArangoServerError):
+    """Failed to replace user."""
+
+
+class UserDeleteError(ArangoServerError):
+    """Failed to delete user."""
+
+
+###################
+# View Exceptions #
+###################
+
+
+class ViewListError(ArangoServerError):
+    """Failed to retrieve views."""
+
+
+class ViewGetError(ArangoServerError):
+    """Failed to retrieve view details."""
+
+
+class ViewCreateError(ArangoServerError):
+    """Failed to create view."""
+
+
+class ViewUpdateError(ArangoServerError):
+    """Failed to update view."""
+
+
+class ViewReplaceError(ArangoServerError):
+    """Failed to replace view."""
+
+
+class ViewDeleteError(ArangoServerError):
+    """Failed to delete view."""
+
+
+class ViewRenameError(ArangoServerError):
+    """Failed to rename view."""
+
+
+#######################
+# Analyzer Exceptions #
+#######################
+
+
+class AnalyzerListError(ArangoServerError):
+    """Failed to retrieve analyzers."""
+
+
+class AnalyzerGetError(ArangoServerError):
+    """Failed to retrieve analyzer details."""
+
+
+class AnalyzerCreateError(ArangoServerError):
+    """Failed to create analyzer."""
+
+
+class AnalyzerDeleteError(ArangoServerError):
+    """Failed to delete analyzer."""
+
+
+#########################
+# Permission Exceptions #
+#########################
+
+
+class PermissionListError(ArangoServerError):
+    """Failed to list user permissions."""
+
+
+class PermissionGetError(ArangoServerError):
+    """Failed to retrieve user permission."""
+
+
+class PermissionUpdateError(ArangoServerError):
+    """Failed to update user permission."""
+
+
+class PermissionResetError(ArangoServerError):
+    """Failed to reset user permission."""
+
+
+##################
+# WAL Exceptions #
+##################
+
+
+class WALPropertiesError(ArangoServerError):
+    """Failed to retrieve WAL properties."""
+
+
+class WALConfigureError(ArangoServerError):
+    """Failed to configure WAL properties."""
+
+
+class WALTransactionListError(ArangoServerError):
+    """Failed to retrieve running WAL transactions."""
+
+
+class WALFlushError(ArangoServerError):
+    """Failed to flush WAL."""
+
+
+class WALTickRangesError(ArangoServerError):
+    """Failed to return WAL tick ranges."""
+
+
+class WALLastTickError(ArangoServerError):
+    """Failed to return WAL last tick."""
+
+
+class WALTailError(ArangoServerError):
+    """Failed to tail the WAL."""
+
+
+##########################
+# Replication Exceptions #
+##########################
+
+
+class ReplicationInventoryError(ArangoServerError):
+    """Failed to retrieve inventory of collection and indexes."""
+
+
+class
+ + +class ReplicationDumpBatchCreateError(ArangoServerError): + """Failed to create dump batch.""" + + +class ReplicationDumpBatchDeleteError(ArangoServerError): + """Failed to delete dump batch.""" + + +class ReplicationDumpBatchExtendError(ArangoServerError): + """Failed to extend dump batch.""" + + +class ReplicationDumpError(ArangoServerError): + """Failed to retrieve collection content.""" + + +class ReplicationSyncError(ArangoServerError): + """Failed to synchronize data from remote.""" + + +class ReplicationClusterInventoryError(ArangoServerError): + """Failed to retrieve overview of collections and indexes in a cluster.""" + + +class ReplicationLoggerStateError(ArangoServerError): + """Failed to retrieve logger state.""" + + +class ReplicationLoggerFirstTickError(ArangoServerError): + """Failed to retrieve logger first tick.""" + + +class ReplicationApplierConfigError(ArangoServerError): + """Failed to retrieve replication applier configuration.""" + + +class ReplicationApplierConfigSetError(ArangoServerError): + """Failed to update replication applier configuration.""" + + +class ReplicationApplierStartError(ArangoServerError): + """Failed to start replication applier.""" + + +class ReplicationApplierStopError(ArangoServerError): + """Failed to stop replication applier.""" + + +class ReplicationApplierStateError(ArangoServerError): + """Failed to retrieve replication applier state.""" + + +class ReplicationMakeSlaveError(ArangoServerError): + """Failed to change role to slave.""" + + +class ReplicationServerIDError(ArangoServerError): + """Failed to retrieve server ID.""" + + +###################### +# Cluster Exceptions # +###################### + + +class ClusterHealthError(ArangoServerError): + """Failed to retrieve DBServer health.""" + + +class ClusterServerIDError(ArangoServerError): + """Failed to retrieve server ID.""" + + +class ClusterServerRoleError(ArangoServerError): + """Failed to retrieve server role.""" + + +class ClusterServerStatisticsError(ArangoServerError): + """Failed to retrieve DBServer statistics.""" + + +class ClusterServerVersionError(ArangoServerError): + """Failed to retrieve server node version.""" + + +class ClusterServerEngineError(ArangoServerError): + """Failed to retrieve server node engine.""" + + +class ClusterMaintenanceModeError(ArangoServerError): + """Failed to enable/disable cluster supervision maintenance mode.""" + + +class ClusterEndpointsError(ArangoServerError): + """Failed to retrieve cluster endpoints.""" + + +class ClusterServerCountError(ArangoServerError): + """Failed to retrieve cluster server count.""" + + +################## +# JWT Exceptions # +################## + + +class JWTAuthError(ArangoServerError): + """Failed to get a new JWT token from ArangoDB.""" + + +class JWTSecretListError(ArangoServerError): + """Failed to retrieve information on currently loaded JWT secrets.""" + + +class JWTSecretReloadError(ArangoServerError): + """Failed to reload JWT secrets.""" diff --git a/aioarango/executor.py b/aioarango/executor.py new file mode 100644 index 000000000..36fb674d7 --- /dev/null +++ b/aioarango/executor.py @@ -0,0 +1,42 @@ +from typing import Callable, TypeVar, Union + +from aioarango.connection import Connection +from aioarango.request import Request +from aioarango.response import Response + +ApiExecutor = Union[ + "DefaultApiExecutor", + "AsyncApiExecutor", + "BatchApiExecutor", + "TransactionApiExecutor", +] + +T = TypeVar("T") + + +class DefaultApiExecutor: + """Default API executor.
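+
+    A minimal usage sketch (the endpoint and handler here are illustrative,
+    not a fixed public API):
+
+    .. code-block:: python
+
+        executor = DefaultApiExecutor(connection)
+        request = Request(method="get", endpoint="/_api/version")
+        version = await executor.execute(request, lambda resp: resp.body)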
+ + :param connection: HTTP connection. + :type connection: aioarango.connection.BasicConnection | + aioarango.connection.JwtConnection | aioarango.connection.JwtSuperuserConnection + """ + + def __init__(self, connection: Connection) -> None: + self._conn = connection + + @property + def context(self) -> str: + return "default" + + async def execute(self, request: Request, response_handler: Callable[[Response], T]) -> T: + """Execute an API request and return the result. + + :param request: HTTP request. + :type request: aioarango.request.Request + :param response_handler: HTTP response handler. + :type response_handler: callable + :return: API execution result. + """ + resp = await self._conn.send_request(request) + return response_handler(resp) diff --git a/aioarango/formatter.py b/aioarango/formatter.py new file mode 100644 index 000000000..bfa94f3f2 --- /dev/null +++ b/aioarango/formatter.py @@ -0,0 +1,1143 @@ +from typing import Any + +from aioarango.typings import Headers, Json + + +def verify_format(_: Any, res: Json) -> Json: + return res + + +def format_body(body: Json) -> Json: + """Format generic response body. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + body.pop("error", None) + body.pop("code", None) + return body + + +def format_index(body: Json) -> Json: + """Format index data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result = {"id": body["id"].split("/", 1)[-1], "fields": body["fields"]} + if "type" in body: + result["type"] = body["type"] + if "name" in body: + result["name"] = body["name"] + if "deduplicate" in body: + result["deduplicate"] = body["deduplicate"] + if "sparse" in body: + result["sparse"] = body["sparse"] + if "unique" in body: + result["unique"] = body["unique"] + if "minLength" in body: + result["min_length"] = body["minLength"] + if "geoJson" in body: + result["geo_json"] = body["geoJson"] + if "ignoreNull" in body: + result["ignore_none"] = body["ignoreNull"] + if "selectivityEstimate" in body: + result["selectivity"] = body["selectivityEstimate"] + if "isNewlyCreated" in body: + result["new"] = body["isNewlyCreated"] + if "expireAfter" in body: + result["expiry_time"] = body["expireAfter"] + if "inBackground" in body: + result["in_background"] = body["inBackground"] + if "bestIndexedLevel" in body: + result["best_indexed_level"] = body["bestIndexedLevel"] + if "worstIndexedLevel" in body: + result["worst_indexed_level"] = body["worstIndexedLevel"] + if "maxNumCoverCells" in body: + result["max_num_cover_cells"] = body["maxNumCoverCells"] + + return verify_format(body, result) + + +def format_key_options(body: Json) -> Json: + """Format collection key options data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + + if "type" in body: + result["key_generator"] = body["type"] + if "increment" in body: + result["key_increment"] = body["increment"] + if "offset" in body: + result["key_offset"] = body["offset"] + if "allowUserKeys" in body: + result["user_keys"] = body["allowUserKeys"] + if "lastValue" in body: + result["key_last_value"] = body["lastValue"] + + return verify_format(body, result) + + +def format_database(body: Json) -> Json: + """Format database info. + + :param body: Input body. + :type body: dict + :return: Formatted body.
+ :rtype: dict + """ + result: Json = {} + + if "id" in body: + result["id"] = body["id"] + if "name" in body: + result["name"] = body["name"] + if "path" in body: + result["path"] = body["path"] + if "system" in body: + result["system"] = body["system"] + if "isSystem" in body: + result["system"] = body["isSystem"] + + # Cluster only + if "sharding" in body: + result["sharding"] = body["sharding"] + if "replicationFactor" in body: + result["replication_factor"] = body["replicationFactor"] + if "writeConcern" in body: + result["write_concern"] = body["writeConcern"] + + return verify_format(body, result) + + +def format_collection(body: Json) -> Json: + """Format collection data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + + if "id" in body: + result["id"] = body["id"] + if "objectId" in body: + result["object_id"] = body["objectId"] + if "name" in body: + result["name"] = body["name"] + if "isSystem" in body: + result["system"] = body["isSystem"] + if "isSmart" in body: + result["smart"] = body["isSmart"] + if "type" in body: + result["type"] = body["type"] + result["edge"] = body["type"] == 3 + if "waitForSync" in body: + result["sync"] = body["waitForSync"] + + if "status" in body: + result["status"] = body["status"] + if "statusString" in body: + result["status_string"] = body["statusString"] + if "globallyUniqueId" in body: + result["global_id"] = body["globallyUniqueId"] + if "cacheEnabled" in body: + result["cache"] = body["cacheEnabled"] + if "replicationFactor" in body: + result["replication_factor"] = body["replicationFactor"] + if "minReplicationFactor" in body: + result["min_replication_factor"] = body["minReplicationFactor"] + if "writeConcern" in body: + result["write_concern"] = body["writeConcern"] + + # Cluster only + if "shards" in body: + result["shards"] = body["shards"] + if "replicationFactor" in body: + result["replication_factor"] = body["replicationFactor"] + if "numberOfShards" in body: + result["shard_count"] = body["numberOfShards"] + if "shardKeys" in body: + result["shard_fields"] = body["shardKeys"] + if "distributeShardsLike" in body: + result["shard_like"] = body["distributeShardsLike"] + if "shardingStrategy" in body: + result["sharding_strategy"] = body["shardingStrategy"] + if "smartJoinAttribute" in body: + result["smart_join_attribute"] = body["smartJoinAttribute"] + + # Key Generator + if "keyOptions" in body: + result["key_options"] = format_key_options(body["keyOptions"]) + + # Replication only + if "cid" in body: + result["cid"] = body["cid"] + if "version" in body: + result["version"] = body["version"] + if "allowUserKeys" in body: + result["user_keys"] = body["allowUserKeys"] + if "planId" in body: + result["plan_id"] = body["planId"] + if "deleted" in body: + result["deleted"] = body["deleted"] + + # New in 3.7 + if "syncByRevision" in body: + result["sync_by_revision"] = body["syncByRevision"] + if "tempObjectId" in body: + result["temp_object_id"] = body["tempObjectId"] + if "usesRevisionsAsDocumentIds" in body: + result["rev_as_id"] = body["usesRevisionsAsDocumentIds"] + if "isDisjoint" in body: + result["disjoint"] = body["isDisjoint"] + if "isSmartChild" in body: + result["smart_child"] = body["isSmartChild"] + if "minRevision" in body: + result["min_revision"] = body["minRevision"] + if "schema" in body: + result["schema"] = body["schema"] + + return verify_format(body, result) + + +def format_aql_cache(body: Json) -> Json: + """Format AQL cache data. 
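+
+    A sketch of the camelCase-to-snake_case renaming this function performs
+    (the values are illustrative):
+
+    .. code-block:: python
+
+        format_aql_cache({
+            "mode": "on",
+            "maxResults": 128,
+            "maxResultsSize": 268435456,
+            "maxEntrySize": 16777216,
+            "includeSystem": False,
+        })
+        # -> {'mode': 'on', 'max_results': 128,
+        #     'max_results_size': 268435456,
+        #     'max_entry_size': 16777216,
+        #     'include_system': False}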
+ + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result = { + "mode": body["mode"], + "max_results": body["maxResults"], + "max_results_size": body["maxResultsSize"], + "max_entry_size": body["maxEntrySize"], + "include_system": body["includeSystem"], + } + return verify_format(body, result) + + +def format_wal_properties(body: Json) -> Json: + """Format WAL properties. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + if "allowOversizeEntries" in body: + result["oversized_ops"] = body["allowOversizeEntries"] + if "logfileSize" in body: + result["log_size"] = body["logfileSize"] + if "historicLogfiles" in body: + result["historic_logs"] = body["historicLogfiles"] + if "reserveLogfiles" in body: + result["reserve_logs"] = body["reserveLogfiles"] + if "syncInterval" in body: + result["sync_interval"] = body["syncInterval"] + if "throttleWait" in body: + result["throttle_wait"] = body["throttleWait"] + if "throttleWhenPending" in body: + result["throttle_limit"] = body["throttleWhenPending"] + + return verify_format(body, result) + + +def format_wal_transactions(body: Json) -> Json: + """Format WAL transactions. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + if "minLastCollected" in body: + result["last_collected"] = body["minLastCollected"] + if "minLastSealed" in body: + result["last_sealed"] = body["minLastSealed"] + if "runningTransactions" in body: + result["count"] = body["runningTransactions"] + + return verify_format(body, result) + + +def format_aql_query(body: Json) -> Json: + """Format AQL query data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result = {"id": body["id"], "query": body["query"]} + if "database" in body: + result["database"] = body["database"] + if "bindVars" in body: + result["bind_vars"] = body["bindVars"] + if "runTime" in body: + result["runtime"] = body["runTime"] + if "started" in body: + result["started"] = body["started"] + if "state" in body: + result["state"] = body["state"] + if "stream" in body: + result["stream"] = body["stream"] + if "user" in body: + result["user"] = body["user"] + return verify_format(body, result) + + +def format_aql_tracking(body: Json) -> Json: + """Format AQL tracking data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + if "enabled" in body: + result["enabled"] = body["enabled"] + if "maxQueryStringLength" in body: + result["max_query_string_length"] = body["maxQueryStringLength"] + if "maxSlowQueries" in body: + result["max_slow_queries"] = body["maxSlowQueries"] + if "slowQueryThreshold" in body: + result["slow_query_threshold"] = body["slowQueryThreshold"] + if "slowStreamingQueryThreshold" in body: + result["slow_streaming_query_threshold"] = body["slowStreamingQueryThreshold"] + if "trackBindVars" in body: + result["track_bind_vars"] = body["trackBindVars"] + if "trackSlowQueries" in body: + result["track_slow_queries"] = body["trackSlowQueries"] + + return verify_format(body, result) + + +def format_tick_values(body: Json) -> Json: + """Format tick data. + + :param body: Input body. + :type body: dict + :return: Formatted body. 
+ :rtype: dict + """ + result: Json = {} + + if "tickMin" in body: + result["tick_min"] = body["tickMin"] + if "tickMax" in body: + result["tick_max"] = body["tickMax"] + if "tick" in body: + result["tick"] = body["tick"] + if "time" in body: + result["time"] = body["time"] + if "server" in body: + result["server"] = format_server_info(body["server"]) + + return verify_format(body, result) + + +def format_server_info(body: Json) -> Json: + """Format server data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + return {"version": body["version"], "server_id": body["serverId"]} + + +def format_server_status(body: Json) -> Json: + """Format server status. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + + if "foxxApi" in body: + result["foxx_api"] = body["foxxApi"] + if "host" in body: + result["host"] = body["host"] + if "hostname" in body: + result["hostname"] = body["hostname"] + if "license" in body: + result["license"] = body["license"] + if "mode" in body: + result["mode"] = body["mode"] + if "operationMode" in body: + result["operation_mode"] = body["operationMode"] + if "pid" in body: + result["pid"] = body["pid"] + if "server" in body: + result["server"] = body["server"] + if "serverInfo" in body: + info = body["serverInfo"] + if "writeOpsEnabled" in info: + info["write_ops_enabled"] = info.pop("writeOpsEnabled") + if "readOnly" in info: + info["read_only"] = info.pop("readOnly") + result["server_info"] = info + if "version" in body: + result["version"] = body["version"] + + return verify_format(body, result) + + +def format_replication_applier_config(body: Json) -> Json: + """Format replication applier configuration data. + + :param body: Input body. + :type body: dict + :return: Formatted body. 
+ :rtype: dict + """ + result: Json = {} + if "endpoint" in body: + result["endpoint"] = body["endpoint"] + if "database" in body: + result["database"] = body["database"] + if "username" in body: + result["username"] = body["username"] + if "verbose" in body: + result["verbose"] = body["verbose"] + if "incremental" in body: + result["incremental"] = body["incremental"] + if "requestTimeout" in body: + result["request_timeout"] = body["requestTimeout"] + if "connectTimeout" in body: + result["connect_timeout"] = body["connectTimeout"] + if "ignoreErrors" in body: + result["ignore_errors"] = body["ignoreErrors"] + if "maxConnectRetries" in body: + result["max_connect_retries"] = body["maxConnectRetries"] + if "lockTimeoutRetries" in body: + result["lock_timeout_retries"] = body["lockTimeoutRetries"] + if "sslProtocol" in body: + result["ssl_protocol"] = body["sslProtocol"] + if "chunkSize" in body: + result["chunk_size"] = body["chunkSize"] + if "skipCreateDrop" in body: + result["skip_create_drop"] = body["skipCreateDrop"] + if "autoStart" in body: + result["auto_start"] = body["autoStart"] + if "adaptivePolling" in body: + result["adaptive_polling"] = body["adaptivePolling"] + if "autoResync" in body: + result["auto_resync"] = body["autoResync"] + if "autoResyncRetries" in body: + result["auto_resync_retries"] = body["autoResyncRetries"] + if "maxPacketSize" in body: + result["max_packet_size"] = body["maxPacketSize"] + if "includeSystem" in body: + result["include_system"] = body["includeSystem"] + if "includeFoxxQueues" in body: + result["include_foxx_queues"] = body["includeFoxxQueues"] + if "requireFromPresent" in body: + result["require_from_present"] = body["requireFromPresent"] + if "restrictType" in body: + result["restrict_type"] = body["restrictType"] + if "restrictCollections" in body: + result["restrict_collections"] = body["restrictCollections"] + if "connectionRetryWaitTime" in body: + result["connection_retry_wait_time"] = body["connectionRetryWaitTime"] + if "initialSyncMaxWaitTime" in body: + result["initial_sync_max_wait_time"] = body["initialSyncMaxWaitTime"] + if "idleMinWaitTime" in body: + result["idle_min_wait_time"] = body["idleMinWaitTime"] + if "idleMaxWaitTime" in body: + result["idle_max_wait_time"] = body["idleMaxWaitTime"] + + return verify_format(body, result) + + +def format_applier_progress(body: Json) -> Json: + """Format replication applier progress data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + if "time" in body: + result["time"] = body["time"] + if "message" in body: + result["message"] = body["message"] + if "failedConnects" in body: + result["failed_connects"] = body["failedConnects"] + + return verify_format(body, result) + + +def format_applier_error(body: Json) -> Json: + """Format replication applier error data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + if "errorNum" in body: + result["error_num"] = body["errorNum"] + if "errorMessage" in body: + result["error_message"] = body["errorMessage"] + if "time" in body: + result["time"] = body["time"] + + return verify_format(body, result) + + +def format_applier_state_details(body: Json) -> Json: + """Format replication applier state details. + + :param body: Input body. + :type body: dict + :return: Formatted body. 
+ :rtype: dict + """ + result: Json = {} + if "started" in body: + result["started"] = body["started"] + if "running" in body: + result["running"] = body["running"] + if "phase" in body: + result["phase"] = body["phase"] + if "time" in body: + result["time"] = body["time"] + if "safeResumeTick" in body: + result["safe_resume_tick"] = body["safeResumeTick"] + if "ticksBehind" in body: + result["ticks_behind"] = body["ticksBehind"] + if "lastAppliedContinuousTick" in body: + result["last_applied_continuous_tick"] = body["lastAppliedContinuousTick"] + if "lastProcessedContinuousTick" in body: + result["last_processed_continuous_tick"] = body["lastProcessedContinuousTick"] + if "lastAvailableContinuousTick" in body: + result["last_available_continuous_tick"] = body["lastAvailableContinuousTick"] + if "progress" in body: + result["progress"] = format_applier_progress(body["progress"]) + if "totalRequests" in body: + result["total_requests"] = body["totalRequests"] + if "totalFailedConnects" in body: + result["total_failed_connects"] = body["totalFailedConnects"] + if "totalEvents" in body: + result["total_events"] = body["totalEvents"] + if "totalDocuments" in body: + result["total_documents"] = body["totalDocuments"] + if "totalRemovals" in body: + result["total_removals"] = body["totalRemovals"] + if "totalResyncs" in body: + result["total_resyncs"] = body["totalResyncs"] + if "totalOperationsExcluded" in body: + result["total_operations_excluded"] = body["totalOperationsExcluded"] + if "totalApplyTime" in body: + result["total_apply_time"] = body["totalApplyTime"] + if "averageApplyTime" in body: + result["average_apply_time"] = body["averageApplyTime"] + if "totalFetchTime" in body: + result["total_fetch_time"] = body["totalFetchTime"] + if "averageFetchTime" in body: + result["average_fetch_time"] = body["averageFetchTime"] + if "lastError" in body: + result["last_error"] = format_applier_error(body["lastError"]) + + return verify_format(body, result) + + +def format_replication_applier_state(body: Json) -> Json: + """Format replication applier state. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + if "endpoint" in body: + result["endpoint"] = body["endpoint"] + if "database" in body: + result["database"] = body["database"] + if "username" in body: + result["username"] = body["username"] + if "state" in body: + result["state"] = format_applier_state_details(body["state"]) + if "server" in body: + result["server"] = format_server_info(body["server"]) + + return verify_format(body, result) + + +def format_replication_state(body: Json) -> Json: + """Format replication state. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + if not isinstance(body, dict): + return body + + result: Json = {} + if "running" in body: + result["running"] = body["running"] + if "time" in body: + result["time"] = body["time"] + if "lastLogTick" in body: + result["last_log_tick"] = body["lastLogTick"] + if "totalEvents" in body: + result["total_events"] = body["totalEvents"] + if "lastUncommittedLogTick" in body: + result["last_uncommitted_log_tick"] = body["lastUncommittedLogTick"] + + return verify_format(body, result) + + +def format_replication_logger_state(body: Json) -> Json: + """Format replication logger state data. + + :param body: Input body. + :type body: dict + :return: Formatted body.
+ :rtype: dict + """ + result: Json = {} + if "state" in body: + result["state"] = format_replication_state(body["state"]) + if "server" in body: + result["server"] = format_server_info(body["server"]) + if "clients" in body: + result["clients"] = body["clients"] + + return verify_format(body, result) + + +def format_replication_collection(body: Json) -> Json: + """Format replication collection data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + if "planVersion" in body: + result["plan_version"] = body["planVersion"] + if "isReady" in body: + result["is_ready"] = body["isReady"] + if "allInSync" in body: + result["all_in_sync"] = body["allInSync"] + if "indexes" in body: + result["indexes"] = [format_index(index) for index in body["indexes"]] + if "parameters" in body: + result["parameters"] = format_collection(body["parameters"]) + + return verify_format(body, result) + + +def format_replication_database(body: Json) -> Json: + """Format replication database data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result = { + "id": body["id"], + "name": body["name"], + "collections": [ + format_replication_collection(col) for col in body["collections"] + ], + "views": [format_view(view) for view in body["views"]], + } + if "properties" in body: + result["properties"] = format_database(body["properties"]) + + return verify_format(body, result) + + +def format_replication_inventory(body: Json) -> Json: + """Format replication inventory data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + if "tick" in body: + result["tick"] = body["tick"] + if "state" in body: + result["state"] = format_replication_state(body["state"]) + + if "databases" in body: + result["databases"] = { + k: format_replication_database(v) for k, v in body["databases"].items() + } + if "collections" in body: + result["collections"] = [ + format_replication_collection(col) for col in body["collections"] + ] + if "views" in body: + result["views"] = [format_view(view) for view in body["views"]] + if "properties" in body: + result["properties"] = format_database(body["properties"]) + + return verify_format(body, result) + + +def format_replication_sync(body: Json) -> Json: + """Format replication sync result. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + if "collections" in body: + result["collections"] = body["collections"] + if "lastLogTick" in body: + result["last_log_tick"] = body["lastLogTick"] + return verify_format(body, result) + + +def format_replication_header(headers: Headers) -> Json: + """Format replication headers. + + :param headers: Request headers. + :type headers: dict + :return: Formatted body. 
+ :rtype: dict + """ + headers = {k.lower(): v for k, v in headers.items()} + result: Json = {} + + if "x-arango-replication-frompresent" in headers: + result["from_present"] = headers["x-arango-replication-frompresent"] == "true" + + if "x-arango-replication-lastincluded" in headers: + result["last_included"] = headers["x-arango-replication-lastincluded"] + + if "x-arango-replication-lastscanned" in headers: + result["last_scanned"] = headers["x-arango-replication-lastscanned"] + + if "x-arango-replication-lasttick" in headers: + result["last_tick"] = headers["x-arango-replication-lasttick"] + + if "x-arango-replication-active" in headers: + result["active"] = headers["x-arango-replication-active"] == "true" + + if "x-arango-replication-checkmore" in headers: + result["check_more"] = headers["x-arango-replication-checkmore"] == "true" + + return result + + +def format_view_link(body: Json) -> Json: + """Format view link data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + if "analyzers" in body: + result["analyzers"] = body["analyzers"] + if "fields" in body: + result["fields"] = body["fields"] + if "includeAllFields" in body: + result["include_all_fields"] = body["includeAllFields"] + if "trackListPositions" in body: + result["track_list_positions"] = body["trackListPositions"] + if "storeValues" in body: + result["store_values"] = body["storeValues"] + + return verify_format(body, result) + + +def format_view_consolidation_policy(body: Json) -> Json: + """Format view consolidation policy data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + if "type" in body: + result["type"] = body["type"] + if "threshold" in body: + result["threshold"] = body["threshold"] + if "segmentsMin" in body: + result["segments_min"] = body["segmentsMin"] + if "segmentsMax" in body: + result["segments_max"] = body["segmentsMax"] + if "segmentsBytesMax" in body: + result["segments_bytes_max"] = body["segmentsBytesMax"] + if "segmentsBytesFloor" in body: + result["segments_bytes_floor"] = body["segmentsBytesFloor"] + if "minScore" in body: + result["min_score"] = body["minScore"] + + return verify_format(body, result) + + +def format_view(body: Json) -> Json: + """Format view data. + + :param body: Input body. + :type body: dict + :return: Formatted body. 
+ :rtype: dict + """ + result: Json = {} + if "globallyUniqueId" in body: + result["global_id"] = body["globallyUniqueId"] + if "id" in body: + result["id"] = body["id"] + if "name" in body: + result["name"] = body["name"] + if "type" in body: + result["type"] = body["type"] + if "cleanupIntervalStep" in body: + result["cleanup_interval_step"] = body["cleanupIntervalStep"] + if "commitIntervalMsec" in body: + result["commit_interval_msec"] = body["commitIntervalMsec"] + if "consolidationIntervalMsec" in body: + result["consolidation_interval_msec"] = body["consolidationIntervalMsec"] + if "consolidationPolicy" in body: + result["consolidation_policy"] = format_view_consolidation_policy( + body["consolidationPolicy"] + ) + if "primarySort" in body: + result["primary_sort"] = body["primarySort"] + if "primarySortCompression" in body: + result["primary_sort_compression"] = body["primarySortCompression"] + if "storedValues" in body: + result["stored_values"] = body["storedValues"] + if "writebufferIdle" in body: + result["writebuffer_idle"] = body["writebufferIdle"] + if "writebufferActive" in body: + result["writebuffer_active"] = body["writebufferActive"] + if "writebufferSizeMax" in body: + result["writebuffer_max_size"] = body["writebufferSizeMax"] + if "links" in body: + result["links"] = { + name: format_view_link(link) for name, link in body["links"].items() + } + + return verify_format(body, result) + + +def format_vertex(body: Json) -> Json: + """Format vertex data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + vertex: Json = body["vertex"] + if "_oldRev" in vertex: + vertex["_old_rev"] = vertex.pop("_oldRev") + + if "new" in body or "old" in body: + result: Json = {"vertex": vertex} + if "new" in body: + result["new"] = body["new"] + if "old" in body: + result["old"] = body["old"] + return result + else: + return vertex + + +def format_edge(body: Json) -> Json: + """Format edge data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + edge: Json = body["edge"] + if "_oldRev" in edge: + edge["_old_rev"] = edge.pop("_oldRev") + + if "new" in body or "old" in body: + result: Json = {"edge": edge} + if "new" in body: + result["new"] = body["new"] + if "old" in body: + result["old"] = body["old"] + return result + else: + return edge + + +def format_tls(body: Json) -> Json: + """Format TLS data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result = body + return verify_format(body, result) + + +def format_backup(body: Json) -> Json: + """Format backup entry. + + :param body: Input body. + :type body: dict + :return: Formatted body. 
+ :rtype: dict + """ + result: Json = {} + + if "previous" in body: + result["previous"] = body["previous"] + if "id" in body: + result["backup_id"] = body["id"] + if "datetime" in body: + result["datetime"] = body["datetime"] + if "potentiallyInconsistent" in body: + result["potentially_inconsistent"] = body["potentiallyInconsistent"] + if "sizeInBytes" in body: + result["size_in_bytes"] = body["sizeInBytes"] + if "nrDBServers" in body: + result["dbserver_count"] = body["nrDBServers"] + if "nrFiles" in body: + result["file_count"] = body["nrFiles"] + + if "available" in body: + result["available"] = body["available"] + if "version" in body: + result["version"] = body["version"] + if "keys" in body: + result["keys"] = body["keys"] + if "nrPiecesPresent" in body: + result["pieces_present"] = body["nrPiecesPresent"] + + return verify_format(body, result) + + +def format_backups(body: Json) -> Json: + """Format backup entries. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + + if "server" in body: + result["server"] = body["server"] + if "list" in body: + result["list"] = { + key: format_backup(backup) for key, backup in body["list"].items() + } + return verify_format(body, result) + + +def format_backup_restore(body: Json) -> Json: + """Format backup restore data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + if "id" in body: + result["backup_id"] = body["id"] + if "isCluster" in body: + result["is_cluster"] = body["isCluster"] + if "previous" in body: + result["previous"] = body["previous"] + + return verify_format(body, result) + + +def format_backup_dbserver(body: Json) -> Json: + """Format backup DBserver data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + return {"status": body["Status"]} + + +def format_backup_transfer(body: Json) -> Json: + """Format backup download/upload data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result: Json = {} + if "Timestamp" in body: + result["timestamp"] = body["Timestamp"] + if "DownloadId" in body: + result["download_id"] = body["DownloadId"] + if "downloadId" in body: + result["download_id"] = body["downloadId"] + if "UploadId" in body: + result["upload_id"] = body["UploadId"] + if "uploadId" in body: + result["upload_id"] = body["uploadId"] + if "Cancelled" in body: + result["cancelled"] = body["Cancelled"] + if "BackupId" in body: + result["backup_id"] = body["BackupId"] + if "DBServers" in body: + result["dbservers"] = { + k: format_backup_dbserver(v) for k, v in body["DBServers"].items() + } + return verify_format(body, result) + + +def format_service_data(body: Json) -> Json: + """Format Foxx service data. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + if "manifest" in body: + manifest = body["manifest"] + if "defaultDocument" in manifest: + manifest["default_document"] = manifest.pop("defaultDocument") + + return body + + +def format_pregel_job_data(body: Json) -> Json: + """Format Pregel job data. + + :param body: Input body. + :type body: dict + :return: Formatted body. 
+ :rtype: dict + """ + result: Json = {} + + if "aggregators" in body: + result["aggregators"] = body["aggregators"] + if "computationTime" in body: + result["computation_time"] = body["computationTime"] + if "edgeCount" in body: + result["edge_count"] = body["edgeCount"] + if "gss" in body: + result["gss"] = body["gss"] + if "receivedCount" in body: + result["received_count"] = body["receivedCount"] + if "sendCount" in body: + result["send_count"] = body["sendCount"] + if "startupTime" in body: + result["startup_time"] = body["startupTime"] + if "state" in body: + result["state"] = body["state"] + if "totalRuntime" in body: + result["total_runtime"] = body["totalRuntime"] + if "vertexCount" in body: + result["vertex_count"] = body["vertexCount"] + + return verify_format(body, result) + + +def format_graph_properties(body: Json) -> Json: + """Format graph properties. + + :param body: Input body. + :type body: dict + :return: Formatted body. + :rtype: dict + """ + result = { + "id": body["_id"], + "key": body["_key"], + "name": body["name"], + "revision": body["_rev"], + "orphan_collections": body["orphanCollections"], + "edge_definitions": [ + { + "edge_collection": edge_definition["collection"], + "from_vertex_collections": edge_definition["from"], + "to_vertex_collections": edge_definition["to"], + } + for edge_definition in body["edgeDefinitions"] + ], + } + if "isSmart" in body: + result["smart"] = body["isSmart"] + if "smartGraphAttribute" in body: + result["smart_field"] = body["smartGraphAttribute"] + if "numberOfShards" in body: + result["shard_count"] = body["numberOfShards"] + if "replicationFactor" in body: + result["replication_factor"] = body["replicationFactor"] + + return verify_format(body, result) + + +def format_query_cache_entry(body: Json) -> Json: + """Format AQL query cache entry. + + :param body: Input body. + :type body: dict + :return: Formatted body. 
+ :rtype: dict + """ + result: Json = {} + + if "hash" in body: + result["hash"] = body["hash"] + if "query" in body: + result["query"] = body["query"] + if "bindVars" in body: + result["bind_vars"] = body["bindVars"] + if "size" in body: + result["size"] = body["size"] + if "results" in body: + result["results"] = body["results"] + if "started" in body: + result["started"] = body["started"] + if "hits" in body: + result["hits"] = body["hits"] + if "runTime" in body: + result["runtime"] = body["runTime"] + if "dataSources" in body: + result["data_sources"] = body["dataSources"] + + return verify_format(body, result) diff --git a/aioarango/graph.py b/aioarango/graph.py new file mode 100644 index 000000000..fcc4a2b27 --- /dev/null +++ b/aioarango/graph.py @@ -0,0 +1,905 @@ +from typing import List, Optional, Sequence, Union + +from aioarango.api import ApiGroup +from aioarango.collection import EdgeCollection, VertexCollection +from aioarango.connection import Connection +from aioarango.exceptions import ( + EdgeDefinitionCreateError, + EdgeDefinitionDeleteError, + EdgeDefinitionListError, + EdgeDefinitionReplaceError, + GraphPropertiesError, + GraphTraverseError, + VertexCollectionCreateError, + VertexCollectionDeleteError, + VertexCollectionListError, +) +from aioarango.executor import ApiExecutor +from aioarango.formatter import format_graph_properties +from aioarango.request import Request +from aioarango.response import Response +from aioarango.result import Result +from aioarango.typings import Json, Jsons +from aioarango.utils import get_col_name, get_doc_id + + +class Graph(ApiGroup): + """Graph API wrapper.""" + + def __init__( + self, connection: Connection, executor: ApiExecutor, name: str + ) -> None: + super().__init__(connection, executor) + self._name = name + + def __repr__(self) -> str: + return f"<Graph {self._name}>" + + def _get_col_by_vertex(self, vertex: Union[str, Json]) -> VertexCollection: + """Return the vertex collection for the given vertex document. + + :param vertex: Vertex document ID or body with "_id" field. + :type vertex: str | dict + :return: Vertex collection API wrapper. + :rtype: aioarango.collection.VertexCollection + """ + return self.vertex_collection(get_col_name(vertex)) + + def _get_col_by_edge(self, edge: Union[str, Json]) -> EdgeCollection: + """Return the edge collection for the given edge document. + + :param edge: Edge document ID or body with "_id" field. + :type edge: str | dict + :return: Edge collection API wrapper. + :rtype: aioarango.collection.EdgeCollection + """ + return self.edge_collection(get_col_name(edge)) + + @property + def name(self) -> str: + """Return the graph name. + + :return: Graph name. + :rtype: str + """ + return self._name + + async def properties(self) -> Result[Json]: + """Return graph properties. + + :return: Graph properties. + :rtype: dict + :raise aioarango.exceptions.GraphPropertiesError: If retrieval fails. + """ + request = Request(method="get", endpoint=f"/_api/gharial/{self._name}") + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_graph_properties(resp.body["graph"]) + raise GraphPropertiesError(resp, request) + + return await self._execute(request, response_handler) + + ################################ + # Vertex Collection Management # + ################################ + + async def has_vertex_collection(self, name: str) -> Result[bool]: + """Check if the graph has the given vertex collection. + + :param name: Vertex collection name.
+ :type name: str + :return: True if vertex collection exists, False otherwise. + :rtype: bool + """ + request = Request( + method="get", + endpoint=f"/_api/gharial/{self._name}/vertex", + ) + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return name in resp.body["collections"] + raise VertexCollectionListError(resp, request) + + return await self._execute(request, response_handler) + + async def vertex_collections(self) -> Result[List[str]]: + """Return vertex collections in the graph that are not orphaned. + + :return: Names of vertex collections that are not orphaned. + :rtype: [str] + :raise aioarango.exceptions.VertexCollectionListError: If retrieval fails. + """ + request = Request( + method="get", + endpoint=f"/_api/gharial/{self._name}/vertex", + ) + + def response_handler(resp: Response) -> List[str]: + if not resp.is_success: + raise VertexCollectionListError(resp, request) + return sorted(set(resp.body["collections"])) + + return await self._execute(request, response_handler) + + def vertex_collection(self, name: str) -> VertexCollection: + """Return the vertex collection API wrapper. + + :param name: Vertex collection name. + :type name: str + :return: Vertex collection API wrapper. + :rtype: aioarango.collection.VertexCollection + """ + return VertexCollection(self._conn, self._executor, self._name, name) + + async def create_vertex_collection(self, name: str) -> Result[VertexCollection]: + """Create a vertex collection in the graph. + + :param name: Vertex collection name. + :type name: str + :return: Vertex collection API wrapper. + :rtype: aioarango.collection.VertexCollection + :raise aioarango.exceptions.VertexCollectionCreateError: If create fails. + """ + request = Request( + method="post", + endpoint=f"/_api/gharial/{self._name}/vertex", + data={"collection": name}, + ) + + def response_handler(resp: Response) -> VertexCollection: + if resp.is_success: + return self.vertex_collection(name) + raise VertexCollectionCreateError(resp, request) + + return await self._execute(request, response_handler) + + async def delete_vertex_collection(self, name: str, purge: bool = False) -> Result[bool]: + """Remove a vertex collection from the graph. + + :param name: Vertex collection name. + :type name: str + :param purge: If set to True, the vertex collection is not just deleted + from the graph but also from the database completely. + :type purge: bool + :return: True if vertex collection was deleted successfully. + :rtype: bool + :raise aioarango.exceptions.VertexCollectionDeleteError: If delete fails. + """ + request = Request( + method="delete", + endpoint=f"/_api/gharial/{self._name}/vertex/{name}", + params={"dropCollection": purge}, + ) + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + raise VertexCollectionDeleteError(resp, request) + + return await self._execute(request, response_handler) + + ############################## + # Edge Collection Management # + ############################## + + async def has_edge_definition(self, name: str) -> Result[bool]: + """Check if the graph has the given edge definition. + + :param name: Edge collection name. + :type name: str + :return: True if edge definition exists, False otherwise. 
+ :rtype: bool + """ + request = Request(method="get", endpoint=f"/_api/gharial/{self._name}") + + def response_handler(resp: Response) -> bool: + if not resp.is_success: + raise EdgeDefinitionListError(resp, request) + + body = resp.body["graph"] + return any( + edge_definition["collection"] == name + for edge_definition in body["edgeDefinitions"] + ) + + return await self._execute(request, response_handler) + + async def has_edge_collection(self, name: str) -> Result[bool]: + """Check if the graph has the given edge collection. + + :param name: Edge collection name. + :type name: str + :return: True if edge collection exists, False otherwise. + :rtype: bool + """ + return await self.has_edge_definition(name) + + def edge_collection(self, name: str) -> EdgeCollection: + """Return the edge collection API wrapper. + + :param name: Edge collection name. + :type name: str + :return: Edge collection API wrapper. + :rtype: aioarango.collection.EdgeCollection + """ + return EdgeCollection(self._conn, self._executor, self._name, name) + + async def edge_definitions(self) -> Result[Jsons]: + """Return the edge definitions of the graph. + + :return: Edge definitions of the graph. + :rtype: [dict] + :raise aioarango.exceptions.EdgeDefinitionListError: If retrieval fails. + """ + request = Request(method="get", endpoint=f"/_api/gharial/{self._name}") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise EdgeDefinitionListError(resp, request) + + body = resp.body["graph"] + return [ + { + "edge_collection": edge_definition["collection"], + "from_vertex_collections": edge_definition["from"], + "to_vertex_collections": edge_definition["to"], + } + for edge_definition in body["edgeDefinitions"] + ] + + return await self._execute(request, response_handler) + + async def create_edge_definition( + self, + edge_collection: str, + from_vertex_collections: Sequence[str], + to_vertex_collections: Sequence[str], + ) -> Result[EdgeCollection]: + """Create a new edge definition. + + An edge definition consists of an edge collection, "from" vertex + collection(s) and "to" vertex collection(s). Here is an example entry: + + .. code-block:: python + + { + 'edge_collection': 'edge_collection_name', + 'from_vertex_collections': ['from_vertex_collection_name'], + 'to_vertex_collections': ['to_vertex_collection_name'] + } + + :param edge_collection: Edge collection name. + :type edge_collection: str + :param from_vertex_collections: Names of "from" vertex collections. + :type from_vertex_collections: [str] + :param to_vertex_collections: Names of "to" vertex collections. + :type to_vertex_collections: [str] + :return: Edge collection API wrapper. + :rtype: aioarango.collection.EdgeCollection + :raise aioarango.exceptions.EdgeDefinitionCreateError: If create fails. + """ + request = Request( + method="post", + endpoint=f"/_api/gharial/{self._name}/edge", + data={ + "collection": edge_collection, + "from": from_vertex_collections, + "to": to_vertex_collections, + }, + ) + + def response_handler(resp: Response) -> EdgeCollection: + if resp.is_success: + return self.edge_collection(edge_collection) + raise EdgeDefinitionCreateError(resp, request) + + return await self._execute(request, response_handler) + + async def replace_edge_definition( + self, + edge_collection: str, + from_vertex_collections: Sequence[str], + to_vertex_collections: Sequence[str], + ) -> Result[EdgeCollection]: + """Replace an edge definition. + + :param edge_collection: Edge collection name. 
+ :type edge_collection: str + :param from_vertex_collections: Names of "from" vertex collections. + :type from_vertex_collections: [str] + :param to_vertex_collections: Names of "to" vertex collections. + :type to_vertex_collections: [str] + :return: Edge collection API wrapper. + :rtype: aioarango.collection.EdgeCollection + :raise aioarango.exceptions.EdgeDefinitionReplaceError: If replace fails. + """ + request = Request( + method="put", + endpoint=f"/_api/gharial/{self._name}/edge/{edge_collection}", + data={ + "collection": edge_collection, + "from": from_vertex_collections, + "to": to_vertex_collections, + }, + ) + + def response_handler(resp: Response) -> EdgeCollection: + if resp.is_success: + return self.edge_collection(edge_collection) + raise EdgeDefinitionReplaceError(resp, request) + + return await self._execute(request, response_handler) + + async def delete_edge_definition(self, name: str, purge: bool = False) -> Result[bool]: + """Delete an edge definition from the graph. + + :param name: Edge collection name. + :type name: str + :param purge: If set to True, the edge definition is not just removed + from the graph but the edge collection is also deleted completely + from the database. + :type purge: bool + :return: True if edge definition was deleted successfully. + :rtype: bool + :raise aioarango.exceptions.EdgeDefinitionDeleteError: If delete fails. + """ + request = Request( + method="delete", + endpoint=f"/_api/gharial/{self._name}/edge/{name}", + params={"dropCollections": purge}, + ) + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + raise EdgeDefinitionDeleteError(resp, request) + + return await self._execute(request, response_handler) + + ################### + # Graph Functions # + ################### + + async def traverse( + self, + start_vertex: Union[str, Json], + direction: str = "outbound", + item_order: str = "forward", + strategy: Optional[str] = None, + order: Optional[str] = None, + edge_uniqueness: Optional[str] = None, + vertex_uniqueness: Optional[str] = None, + max_iter: Optional[int] = None, + min_depth: Optional[int] = None, + max_depth: Optional[int] = None, + init_func: Optional[str] = None, + sort_func: Optional[str] = None, + filter_func: Optional[str] = None, + visitor_func: Optional[str] = None, + expander_func: Optional[str] = None, + ) -> Result[Json]: + """Traverse the graph and return the visited vertices and edges. + + :param start_vertex: Start vertex document ID or body with "_id" field. + :type start_vertex: str | dict + :param direction: Traversal direction. Allowed values are "outbound" + (default), "inbound" and "any". + :type direction: str + :param item_order: Item iteration order. Allowed values are "forward" + (default) and "backward". + :type item_order: str + :param strategy: Traversal strategy. Allowed values are "depthfirst" + and "breadthfirst". + :type strategy: str | None + :param order: Traversal order. Allowed values are "preorder", + "postorder", and "preorder-expander". + :type order: str | None + :param edge_uniqueness: Uniqueness for visited edges. Allowed values + are "global", "path" or "none". + :type edge_uniqueness: str | None + :param vertex_uniqueness: Uniqueness for visited vertices. Allowed + values are "global", "path" or "none". + :type vertex_uniqueness: str | None + :param max_iter: If set, halt the traversal after the given number of + iterations. This parameter can be used to prevent endless loops in + cyclic graphs. 
+ :type max_iter: int | None + :param min_depth: Minimum depth of the nodes to visit. + :type min_depth: int | None + :param max_depth: Maximum depth of the nodes to visit. + :type max_depth: int | None + :param init_func: Initialization function in JavaScript with signature + ``(config, result) -> void``. This function is used to initialize + values in the result. + :type init_func: str | None + :param sort_func: Sorting function in JavaScript with signature + ``(left, right) -> integer``, which returns ``-1`` if ``left < + right``, ``+1`` if ``left > right`` and ``0`` if ``left == right``. + :type sort_func: str | None + :param filter_func: Filter function in JavaScript with signature + ``(config, vertex, path) -> mixed``, where ``mixed`` can have one + of the following values (or an array with multiple): "exclude" (do + not visit the vertex), "prune" (do not follow the edges of the + vertex), or "undefined" (visit the vertex and follow its edges). + :type filter_func: str | None + :param visitor_func: Visitor function in JavaScript with signature + ``(config, result, vertex, path, connected) -> void``. The return + value is ignored, ``result`` is modified by reference, and + ``connected`` is populated only when parameter **order** is set to + "preorder-expander". + :type visitor_func: str | None + :param expander_func: Expander function in JavaScript with signature + ``(config, vertex, path) -> mixed``. The function must return an + array of connections for ``vertex``. Each connection is an object + with attributes "edge" and "vertex". + :type expander_func: str | None + :return: Visited edges and vertices. + :rtype: dict + :raise aioarango.exceptions.GraphTraverseError: If traversal fails. + """ + if strategy is not None: + if strategy.lower() == "dfs": + strategy = "depthfirst" + elif strategy.lower() == "bfs": + strategy = "breadthfirst" + + uniqueness = {} + if vertex_uniqueness is not None: + uniqueness["vertices"] = vertex_uniqueness + if edge_uniqueness is not None: + uniqueness["edges"] = edge_uniqueness + + data: Json = { + "startVertex": get_doc_id(start_vertex), + "graphName": self._name, + "direction": direction, + "strategy": strategy, + "order": order, + "itemOrder": item_order, + "uniqueness": uniqueness or None, + "maxIterations": max_iter, + "minDepth": min_depth, + "maxDepth": max_depth, + "init": init_func, + "filter": filter_func, + "visitor": visitor_func, + "sort": sort_func, + "expander": expander_func, + } + request = Request( + method="post", + endpoint="/_api/traversal", + data={k: v for k, v in data.items() if v is not None}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise GraphTraverseError(resp, request) + + result: Json = resp.body["result"]["visited"] + return result + + return await self._execute(request, response_handler) + + ##################### + # Vertex Management # + ##################### + + async def has_vertex( + self, + vertex: Union[str, Json], + rev: Optional[str] = None, + check_rev: bool = True, + ) -> Result[bool]: + """Check if the given vertex document exists in the graph. + + :param vertex: Vertex document ID or body with "_id" field. + :type vertex: str | dict + :param rev: Expected document revision. Overrides the value of "_rev" + field in **vertex** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **vertex** (if given) is + compared against the revision of target vertex document.
+ :type check_rev: bool + :return: True if vertex document exists, False otherwise. + :rtype: bool + :raise aioarango.exceptions.DocumentInError: If check fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + return await self._get_col_by_vertex(vertex).has(vertex, rev, check_rev) + + async def vertex( + self, + vertex: Union[str, Json], + rev: Optional[str] = None, + check_rev: bool = True, + ) -> Result[Optional[Json]]: + """Return a vertex document. + + :param vertex: Vertex document ID or body with "_id" field. + :type vertex: str | dict + :param rev: Expected document revision. Overrides the value of "_rev" + field in **vertex** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **vertex** (if given) is + compared against the revision of target vertex document. + :type check_rev: bool + :return: Vertex document or None if not found. + :rtype: dict | None + :raise aioarango.exceptions.DocumentGetError: If retrieval fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + return await self._get_col_by_vertex(vertex).get(vertex, rev, check_rev) + + async def insert_vertex( + self, + collection: str, + vertex: Json, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, Json]]: + """Insert a new vertex document. + + :param collection: Vertex collection name. + :type collection: str + :param vertex: New vertex document to insert. If it has "_key" or "_id" + field, its value is used as key of the new vertex (otherwise it is + auto-generated). Any "_rev" field is ignored. + :type vertex: dict + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentInsertError: If insert fails. + """ + return await self.vertex_collection(collection).insert(vertex, sync, silent) + + async def update_vertex( + self, + vertex: Json, + check_rev: bool = True, + keep_none: bool = True, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, Json]]: + """Update a vertex document. + + :param vertex: Partial or full vertex document with updated values. It + must contain the "_id" field. + :type vertex: dict + :param check_rev: If set to True, revision of **vertex** (if given) is + compared against the revision of target vertex document. + :type check_rev: bool + :param keep_none: If set to True, fields with value None are retained + in the document. If set to False, they are removed completely. + :type keep_none: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentUpdateError: If update fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch.
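+
+        A minimal usage sketch (``db``, the graph name and the document are
+        hypothetical):
+
+        .. code-block:: python
+
+            graph = db.graph("school")
+            await graph.update_vertex({"_id": "students/jane", "age": 21})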
+ """ + return await self._get_col_by_vertex(vertex).update( + vertex=vertex, + check_rev=check_rev, + keep_none=keep_none, + sync=sync, + silent=silent, + ) + + async def replace_vertex( + self, + vertex: Json, + check_rev: bool = True, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, Json]]: + """Replace a vertex document. + + :param vertex: New vertex document to replace the old one with. It must + contain the "_id" field. + :type vertex: dict + :param check_rev: If set to True, revision of **vertex** (if given) is + compared against the revision of target vertex document. + :type check_rev: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentReplaceError: If replace fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + return await self._get_col_by_vertex(vertex).replace( + vertex=vertex, check_rev=check_rev, sync=sync, silent=silent + ) + + async def delete_vertex( + self, + vertex: Json, + rev: Optional[str] = None, + check_rev: bool = True, + ignore_missing: bool = False, + sync: Optional[bool] = None, + ) -> Result[Union[bool, Json]]: + """Delete a vertex document. + + :param vertex: Vertex document ID or body with "_id" field. + :type vertex: str | dict + :param rev: Expected document revision. Overrides the value of "_rev" + field in **vertex** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **vertex** (if given) is + compared against the revision of target vertex document. + :type check_rev: bool + :param ignore_missing: Do not raise an exception on missing document. + This parameter has no effect in transactions where an exception is + always raised on failures. + :type ignore_missing: bool + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :return: True if vertex was deleted successfully, False if vertex was + not found and **ignore_missing** was set to True (does not apply in + transactions). + :rtype: bool + :raise aioarango.exceptions.DocumentDeleteError: If delete fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + return await self._get_col_by_vertex(vertex).delete( + vertex=vertex, + rev=rev, + check_rev=check_rev, + ignore_missing=ignore_missing, + sync=sync, + ) + + ################### + # Edge Management # + ################### + + async def has_edge( + self, edge: Union[str, Json], rev: Optional[str] = None, check_rev: bool = True + ) -> Result[bool]: + """Check if the given edge document exists in the graph. + + :param edge: Edge document ID or body with "_id" field. + :type edge: str | dict + :param rev: Expected document revision. Overrides the value of "_rev" + field in **edge** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **edge** (if given) is + compared against the revision of target edge document. + :type check_rev: bool + :return: True if edge document exists, False otherwise. + :rtype: bool + :raise aioarango.exceptions.DocumentInError: If check fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. 
+ """ + return await self._get_col_by_edge(edge).has(edge, rev, check_rev) + + async def edge( + self, edge: Union[str, Json], rev: Optional[str] = None, check_rev: bool = True + ) -> Result[Optional[Json]]: + """Return an edge document. + + :param edge: Edge document ID or body with "_id" field. + :type edge: str | dict + :param rev: Expected document revision. Overrides the value of "_rev" + field in **edge** if present. + :type rev: str | None + :param check_rev: If set to True, revision of **edge** (if given) is + compared against the revision of target edge document. + :type check_rev: bool + :return: Edge document or None if not found. + :rtype: dict | None + :raise aioarango.exceptions.DocumentGetError: If retrieval fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + return await self._get_col_by_edge(edge).get(edge, rev, check_rev) + + async def insert_edge( + self, + collection: str, + edge: Json, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, Json]]: + """Insert a new edge document. + + :param collection: Edge collection name. + :type collection: str + :param edge: New edge document to insert. It must contain "_from" and + "_to" fields. If it has "_key" or "_id" field, its value is used + as key of the new edge document (otherwise it is auto-generated). + Any "_rev" field is ignored. + :type edge: dict + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentInsertError: If insert fails. + """ + return await self.edge_collection(collection).insert(edge, sync, silent) + + async def update_edge( + self, + edge: Json, + check_rev: bool = True, + keep_none: bool = True, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, Json]]: + """Update an edge document. + + :param edge: Partial or full edge document with updated values. It must + contain the "_id" field. + :type edge: dict + :param check_rev: If set to True, revision of **edge** (if given) is + compared against the revision of target edge document. + :type check_rev: bool + :param keep_none: If set to True, fields with value None are retained + in the document. If set to False, they are removed completely. + :type keep_none: bool | None + :param sync: Block until operation is synchronized to disk. + :type sync: bool | None + :param silent: If set to True, no document metadata is returned. This + can be used to save resources. + :type silent: bool + :return: Document metadata (e.g. document key, revision) or True if + parameter **silent** was set to True. + :rtype: bool | dict + :raise aioarango.exceptions.DocumentUpdateError: If update fails. + :raise aioarango.exceptions.DocumentRevisionError: If revisions mismatch. + """ + return await self._get_col_by_edge(edge).update( + edge=edge, + check_rev=check_rev, + keep_none=keep_none, + sync=sync, + silent=silent, + ) + + async def replace_edge( + self, + edge: Json, + check_rev: bool = True, + sync: Optional[bool] = None, + silent: bool = False, + ) -> Result[Union[bool, Json]]: + """Replace an edge document. + + :param edge: New edge document to replace the old one with. It must + contain the "_id" field. It must also contain the "_from" and "_to" + fields. 
diff --git a/aioarango/http.py b/aioarango/http.py
new file mode 100644
index 000000000..ea592b11c
--- /dev/null
+++ b/aioarango/http.py
@@ -0,0 +1,123 @@
+from abc import ABC, abstractmethod
+from typing import MutableMapping, Optional, Tuple
+
+import httpx
+
+from aioarango.response import Response
+from aioarango.typings import Headers
+
+
+class HTTPClient(ABC):  # pragma: no cover
+    """Abstract base class for HTTP clients."""
+
+    @abstractmethod
+    def create_session(self, host: str) -> httpx.AsyncClient:
+        """Return a new httpx client session given the host URL.
+
+        This method must be overridden by the user.
+
+        :param host: ArangoDB host URL.
+        :type host: str
+        :returns: httpx client object.
+        :rtype: httpx.AsyncClient
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    async def send_request(
+        self,
+        session: httpx.AsyncClient,
+        method: str,
+        url: str,
+        headers: Optional[Headers] = None,
+        params: Optional[MutableMapping[str, str]] = None,
+        data: Optional[str] = None,
+        auth: Optional[Tuple[str, str]] = None,
+    ) -> Response:
+        """Send an HTTP request.
+
+        This method must be overridden by the user.
+
+        :param session: httpx session object.
+        :type session: httpx.AsyncClient
+        :param method: HTTP method in lowercase (e.g. "post").
+        :type method: str
+        :param url: Request URL.
+        :type url: str
+        :param headers: Request headers.
+        :type headers: dict
+        :param params: URL (query) parameters.
+        :type params: dict
+        :param data: Request payload.
+        :type data: str | None
+        :param auth: Username and password.
+        :type auth: tuple
+        :returns: HTTP response.
+        :rtype: aioarango.response.Response
+        """
+        raise NotImplementedError
+
+
+class DefaultHTTPClient(HTTPClient):
+    """Default HTTP client implementation."""
+
+    REQUEST_TIMEOUT = 60
+    RETRY_ATTEMPTS = 3
+
+    def create_session(self, host: str) -> httpx.AsyncClient:
+        """Create and return a new session/connection.
+
+        :param host: ArangoDB host URL.
+        :type host: str
+        :returns: httpx client object.
+        :rtype: httpx.AsyncClient
+        """
+        transport = httpx.AsyncHTTPTransport(retries=self.RETRY_ATTEMPTS)
+        return httpx.AsyncClient(transport=transport)
+
+    async def send_request(
+        self,
+        session: httpx.AsyncClient,
+        method: str,
+        url: str,
+        headers: Optional[Headers] = None,
+        params: Optional[MutableMapping[str, str]] = None,
+        data: Optional[str] = None,
+        auth: Optional[Tuple[str, str]] = None,
+    ) -> Response:
+        """Send an HTTP request.
+
+        :param session: httpx client object.
+        :type session: httpx.AsyncClient
+        :param method: HTTP method in lowercase (e.g. "post").
+        :type method: str
+        :param url: Request URL.
+        :type url: str
+        :param headers: Request headers.
+        :type headers: dict
+        :param params: URL (query) parameters.
+        :type params: dict
+        :param data: Request payload.
+        :type data: str | None
+        :param auth: Username and password.
+        :type auth: tuple
+        :returns: HTTP response.
+        :rtype: aioarango.response.Response
+        """
+        response = await session.request(
+            method=method,
+            url=url,
+            params=params,
+            data=data,
+            headers=headers,
+            auth=auth,
+            timeout=self.REQUEST_TIMEOUT,
+        )
+        return Response(
+            method=method,
+            url=str(response.url),
+            headers=response.headers,
+            status_code=response.status_code,
+            status_text=response.reason_phrase,
+            raw_body=response.text,
+        )
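Editor's note: keeping the timeout and retry count as class attributes on DefaultHTTPClient means callers can tune transport behavior by subclassing rather than editing the vendored code. A hedged sketch; the subclass name and value are invented:

    from aioarango.http import DefaultHTTPClient

    class PatientHTTPClient(DefaultHTTPClient):
        # Hypothetical override: allow long-running AQL queries.
        REQUEST_TIMEOUT = 600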
+ """ + return await self.edge_collection(collection).link( + from_vertex=from_vertex, + to_vertex=to_vertex, + data=data, + sync=sync, + silent=silent, + ) + + async def edges( + self, collection: str, vertex: Union[str, Json], direction: Optional[str] = None + ) -> Result[Json]: + """Return the edge documents coming in and/or out of given vertex. + + :param collection: Edge collection name. + :type collection: str + :param vertex: Vertex document ID or body with "_id" field. + :type vertex: str | dict + :param direction: The direction of the edges. Allowed values are "in" + and "out". If not set, edges in both directions are returned. + :type direction: str + :return: List of edges and statistics. + :rtype: dict + :raise aioarango.exceptions.EdgeListError: If retrieval fails. + """ + return await self.edge_collection(collection).edges(vertex, direction) diff --git a/aioarango/http.py b/aioarango/http.py new file mode 100644 index 000000000..ea592b11c --- /dev/null +++ b/aioarango/http.py @@ -0,0 +1,123 @@ +from abc import ABC, abstractmethod +from typing import MutableMapping, Optional, Tuple + +import httpx + +from aioarango.response import Response +from aioarango.typings import Headers + + +class HTTPClient(ABC): # pragma: no cover + """Abstract base class for HTTP clients.""" + + @abstractmethod + def create_session(self, host: str) -> httpx.AsyncClient: + """Return a new requests session given the host URL. + + This method must be overridden by the user. + + :param host: ArangoDB host URL. + :type host: str + :returns: httpx client object. + :rtype: httpx.AsyncClient + """ + raise NotImplementedError + + @abstractmethod + async def send_request( + self, + session: httpx.AsyncClient, + method: str, + url: str, + headers: Optional[Headers] = None, + params: Optional[MutableMapping[str, str]] = None, + data: Optional[str] = None, + auth: Optional[Tuple[str, str]] = None, + ) -> Response: + """Send an HTTP request. + + This method must be overridden by the user. + + :param session: httpx session object. + :type session: httpx.AsyncClient + :param method: HTTP method in lowercase (e.g. "post"). + :type method: str + :param url: Request URL. + :type url: str + :param headers: Request headers. + :type headers: dict + :param params: URL (query) parameters. + :type params: dict + :param data: Request payload. + :type data: str | None + :param auth: Username and password. + :type auth: tuple + :returns: HTTP response. + :rtype: aioarango.response.Response + """ + raise NotImplementedError + + +class DefaultHTTPClient(HTTPClient): + """Default HTTP client implementation.""" + + REQUEST_TIMEOUT = 60 + RETRY_ATTEMPTS = 3 + + def create_session(self, host: str) -> httpx.AsyncClient: + """Create and return a new session/connection. + + :param host: ArangoDB host URL. + :type host: str | unicode + :returns: httpx client object + :rtype: httpx.AsyncClient + """ + transport = httpx.AsyncHTTPTransport(retries=self.RETRY_ATTEMPTS) + return httpx.AsyncClient(transport=transport) + + async def send_request( + self, + session: httpx.AsyncClient, + method: str, + url: str, + headers: Optional[Headers] = None, + params: Optional[MutableMapping[str, str]] = None, + data: Optional[str] = None, + auth: Optional[Tuple[str, str]] = None, + ) -> Response: + """Send an HTTP request. + + :param session: httpx client object. + :type session: httpx.AsyncClient + :param method: HTTP method in lowercase (e.g. "post"). + :type method: str + :param url: Request URL. + :type url: str + :param headers: Request headers. 
diff --git a/aioarango/resolver.py b/aioarango/resolver.py
new file mode 100644
index 000000000..115aa7357
--- /dev/null
+++ b/aioarango/resolver.py
@@ -0,0 +1,28 @@
+from abc import ABC, abstractmethod
+
+
+class HostResolver(ABC):  # pragma: no cover
+    """Abstract base class for host resolvers."""
+
+    @abstractmethod
+    def get_host_index(self) -> int:
+        raise NotImplementedError
+
+
+class SingleHostResolver(HostResolver):
+    """Single host resolver."""
+
+    def get_host_index(self) -> int:
+        return 0
+
+
+class RoundRobinHostResolver(HostResolver):
+    """Round-robin host resolver."""
+
+    def __init__(self, host_count: int) -> None:
+        self._index = -1
+        self._count = host_count
+
+    def get_host_index(self) -> int:
+        self._index = (self._index + 1) % self._count
+        return self._index
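Editor's note: a quick sketch of the resolvers above. SingleHostResolver pins every request to host 0, while RoundRobinHostResolver cycles through the cluster:

    from aioarango.resolver import RoundRobinHostResolver

    resolver = RoundRobinHostResolver(3)  # hypothetical three-host cluster
    print([resolver.get_host_index() for _ in range(5)])  # [0, 1, 2, 0, 1]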
diff --git a/aioarango/response.py b/aioarango/response.py
new file mode 100644
index 000000000..d5bf4c5f0
--- /dev/null
+++ b/aioarango/response.py
@@ -0,0 +1,75 @@
+from typing import Any, MutableMapping, Optional
+
+
+class Response:
+    """HTTP response.
+
+    :param method: HTTP method in lowercase (e.g. "post").
+    :type method: str
+    :param url: API URL.
+    :type url: str
+    :param headers: Response headers.
+    :type headers: MutableMapping
+    :param status_code: Response status code.
+    :type status_code: int
+    :param status_text: Response status text.
+    :type status_text: str
+    :param raw_body: Raw response body.
+    :type raw_body: str
+
+    :ivar method: HTTP method in lowercase (e.g. "post").
+    :vartype method: str
+    :ivar url: API URL.
+    :vartype url: str
+    :ivar headers: Response headers.
+    :vartype headers: MutableMapping
+    :ivar status_code: Response status code.
+    :vartype status_code: int
+    :ivar status_text: Response status text.
+    :vartype status_text: str
+    :ivar raw_body: Raw response body.
+    :vartype raw_body: str
+    :ivar body: JSON-deserialized response body.
+    :vartype body: str | bool | int | float | list | dict | None
+    :ivar error_code: Error code from ArangoDB server.
+    :vartype error_code: int
+    :ivar error_message: Error message from ArangoDB server.
+    :vartype error_message: str
+    :ivar is_success: True if response status code was 2XX.
+    :vartype is_success: bool
+    """
+
+    __slots__ = (
+        "method",
+        "url",
+        "headers",
+        "status_code",
+        "status_text",
+        "body",
+        "raw_body",
+        "error_code",
+        "error_message",
+        "is_success",
+    )
+
+    def __init__(
+        self,
+        method: str,
+        url: str,
+        headers: MutableMapping[str, str],
+        status_code: int,
+        status_text: str,
+        raw_body: str,
+    ) -> None:
+        self.method = method.lower()
+        self.url = url
+        self.headers = headers
+        self.status_code = status_code
+        self.status_text = status_text
+        self.raw_body = raw_body
+
+        # Populated later by the connection once the body is deserialized.
+        self.body: Any = None
+        self.error_code: Optional[int] = None
+        self.error_message: Optional[str] = None
+        self.is_success: Optional[bool] = None
diff --git a/aioarango/result.py b/aioarango/result.py
new file mode 100644
index 000000000..e5cd9243b
--- /dev/null
+++ b/aioarango/result.py
@@ -0,0 +1,6 @@
+from typing import TypeVar, Union
+
+
+T = TypeVar("T")
+
+Result = Union[T, None]
diff --git a/aioarango/typings.py b/aioarango/typings.py
new file mode 100644
index 000000000..ae849f889
--- /dev/null
+++ b/aioarango/typings.py
@@ -0,0 +1,7 @@
+from typing import Any, Dict, List, MutableMapping, Sequence, Union
+
+Json = Dict[str, Any]
+Jsons = List[Json]
+Params = MutableMapping[str, Union[bool, int, str]]
+Headers = MutableMapping[str, str]
+Fields = Union[str, Sequence[str]]
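Editor's note: in upstream python-arango, `Result` can also wrap async and batch job objects; in this vendored copy those job types are gone, so `Result[T]` collapses to "T or None". A hedged sketch of the aliases in a signature (the function is invented):

    from aioarango.result import Result
    from aioarango.typings import Json

    async def find_document(doc_id: str) -> Result[Json]:  # i.e. Optional[Json]
        ...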
diff --git a/aioarango/utils.py b/aioarango/utils.py
new file mode 100644
index 000000000..c540c2e82
--- /dev/null
+++ b/aioarango/utils.py
@@ -0,0 +1,79 @@
+import logging
+from contextlib import contextmanager
+from typing import Any, Iterator, Union
+
+from aioarango.exceptions import DocumentParseError
+from aioarango.typings import Json
+
+
+@contextmanager
+def suppress_warning(logger_name: str) -> Iterator[None]:
+    """Temporarily suppress messages from the given logger.
+
+    :param logger_name: Full name of the logger.
+    :type logger_name: str
+    """
+    logger = logging.getLogger(logger_name)
+    original_log_level = logger.getEffectiveLevel()
+    logger.setLevel(logging.CRITICAL)
+    try:
+        yield
+    finally:
+        # Restore the original level even if the body raises.
+        logger.setLevel(original_log_level)
+
+
+def get_col_name(doc: Union[str, Json]) -> str:
+    """Return the collection name from input.
+
+    :param doc: Document ID or body with "_id" field.
+    :type doc: str | dict
+    :return: Collection name.
+    :rtype: str
+    :raise aioarango.exceptions.DocumentParseError: If document ID is missing.
+    """
+    try:
+        doc_id: str = doc["_id"] if isinstance(doc, dict) else doc
+    except KeyError:
+        raise DocumentParseError('field "_id" required')
+    else:
+        return doc_id.split("/", 1)[0]
+
+
+def get_doc_id(doc: Union[str, Json]) -> str:
+    """Return the document ID from input.
+
+    :param doc: Document ID or body with "_id" field.
+    :type doc: str | dict
+    :return: Document ID.
+    :rtype: str
+    :raise aioarango.exceptions.DocumentParseError: If document ID is missing.
+    """
+    try:
+        doc_id: str = doc["_id"] if isinstance(doc, dict) else doc
+    except KeyError:
+        raise DocumentParseError('field "_id" required')
+    else:
+        return doc_id
+
+
+def is_none_or_int(obj: Any) -> bool:
+    """Check if obj is None or a non-negative integer.
+
+    :param obj: Object to check.
+    :type obj: object
+    :return: True if object is None or a non-negative integer.
+    :rtype: bool
+    """
+    return obj is None or (isinstance(obj, int) and obj >= 0)
+
+
+def is_none_or_str(obj: Any) -> bool:
+    """Check if obj is None or a string.
+
+    :param obj: Object to check.
+    :type obj: object
+    :return: True if object is None or a string.
+    :rtype: bool
+    """
+    return obj is None or isinstance(obj, str)
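Editor's note: a hedged sketch of the helpers above; the document IDs and the logger name are examples only:

    from aioarango.utils import get_col_name, get_doc_id, suppress_warning

    print(get_col_name("nodes/n1"))          # "nodes"
    print(get_doc_id({"_id": "nodes/n1"}))   # "nodes/n1"

    # Silence one chatty logger for the duration of a block.
    with suppress_warning("httpx"):
        ...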
diff --git a/aioarango/version.py b/aioarango/version.py
new file mode 100644
index 000000000..b794fd409
--- /dev/null
+++ b/aioarango/version.py
@@ -0,0 +1 @@
+__version__ = '0.1.0'
diff --git a/src/service/app.py b/src/service/app.py
index 0941f2003..f6fe7c1a5 100644
--- a/src/service/app.py
+++ b/src/service/app.py
@@ -40,12 +40,15 @@ SERVICE_DESCRIPTION = "A repository of data collections and and associated analyses"
 
 
+# httpx is super chatty if the root logger is set to INFO
+logging.basicConfig(level=logging.WARNING)
+logging.getLogger("src").setLevel(logging.INFO)
+
 
 def create_app(noop=False):
     """
     Create the Collections application
     """
-    logging.basicConfig(level=logging.INFO)
     # deliberately not documenting noop, should go away when we have real tests
     if noop:
         # temporary for prototype status. Eventually need full test suite with
diff --git a/src/service/deletion.py b/src/service/deletion.py
index fe9e71ca9..19d3be59d 100644
--- a/src/service/deletion.py
+++ b/src/service/deletion.py
@@ -110,7 +110,7 @@ async def move_match_to_deleted_state(
 
 
 async def _move_matches_to_deletion(deps: PickleableDependencies, subset_age_ms: int):
-    logging.basicConfig(level=logging.INFO)
+    _logger().setLevel(level=logging.INFO)
     _logger().info("Marking matches for deletion")
     cli, storage = await deps.get_storage()
     try:
@@ -144,7 +144,7 @@ async def move_selection_to_deleted_state(
 
 
 async def _move_selections_to_deletion(deps: PickleableDependencies, subset_age_ms: int):
-    logging.basicConfig(level=logging.INFO)
+    _logger().setLevel(level=logging.INFO)
     _logger().info("Marking selections for deletion")
     cli, storage = await deps.get_storage()
     try:
@@ -201,7 +201,7 @@ async def _delete_subset(
 
 
 async def _delete_matches(deps: PickleableDependencies):
-    logging.basicConfig(level=logging.INFO)
+    _logger().setLevel(level=logging.INFO)
     _logger().info("Starting match data deletion process")
     cli, storage = await deps.get_storage()
     try:
@@ -214,7 +214,7 @@ async def proc(m):
 
 
 async def _delete_selections(deps: PickleableDependencies):
-    logging.basicConfig(level=logging.INFO)
+    _logger().setLevel(level=logging.INFO)
     _logger().info("Starting selection data deletion process")
     cli, storage = await deps.get_storage()
     try:
diff --git a/src/service/processing.py b/src/service/processing.py
index 14d2eb2fb..7daed5838 100644
--- a/src/service/processing.py
+++ b/src/service/processing.py
@@ -62,6 +62,8 @@ def run_async_process(target: Callable, args: list[Any]):
 
 
 def _run_async_process(target: Callable, args: list[Any]):
+    # the spawned process starts with no logging handlers anywhere in the logger tree
+    logging.basicConfig(level=logging.WARNING)
    asyncio.run(target(*args))
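Editor's note: the logging changes above all implement one pattern: configure the root logger once at WARNING so chatty third-party libraries (httpx in particular) stay quiet, then opt the application's own logger tree in at INFO. A minimal self-contained sketch of the same pattern:

    import logging

    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("src").setLevel(logging.INFO)

    logging.getLogger("httpx").info("dropped")     # below the root WARNING level
    logging.getLogger("src.service").info("kept")  # inherits INFO from "src"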