diff --git a/src/index.ts b/src/index.ts index 4f1747be..8eedf12b 100644 --- a/src/index.ts +++ b/src/index.ts @@ -17,7 +17,6 @@ import { } from "./utils"; import * as middleware from "./middleware"; -import { askBestBlock } from "./services/bestblock"; import { handleSignedTx } from "./services/signedTransaction"; import { handlePoolInfo } from "./services/poolInfo"; import { handleGetAccountState } from "./services/accountState"; @@ -62,7 +61,7 @@ createTransactionOutputView(pool); createUtxoFunctions(pool); createTransactionUtilityFunctions(pool); -const healthChecker = new HealthChecker(() => askBestBlock(pool)); +const healthChecker = new HealthChecker(() => neo.bestblock.getBestBlock()); const router = express(); diff --git a/src/neo4j/bestblock/index.ts b/src/neo4j/bestblock/index.ts index 5a80e1ce..f13fd2f7 100644 --- a/src/neo4j/bestblock/index.ts +++ b/src/neo4j/bestblock/index.ts @@ -1,7 +1,40 @@ import { Driver } from "neo4j-driver"; import { Request, Response } from "express"; +import { CardanoFrag } from "../../Transactions/types"; +import { UtilEither } from "../../utils"; export const bestblock = (driver: Driver) => ({ + getBestBlock: async (): Promise<UtilEither<CardanoFrag>> => { + const session = driver.session(); + + const result = await session.run(` + MATCH (b:Block) + WITH MAX(ID(b)) as max_block_id + + MATCH (block:Block) + WHERE ID(block) = max_block_id + RETURN { + epoch: block.epoch, + slot: block.epoch_slot, + globalSlot: block.slot, + hash: block.hash, + height: block.number + } as block`); + const bestBlock = result.records[0].get(0); + + session.close(); + + return { + kind: "ok", + value: { + epoch: bestBlock.epoch.toNumber(), + slot: bestBlock.slot.toNumber(), + globalSlot: bestBlock.globalSlot.toNumber(), + hash: bestBlock.hash, + height: bestBlock.height.toNumber() + } as any + }; + }, handler: async (_: Request, res: Response) => { const cypher = `MATCH (b:Block) WITH MAX(ID(b)) as max_block_id diff --git a/src/services/assetMintTxs.ts b/src/services/assetMintTxs.ts deleted file mode 100644 index b2adb11d..00000000 --- a/src/services/assetMintTxs.ts +++ /dev/null @@ -1,48 +0,0 @@ -import { Pool } from "pg"; -import { Request, Response } from "express"; - -const getAssetMintMetadata = async (pool: Pool, fingerprint: string) => { - const query = ` - select encode(ma.policy, 'hex') as policy, - encode(ma.name, 'hex') as asset, - ma.fingerprint, - json_agg( - cast ('{"hash": "' || encode(tx.hash, 'hex') || '", "block": {"slot": ' || block.slot_no || ', "epoch": ' || block.epoch_no || '}}' as jsonb) - || jsonb_build_object('metadata', cast ('{"key": ' || meta.key || '}' as jsonb) - || jsonb_build_object('json', meta.json)) - ) as txs - from ma_tx_mint mint - join multi_asset ma on mint.ident = ma.id - join tx on mint.tx_id = tx.id - join block on tx.block_id = block.id - left join tx_metadata meta on tx.id = meta.tx_id - where ma.fingerprint = $1 - group by ma.policy, - ma.name, - ma.fingerprint;`; - - const results = await pool.query(query, [fingerprint]); - - if (results.rows.length === 0) return null; - const row = results.rows[0]; - - return { - policy: row.policy, - name: row.asset, - txs: row.txs, - }; -}; - -export const handleGetAssetMintTxs = - (pool: Pool) => - async (req: Request, res: Response): Promise<void> => { - if (!req.params.fingerprint) - throw new Error("missing fingerprint in request params"); - - const metadata = await getAssetMintMetadata(pool, req.params.fingerprint); - if (metadata) { - res.send(metadata); - } else { - res.status(404).send(); - } - };
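Review note on the new `src/neo4j/bestblock/index.ts`: `getBestBlock` resolves to the `UtilEither` tagged union used across `src/utils`, and the neo4j driver hands back 64-bit `Integer` values that the code narrows with `.toNumber()`. A minimal sketch of how a caller such as the `HealthChecker` thunk wired up in `src/index.ts` might consume it — the `UtilEither` shape is assumed from its usage elsewhere in this diff, and the `BestBlock` interface is illustrative, mirroring the Cypher projection:

```typescript
// Assumed shape of UtilEither, inferred from how src/utils results are
// constructed elsewhere in this diff ({ kind: "ok", value } / { kind: "error", errMsg }).
type UtilEither<T> = { kind: "ok"; value: T } | { kind: "error"; errMsg: string };

// Illustrative interface mirroring the map returned by the Cypher query above.
interface BestBlock {
  epoch: number;
  slot: number;
  globalSlot: number;
  hash: string;
  height: number;
}

// One plausible consumption pattern: call the thunk and treat a
// non-decreasing chain height as a sign the data source is healthy.
async function checkOnce(
  getBestBlock: () => Promise<UtilEither<BestBlock>>,
  lastHeight: number
): Promise<number> {
  const result = await getBestBlock();
  if (result.kind === "error") {
    throw new Error(`best block lookup failed: ${result.errMsg}`);
  }
  if (result.value.height < lastHeight) {
    throw new Error("chain height went backwards; stale node or rollback");
  }
  return result.value.height;
}
```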
diff --git a/src/services/filterUsedAddress.ts b/src/services/filterUsedAddress.ts deleted file mode 100644 index 867375d3..00000000 --- a/src/services/filterUsedAddress.ts +++ /dev/null @@ -1,122 +0,0 @@ -import { Pool } from "pg"; -import { Request, Response } from "express"; -import { Address } from "@emurgo/cardano-serialization-lib-nodejs"; - -import config from "config"; -import { - assertNever, - validateAddressesReq, - getSpendingKeyHash, - getAddressesByType, -} from "../utils"; -import { decode, encode, toWords } from "bech32"; -import { Prefixes } from "../utils/cip5"; - -const baseQuery = ` - select ( select json_agg((address)) - from tx_out - where tx_id = outertx.tx_id) as outputs, - ( select json_agg((address)) - from tx_out - join tx_in - on tx_in.tx_out_id = tx_out.tx_id - and tx_in.tx_out_index = tx_out.index - where tx_in_id = outertx.tx_id) as inputs - from tx_out as outertx -`; -const filterByAddressQuery = ` - ${baseQuery} - where address = any(($1)::varchar array) -`; -const filterByPaymentCredQuery = ` - ${baseQuery} - where payment_cred = any(($1)::bytea array) -`; - -const addressesRequestLimit: number = config.get("server.addressRequestLimit"); - -export const filterUsedAddresses = - (pool: Pool) => async (req: Request, res: Response) => { - if (!req.body || !req.body.addresses) { - throw new Error("no addresses in request body."); - return; - } - const verifiedAddrs = validateAddressesReq( - addressesRequestLimit, - req.body.addresses - ); - const addressTypes = getAddressesByType(req.body.addresses); - switch (verifiedAddrs.kind) { - case "ok": { - const regularAddresses = [ - ...addressTypes.legacyAddr, - ...addressTypes.bech32, - ]; - - const result: Set<string> = new Set(); - - if (addressTypes.paymentCreds.length > 0) { - // 1) Get all transactions that contain one of these payment keys - const queryResult = await pool.query(filterByPaymentCredQuery, [ - addressTypes.paymentCreds, - ]); - // 2) get all the addresses inside these transactions - const addressesInTxs = queryResult.rows - .flatMap((tx) => [tx.inputs, tx.outputs]) - .flat(); - // 3) get the payment credential for each address in the transaction - const keysInTxs: Array<string> = addressesInTxs.reduce( - (arr, next) => { - try { - decode(next, 1000); // check it's a valid bech32 address - const wasmAddr = Address.from_bech32(next); - const paymentCred = getSpendingKeyHash(wasmAddr); - if (paymentCred != null) arr.push(paymentCred); - wasmAddr.free(); - } catch (_e) { - // silently discard any non-valid Cardano addresses - } - - return arr; - }, - [] as Array<string> - ); - const paymentCredSet = new Set( - addressTypes.paymentCreds.map((str) => str.substring(2)) // cutoff \\x prefix - ); - // 4) filter addresses to the ones we care about for this filterUsed query - keysInTxs - .filter((addr) => paymentCredSet.has(addr)) - .map((addr) => - encode( - Prefixes.PAYMENT_KEY_HASH, - toWords(Buffer.from(addr, "hex")) - ) - ) - .forEach((addr) => result.add(addr)); - } - if (regularAddresses.length > 0) { - // 1) Get all transactions that contain one of these addresses - const queryResult = await pool.query(filterByAddressQuery, [ - regularAddresses, - ]); - // 2) get all the addresses inside these transactions - const addressesInTxs = queryResult.rows - .flatMap((tx) => [tx.inputs, tx.outputs]) - .flat(); - // 3) filter addresses to the ones we care about for this filterUsed query - const addressSet = new Set(regularAddresses); - addressesInTxs - .filter((addr) => addressSet.has(addr)) - .forEach((addr) => result.add(addr)); -
} - res.send(Array.from(result)); - return; - } - case "error": - throw new Error(verifiedAddrs.errMsg); - return; - default: - return assertNever(verifiedAddrs); - } - }; diff --git a/src/services/multiAssetSupply.ts b/src/services/multiAssetSupply.ts deleted file mode 100644 index 9d86f827..00000000 --- a/src/services/multiAssetSupply.ts +++ /dev/null @@ -1,59 +0,0 @@ -import { Pool } from "pg"; -import { Request, Response } from "express"; - -interface Asset { - name: string; - policy: string; -} - -export const handleGetMultiAssetSupply = - (pool: Pool) => - async (req: Request, res: Response): Promise<void> => { - if (!req.body || !req.body.assets) - throw new Error("missing assets on request body"); - if (!Array.isArray(req.body.assets)) - throw new Error("assets should be an array"); - if (req.body.assets.length > 100) - throw new Error("Max limit of 100 assets exceeded."); - if (req.body.assets.length === 0) - throw new Error("assets should not be empty"); - if (req.body.assets.find((a: any) => !a.policy)) - throw new Error("all assets on body should have a name and a policy"); - - const assets: Asset[] = req.body.assets; - - const supplies: { [key: string]: number } = {}; - - await Promise.all( - assets.map(async (asset) => { - const supply = await getMultiAssetSupply(pool, asset); - - const policyAndName = `${asset.policy}.${asset.name}`; - - supplies[policyAndName] = supply; - }) - ); - - res.send({ - supplies, - }); - }; - -const getMultiAssetSupply = async ( - pool: Pool, - asset: Asset -): Promise<number> => { - const query = ` - select sum(mint.quantity) as supply - from multi_asset - join ma_tx_mint mint on multi_asset.id = mint.ident - where - multi_asset.name = ($1)::bytea and multi_asset.policy = decode(($2)::varchar, 'hex')`; - - const result = await pool.query(query, [asset.name, asset.policy]); - - if (result.rows.length === 0) - throw new Error("asset not found: " + asset.name); - - return parseFloat(result.rows[0].supply); -}; diff --git a/src/services/multiAssetTxMint.ts b/src/services/multiAssetTxMint.ts deleted file mode 100644 index 390c98d2..00000000 --- a/src/services/multiAssetTxMint.ts +++ /dev/null @@ -1,108 +0,0 @@ -import { Pool } from "pg"; -import { Request, Response } from "express"; - -type Asset = - | { - // prefer this one - nameHex: string; - policy: string; - } - | { - // for backward compatibility - name: string; - policy: string; - }; - -interface MultiAssetTxMintMetadata { - key: string; - metadata: any; -} - -const getMultiAssetTxMintMetadata = async (pool: Pool, assets: Asset[]) => { - const query = createGetMultiAssetTxMintMetadataQuery(assets); - - const params = assets.flatMap((a) => { - if ("nameHex" in a) { - return [a.nameHex, a.policy]; - } - if ("name" in a) { - return [a.name, a.policy]; - } - throw new Error("expect nameHex or name in asset parameter"); - }); - - const ret: { [key: string]: MultiAssetTxMintMetadata[] } = {}; - - const results = await pool.query(query, params); - for (const row of results.rows) { - const policyAndName = `${row.policy}.${row.asset}`; - if (!ret[policyAndName]) { - ret[policyAndName] = new Array<MultiAssetTxMintMetadata>(); - } - - ret[policyAndName].push({ - key: row.key, - metadata: row.json, - }); - } - - return ret; -}; - -export const handleGetMultiAssetTxMintMetadata = - (pool: Pool) => - async (req: Request, res: Response): Promise<void> => { - if (!req.body || !req.body.assets) - throw new Error("missing assets on request body"); - if (!Array.isArray(req.body.assets)) - throw new Error("assets should be an array"); - if
(req.body.assets.length === 0) - throw new Error("assets should not be empty"); - if (req.body.assets.length > 100) - throw new Error("Max limit of 100 assets exceeded."); - if (req.body.assets.find((a: any) => !a.policy)) - throw new Error("all assets on body should have a name and a policy"); - - const assets: Asset[] = req.body.assets; - - const metadata = await getMultiAssetTxMintMetadata(pool, assets); - res.send(metadata); - }; - -function createGetMultiAssetTxMintMetadataQuery(assets: Asset[]) { - const whereConditions = assets - .map((a, idx) => { - let nameMatch; - if ("nameHex" in a) { - nameMatch = `decode(($${idx * 2 + 1})::varchar, 'hex')`; - } else if ("name" in a) { - nameMatch = `($${idx * 2 + 1})::bytea`; - } else { - throw new Error("expect nameHex or name in asset parameter"); - } - return `( ma.name = ${nameMatch} and ma.policy = decode(($${ - idx * 2 + 2 - })::varchar, 'hex') )`; - }) - .join(" or "); - - const query = ` -WITH mint_detail AS ( - SELECT max(mint.id) id, name, policy - FROM ma_tx_mint mint - JOIN multi_asset ma on mint.ident = ma.id - where ${whereConditions} - GROUP BY name, policy), - mint_tx AS ( - SELECT tx_id, name, policy - FROM ma_tx_mint mint - JOIN mint_detail d ON mint.id = d.id) - select encode(mint.policy, 'hex') as policy, - mint.name as asset, - meta.key, - meta.json, mint.tx_id - from mint_tx mint - join tx tx on mint.tx_id = tx.id - join tx_metadata meta on tx.id = meta.tx_id`; - return query; -} diff --git a/src/services/policyIdExists.ts b/src/services/policyIdExists.ts deleted file mode 100644 index 54faa2f6..00000000 --- a/src/services/policyIdExists.ts +++ /dev/null @@ -1,76 +0,0 @@ -import { Pool } from "pg"; -import { Request, Response } from "express"; - -export const handlePolicyIdExists = - (pool: Pool) => - async (req: Request, res: Response): Promise<void> => { - if (!req.body || !req.body.policyIds) { - throw new Error("error, no policyIds informed."); - } - if (!Array.isArray(req.body.policyIds)) { - throw new Error("'policyIds' should be an array."); - } - - const policyIds: string[] = req.body.policyIds; - const fingerprints: string[] = req.body.fingerprints; - - if (policyIds.length > 100) { - throw new Error("Max limit of 100 policyIds exceeded."); - } - - const policyIdDbResult = await pool.query(policyIdQuery, [policyIds]); - - const policyIdResults: { [key: string]: boolean } = {}; - - policyIds.forEach((policyId: string) => { - policyIdResults[policyId] = false; - }); - - policyIdDbResult.rows.forEach((row: any) => { - policyIdResults[row.policy_hex] = true; - }); - - const response: any = { - policyIdResults, - }; - - if (req.body.fingerprints) { - if (!Array.isArray(req.body.fingerprints)) { - throw new Error("'fingerprints' should be an array."); - } - - if (fingerprints.length > 100) { - throw new Error("Max limit of 100 fingerprints exceeded."); - } - - const fingerprintDbResult = await pool.query(fingerprintQuery, [ - fingerprints, - ]); - - const fingerprintResults: { [key: string]: boolean } = {}; - - fingerprints.forEach((fingerprint: string) => { - fingerprintResults[fingerprint] = false; - }); - - fingerprintDbResult.rows.forEach((row: any) => { - fingerprintResults[row.fingerprint] = true; - }); - - response.fingerprintResults = fingerprintResults; - } - - res.send(response); - }; - -const policyIdQuery = ` - with filterPolicies as ( - select decode(n, 'hex') from unnest(($1)::varchar array) as n - ) - SELECT encode(policy, 'hex') as policy_hex FROM multi_asset - WHERE policy in (select * from filterPolicies)`; -
-const fingerprintQuery = ` - SELECT fingerprint FROM multi_asset - WHERE fingerprint = ANY($1); -`; diff --git a/src/services/tipStatus.ts b/src/services/tipStatus.ts deleted file mode 100644 index 25ce6c90..00000000 --- a/src/services/tipStatus.ts +++ /dev/null @@ -1,120 +0,0 @@ -import config from "config"; - -import { Pool } from "pg"; -import { Request, Response } from "express"; - -const SAFE_BLOCK_DEPTH = parseInt(config.get("safeBlockDifference")); - -const bestBlockQuery = ` - SELECT epoch_no AS "epoch", - epoch_slot_no AS "slot", - slot_no AS "globalSlot", - encode(hash, 'hex') as hash, - block_no AS height - FROM BLOCK - ORDER BY id DESC - LIMIT 1;`; - -const safeBlockQuery = `SELECT epoch_no AS "epoch", - epoch_slot_no AS "slot", - slot_no AS "globalSlot", - encode(hash, 'hex') as hash, - block_no AS height -FROM BLOCK -WHERE block_no <= (SELECT MAX(block_no) FROM block) - ($1)::int -ORDER BY id DESC -LIMIT 1; -`; - -const bestBlockFromReferenceQuery = `SELECT encode(hash, 'hex') as "hash", block_no as "blockNumber" - FROM block - WHERE hash in ( - select decode(n, 'hex') from unnest(($1)::varchar array) as n - ) - AND block_no IS NOT NULL - ORDER BY block_no DESC - LIMIT 1`; - -const safeBlockFromReferenceQuery = `SELECT encode(hash, 'hex') as "hash", block_no as "blockNumber" - FROM block - WHERE hash in ( - select decode(n, 'hex') from unnest(($1)::varchar array) as n - ) - AND block_no IS NOT NULL - AND block_no <= (SELECT MAX(block_no) FROM block) - ($2)::int - ORDER BY block_no DESC - LIMIT 1`; - -const getBestAndSafeBlocks = async ( - pool: Pool -): Promise<{ - safeBlock: string | undefined; - bestBlock: string | undefined; -}> => { - const [bestBlockResult, safeBlockResult] = await Promise.all([ - pool.query(bestBlockQuery), - pool.query(safeBlockQuery, [SAFE_BLOCK_DEPTH]), - ]); - - return { - bestBlock: bestBlockResult.rows[0], - safeBlock: safeBlockResult.rows[0], - }; -}; - -export const handleTipStatusGet = - (pool: Pool) => async (req: Request, res: Response) => { - const result = await getBestAndSafeBlocks(pool); - res.send(result); - }; - -export const handleTipStatusPost = - (pool: Pool) => async (req: Request, res: Response) => { - if (!req.body.reference) { - throw new Error("error, missing reference"); - } - - if (!req.body.reference.bestBlocks) { - throw new Error("error, missing bestBlocks inside reference"); - } - - const bestBlocks: string[] = req.body.reference.bestBlocks; - if (!Array.isArray(bestBlocks)) { - throw new Error("error, bestBlocks should be an array"); - } - - if (bestBlocks.length === 0) { - throw new Error("error, bestBlocks should not be empty"); - } - - const [ - { safeBlock, bestBlock }, - bestBlockFromReferenceResult, - safeBlockFromReferenceResult, - ] = await Promise.all([ - getBestAndSafeBlocks(pool), - pool.query(bestBlockFromReferenceQuery, [bestBlocks]), - pool.query(safeBlockFromReferenceQuery, [bestBlocks, SAFE_BLOCK_DEPTH]), - ]); - - if (bestBlockFromReferenceResult.rowCount === 0) { - throw new Error("REFERENCE_POINT_BLOCK_NOT_FOUND"); - } - - const lastFoundBestBlock: string = - bestBlockFromReferenceResult.rows[0].hash; - if (safeBlockFromReferenceResult.rowCount === 0) { - throw new Error("REFERENCE_POINT_BLOCK_NOT_FOUND"); - } - const lastFoundSafeBlock: string = - safeBlockFromReferenceResult.rows[0].hash; - - res.send({ - safeBlock, - bestBlock, - reference: { - lastFoundSafeBlock, - lastFoundBestBlock, - }, - }); - }; diff --git a/src/services/transactionHistory.ts b/src/services/transactionHistory.ts deleted file 
mode 100644 index f5cdba73..00000000 --- a/src/services/transactionHistory.ts +++ /dev/null @@ -1,490 +0,0 @@ -import { - errMsgs, - UtilEither, - extractAssets, - getAddressesByType, -} from "../utils"; - -import { - rowToCertificate, - BlockEra, - BlockFrag, - Certificate, - TransInputFrag, - TransOutputFrag, - TransactionFrag, - Asset, -} from "../Transactions/types"; - -import { - GeneralTransactionMetadata, - TransactionMetadatum, - BigNum, -} from "@emurgo/cardano-serialization-lib-nodejs"; - -import { Pool } from "pg"; - -/** - Everything else in this repo is using graphql, so why psql here? - Hasura and the rest of the GraphQL stack are _slow_ for this sort of thing. - The psql query generated by Hasura for the equiv GraphQL does several layers - of lateral joins. On my machine, such queries can take as long as 41s to run. - This SQL is fast, averaging about 10ms (though, clearly, the time scales poorly - with the number of results, as you can see by the subqueries in the select clause). - As we anticipate cardano-graphql being able to handle this in the future, I have - left the interface to match what graphql would do. For posterity's sake, I have - also left the original GraphQL query in this file. - Beware! The GraphQL query never passed tests, and doesn't pull blockindex/tx_ordinal/tx_index. -**/ -const askTransactionSqlQuery = ` - with - hashes as ( - select distinct hash - from ( - ${/* 1.1) Get all inputs for the transaction */ ""} - - select tx.hash as hash - FROM tx - - JOIN tx_in - ON tx_in.tx_in_id = tx.id - - ${ - /** - note: input table doesn't contain addresses directly - so to check for all inputs that use address X - we have to check the address for all outputs that occur in the input table - **/ "" - } - JOIN tx_out source_tx_out - ON tx_in.tx_out_id = source_tx_out.tx_id - AND tx_in.tx_out_index::smallint = source_tx_out.index::smallint - - JOIN tx source_tx - ON source_tx_out.tx_id = source_tx.id - - WHERE source_tx_out.address = ANY(($1)::varchar array) - OR source_tx_out.payment_cred = ANY(($6)::bytea array) - - UNION - - ${/* 1.2) Get all collateral inputs for the transaction */ ""} - - select tx.hash as hash - FROM tx - - JOIN collateral_tx_in - ON collateral_tx_in.tx_in_id = tx.id - - ${ - /** - note: input table doesn't contain addresses directly - so to check for all inputs that use address X - we have to check the address for all outputs that occur in the input table - **/ "" - } - JOIN tx_out source_tx_out - ON collateral_tx_in.tx_out_id = source_tx_out.tx_id - AND collateral_tx_in.tx_out_index::smallint = source_tx_out.index::smallint - - JOIN tx source_tx - ON source_tx_out.tx_id = source_tx.id - - WHERE source_tx_out.address = ANY(($1)::varchar array) - OR source_tx_out.payment_cred = ANY(($6)::bytea array) - - UNION - ${/* 2) Get all outputs for the transaction */ ""} - select tx.hash as hash - from tx - - JOIN tx_out - on tx.id = tx_out.tx_id - - where tx_out.address = ANY(($1)::varchar array) - or tx_out.payment_cred = ANY(($6)::bytea array) - - UNION - ${/* 2.1) Get all collateral outputs for the transaction */ ""} - select tx.hash as hash - from tx - - JOIN collateral_tx_out - on tx.id = collateral_tx_out.tx_id - - where collateral_tx_out.address = ANY(($1)::varchar array) - or collateral_tx_out.payment_cred = ANY(($6)::bytea array) - - UNION - ${/* 3) Get all certificates for the transaction */ ""} - select tx.hash as hash - from tx - - JOIN combined_certificates as certs - on tx.id = certs."txId" - where - ( - certs."formalType" in
('CertRegKey', 'CertDeregKey','CertDelegate') - and certs."stakeCred" = any( - ${ - /* stakeCred is encoded as a string, so we have to convert from a byte array to a hex string */ "" - } - (SELECT array_agg(encode(addr, 'hex')) from UNNEST($7::bytea array) as addr)::varchar array - ) - ) or ( - ${ - /* note: PoolRetirement only contains pool key hash, so no way to map it to an address */ "" - } - certs."formalType" in ('CertRegPool') - and certs."poolParamsRewardAccount" = any( - ${ - /* poolParamsRewardAccount is encoded as a string, so we have to convert from a byte array to a hex string */ "" - } - (SELECT array_agg(encode(addr, 'hex')) from UNNEST($7::bytea array) as addr)::varchar array - ) - ) - - UNION - ${/* 4) Get all withdrawals for the transaction */ ""} - - select tx.hash as hash - from tx - - JOIN withdrawal as w - on tx.id = w."tx_id" - - JOIN stake_address as addr - on w.addr_id = addr.id - - where addr.hash_raw = any(($7)::bytea array) - ) hashes - ) - select tx.hash - , tx.fee - , tx.valid_contract - , tx.script_size - , (select jsonb_object_agg(key, bytes) - from tx_metadata - where tx_metadata.tx_id = tx.id) as metadata - , tx.block_index as "txIndex" - , block.block_no as "blockNumber" - , block.hash as "blockHash" - , block.epoch_no as "blockEpochNo" - , block.slot_no as "blockSlotNo" - , block.epoch_slot_no as "blockSlotInEpoch" - , case when vrf_key is null then 'byron' - else 'shelley' end - as "blockEra" - , block.time at time zone 'UTC' as "includedAt" - , (select json_agg(( source_tx_out.address - , source_tx_out.value - , encode(source_tx.hash, 'hex') - , tx_in.tx_out_index - , (select json_agg(ROW(encode("ma"."policy", 'hex'), encode("ma"."name", 'hex'), "quantity")) - from ma_tx_out - inner join multi_asset ma on ma_tx_out.ident = ma.id - WHERE ma_tx_out."tx_out_id" = source_tx_out.id) - ) order by tx_in.id asc) as inAddrValPairs - FROM tx inadd_tx - JOIN tx_in - ON tx_in.tx_in_id = inadd_tx.id - JOIN tx_out source_tx_out - ON tx_in.tx_out_id = source_tx_out.tx_id AND tx_in.tx_out_index::smallint = source_tx_out.index::smallint - JOIN tx source_tx - ON source_tx_out.tx_id = source_tx.id - where inadd_tx.hash = tx.hash) as "inAddrValPairs" - , (select json_agg(( source_tx_out.address - , source_tx_out.value - , encode(source_tx.hash, 'hex') - , collateral_tx_in.tx_out_index - , (select json_agg(ROW(encode("ma"."policy", 'hex'), encode("ma"."name", 'hex'), "quantity")) - from ma_tx_out - inner join multi_asset ma on ma_tx_out.ident = ma.id - WHERE ma_tx_out."tx_out_id" = source_tx_out.id) - ) order by collateral_tx_in.id asc) as collateralInAddrValPairs - FROM tx inadd_tx - JOIN collateral_tx_in - ON collateral_tx_in.tx_in_id = inadd_tx.id - JOIN tx_out source_tx_out - ON collateral_tx_in.tx_out_id = source_tx_out.tx_id AND collateral_tx_in.tx_out_index::smallint = source_tx_out.index::smallint - JOIN tx source_tx - ON source_tx_out.tx_id = source_tx.id - where inadd_tx.hash = tx.hash) as "collateralInAddrValPairs" - , (select json_agg(( - "address", - "value", - "txDataHash", - (select json_agg(ROW(encode("ma"."policy", 'hex'), encode("ma"."name", 'hex'), "quantity")) - FROM ma_tx_out - inner join multi_asset ma on ma_tx_out.ident = ma.id - JOIN tx_out token_tx_out - ON tx.id = token_tx_out.tx_id - WHERE ma_tx_out."tx_out_id" = token_tx_out.id AND hasura_to."address" = token_tx_out.address AND hasura_to.index = token_tx_out.index) - ) order by "index" asc) as outAddrValPairs - from "TransactionOutput" hasura_to - where hasura_to."txHash" = tx.hash) as 
"outAddrValPairs" - , (select json_agg(( - "address", - "value", - "txDataHash" - ) order by "index" asc) as collateralOutAddrValPairs - from "CollateralTransactionOutput" hasura_to - where hasura_to."txHash" = tx.hash) as "collateralOutAddrValPairs" - , (select json_agg((encode(addr."hash_raw",'hex'), "amount") order by w."id" asc) - from withdrawal as w - join stake_address as addr - on addr.id = w.addr_id - where w.tx_id = tx.id) as withdrawals - , (select json_agg(row_to_json(combined_certificates) order by "certIndex" asc) - from combined_certificates - where "txId" = tx.id) as certificates - - from tx - - JOIN hashes - on hashes.hash = tx.hash - - JOIN block - on block.id = tx.block_id - - LEFT JOIN pool_metadata_ref - on tx.id = pool_metadata_ref.registered_tx_id - - where - ${/* is within untilBlock (inclusive) */ ""} - block.block_no <= $2 - and ( - ${/* Either: */ ""} - ${/* 1) comes in block strict after the "after" field */ ""} - block.block_no > $3 - or - ${ - /* 2) Is in the same block as the "after" field, but is tx that appears afterwards */ "" - } - (block.block_no = $3 and tx.block_index > $4) - ) - - order by block.time asc, tx.block_index asc - limit $5; -`; - -const MAX_INT = "2147483647"; - -function buildMetadataObj( - metadataMap: null | Record -): null | string { - if (metadataMap == null) return null; - const metadataWasm = GeneralTransactionMetadata.new(); - for (const key of Object.keys(metadataMap)) { - const keyWasm = BigNum.from_str(key); - // the cbor inserted into SQL is not the full metadata for the transaction - // instead, each row is a CBOR map with a single entry - const singletonMap = TransactionMetadatum.from_bytes( - Buffer.from( - // need to cutoff the \\x prefix added by SQL - metadataMap[key].substring(2), - "hex" - ) - ); - const map = singletonMap.as_map(); - const keys = map.keys(); - for (let i = 0; i < keys.len(); i++) { - const cborKey = keys.get(i); - const datumWasm = map.get(cborKey); - metadataWasm.insert(keyWasm, datumWasm); - datumWasm.free(); - cborKey.free(); - } - keyWasm.free(); - singletonMap.free(); - map.free(); - keys.free(); - } - const result = Buffer.from(metadataWasm.to_bytes()).toString("hex"); - metadataWasm.free(); - - return result; -} - -export const askTransactionHistory = async ( - pool: Pool, - limit: number, - addresses: string[], - after: { - blockNumber: number; - txIndex: number; - }, - untilNum: number -): Promise> => { - const addressTypes = getAddressesByType(addresses); - const ret = await pool.query(askTransactionSqlQuery, [ - [...addressTypes.legacyAddr, ...addressTypes.bech32], - untilNum, - after.blockNumber, - after.txIndex, - limit, - addressTypes.paymentCreds, - addressTypes.stakingKeys, - ]); - const txs = ret.rows.map((row: any): TransactionFrag => { - const inputs = row.inAddrValPairs - ? row.inAddrValPairs.map( - (obj: any): TransInputFrag => ({ - address: obj.f1, - amount: obj.f2.toString(), - id: obj.f3.concat(obj.f4.toString()), - index: obj.f4, - txHash: obj.f3, - assets: extractAssets(obj.f5), - }) - ) - : []; - const collateralInputs = row.collateralInAddrValPairs - ? row.collateralInAddrValPairs.map( - (obj: any): TransInputFrag => ({ - address: obj.f1, - amount: obj.f2.toString(), - id: obj.f3.concat(obj.f4.toString()), - index: obj.f4, - txHash: obj.f3, - assets: extractAssets(obj.f5), - }) - ) - : []; - const outputs = row.outAddrValPairs - ? row.outAddrValPairs.map( - (obj: any): TransOutputFrag => ({ - address: obj.f1, - amount: obj.f2.toString(), - dataHash: obj.f3?.toString() ?? 
null, - assets: extractAssets(obj.f4), - }) - ) - : []; - const collateralOutputs = row.collateralOutAddrValPairs - ? row.collateralOutAddrValPairs.map( - (obj: any): TransOutputFrag => ({ - address: obj.f1, - amount: obj.f2.toString(), - dataHash: obj.f3?.toString() ?? null, - assets: [], - }) - ) - : []; - const withdrawals: TransOutputFrag[] = row.withdrawals - ? row.withdrawals.map( - (obj: any): TransOutputFrag => ({ - address: obj.f1, - amount: obj.f2.toString(), - dataHash: null, - assets: [] as Asset[], - }) - ) - : []; - const certificates = - row.certificates !== null - ? row.certificates - .map(rowToCertificate) - .filter((i: Certificate | null) => i !== null) - : []; - const blockFrag: BlockFrag = { - number: row.blockNumber, - hash: row.blockHash.toString("hex"), - epochNo: row.blockEpochNo, - slotNo: row.blockSlotInEpoch, - }; - - return { - hash: row.hash.toString("hex"), - block: blockFrag, - validContract: row.valid_contract, - scriptSize: row.script_size, - fee: row.fee.toString(), - metadata: buildMetadataObj(row.metadata), - includedAt: row.includedAt, - inputs: inputs, - collateralInputs: collateralInputs, - outputs: outputs, - collateralOutputs: collateralOutputs, - ttl: MAX_INT, // https://github.com/input-output-hk/cardano-db-sync/issues/212 - blockEra: row.blockEra === "byron" ? BlockEra.Byron : BlockEra.Shelley, - txIndex: row.txIndex, - withdrawals: withdrawals, - certificates: certificates, - }; - }); - - return { kind: "ok", value: txs }; - //if('data' in ret && 'data' in ret.data && 'transactions' in ret.data.data) - // return {'kind':'ok', value:ret.data.data.transactions}; - //else - // return {'kind':'error', errMsg:'TxsHistory, could not understand graphql response'}; -}; - -export interface BlockNumByTxHashFrag { - block: BlockByTxHashFrag; - hash: string; - blockIndex: number; // this is actually the index of the transaction in the block -} -interface BlockByTxHashFrag { - hash: string; - number: number; -} - -const askBlockNumByTxHashQuery = ` - SELECT "tx"."hash" AS "hash", "tx"."block_index" as "blockIndex", "Block"."block_no" AS "blockNumber", "Block"."hash" AS "blockHash" - FROM "tx" - LEFT JOIN "block" "Block" ON "tx"."block_id" = "Block"."id" - WHERE "tx"."hash"=decode($1, 'hex') -`; - -export const askBlockNumByTxHash = async ( - pool: Pool, - hash: string | undefined -): Promise<UtilEither<BlockNumByTxHashFrag>> => { - if (!hash) return { kind: "error", errMsg: errMsgs.noValue }; - - try { - const res = await pool.query(askBlockNumByTxHashQuery, [hash]); - return { - kind: "ok", - value: { - block: { - hash: res.rows[0].blockHash.toString("hex"), - number: res.rows[0].blockNumber, - }, - hash: res.rows[0].hash.toString("hex"), - blockIndex: res.rows[0].blockIndex, - }, - }; - } catch (err: any) { - const errString = err.stack + ""; - return { kind: "error", errMsg: "askBlockNumByTxHash error: " + errString }; - } -}; - -const askBlockNumByHashQuery = ` - SELECT "block"."block_no" AS "blockNumber" - FROM "block" - WHERE "block"."hash"=decode($1, 'hex') -`; - -export const askBlockNumByHash = async ( - pool: Pool, - hash: string -): Promise<UtilEither<number>> => { - if (!hash) return { kind: "error", errMsg: errMsgs.noValue }; - - try { - const res = await pool.query(askBlockNumByHashQuery, [hash]); - if (res.rows.length === 0) - return { kind: "error", errMsg: errMsgs.noValue }; - return { - kind: "ok", - value: res.rows[0].blockNumber, - }; - } catch (err: any) { - const errString = err.stack + ""; - return { kind: "error", errMsg: "askBlockNumByHash error: " + errString }; - } -}; diff --git
a/src/services/transactions.ts b/src/services/transactions.ts deleted file mode 100644 index e3e638ed..00000000 --- a/src/services/transactions.ts +++ /dev/null @@ -1,59 +0,0 @@ -import { Pool } from "pg"; -import { Request, Response } from "express"; - -import { - mapTransactionFragToResponse, - mapTxRowsToTransactionFrags, -} from "../utils/mappers"; - -const transactionsQuery = ` -SELECT tx.hash - , tx.fee - , tx.valid_contract - , tx.script_size - , tx_metadata_agg(tx.id) as metadata - , tx.block_index as "txIndex" - , block.block_no as "blockNumber" - , block.hash as "blockHash" - , block.epoch_no as "blockEpochNo" - , block.slot_no as "blockSlotNo" - , block.epoch_slot_no as "blockSlotInEpoch" - , block_era_from_vrf_key(vrf_key) as "blockEra" - , block.time at time zone 'UTC' as "includedAt" - , in_addr_val_pairs(tx.hash) as "inAddrValPairs" - , collateral_in_addr_val_pairs(tx.hash) as "collateralInAddrValPairs" - , withdraws_agg(tx.id) as "withdrawals" - , certificates_agg(tx.id) as "certificates" - , out_addr_val_pairs(tx.id, tx.hash) as "outAddrValPairs" -FROM tx - INNER JOIN block on tx.block_id = block.id - WHERE tx.hash in ( - select decode(n, 'hex') from unnest(($1)::varchar array) as n - ); -`; - -export const handleGetTransactions = - (pool: Pool) => async (req: Request, res: Response) => { - if (!req.body || !req.body.txHashes) { - throw new Error("error, no txHashes informed."); - } - if (!Array.isArray(req.body.txHashes)) { - throw new Error("'txHashes' should be an array."); - } - const txHashes: string[] = req.body.txHashes; - if (txHashes.length > 100) { - throw new Error("Max limit of 100 txHashes exceeded."); - } - if (txHashes.length === 0) { - throw new Error("error, at least 1 txHash should be informed."); - } - - const result = await pool.query(transactionsQuery, [txHashes]); - const txs = mapTxRowsToTransactionFrags(result.rows); - const responseObj: { [key: string]: any } = {}; - for (const tx of txs) { - responseObj[tx.hash] = mapTransactionFragToResponse(tx); - } - - res.send(responseObj); - }; diff --git a/src/services/txIO.ts b/src/services/txIO.ts deleted file mode 100644 index 78d276de..00000000 --- a/src/services/txIO.ts +++ /dev/null @@ -1,111 +0,0 @@ -import { Pool } from "pg"; -import { Request, Response } from "express"; -import { TransInputFrag, TransOutputFrag } from "../Transactions/types"; -import { extractAssets } from "../utils"; - -const QUERY_IO = ` - select - in_addr_val_pairs(tx.hash), - collateral_in_addr_val_pairs(tx.hash), - out_addr_val_pairs(tx.id, tx.hash) - from tx - where tx.hash = decode($1, 'hex') - limit 1;`; - -const QUERY_OUT = ` - select out_addr_val_pairs(tx.id, tx.hash)->$2::int as "out" - from tx - where tx.hash = decode($1, 'hex') - limit 1;`; - -export const handleGetTxIO = - (pool: Pool) => - async (req: Request, res: Response): Promise<void> => { - const tx_hash = (req.params.tx_hash || "").trim(); - if (tx_hash.length <= 0) { - res.status(400).send(`missing tx_hash on request path: [${tx_hash}]`); - return; - } - - const result = await pool.query(QUERY_IO, [tx_hash]); - - if (result.rowCount === 0) { - res.status(404).send("Transaction not found"); - return; - } - - const inputs = result.rows[0].in_addr_val_pairs || []; - const collateralInputs = result.rows[0].collateral_in_addr_val_pairs || []; - const outputs = result.rows[0].out_addr_val_pairs || []; - - res.send({ - inputs: inputs.map( - (obj: any): TransInputFrag => ({ - address: obj.f1, - amount: obj.f2.toString(), - id: obj.f3.concat(obj.f4.toString()), - index:
obj.f4, - txHash: obj.f3, - assets: extractAssets(obj.f5), - }) - ), - collateralInputs: collateralInputs.map( - (obj: any): TransInputFrag => ({ - address: obj.f1, - amount: obj.f2.toString(), - id: obj.f3.concat(obj.f4.toString()), - index: obj.f4, - txHash: obj.f3, - assets: extractAssets(obj.f5), - }) - ), - outputs: outputs.map( - (obj: any): TransOutputFrag => ({ - address: obj.f1, - amount: obj.f2.toString(), - dataHash: obj.f3, - assets: extractAssets(obj.f4), - }) - ), - }); - }; - -export const handleGetTxOutput = - (pool: Pool) => - async (req: Request, res: Response): Promise<void> => { - const tx_hash = (req.params.tx_hash || "").trim(); - const output_index = parseInt(req.params.index); - if (tx_hash.length <= 0) { - res.status(400).send(`missing tx_hash on request path: [${tx_hash}]`); - return; - } - if (output_index == null || !(output_index >= 0)) { - res - .status(400) - .send(`missing or incorrect index on request path: [${output_index}]`); - return; - } - - const result = await pool.query(QUERY_OUT, [tx_hash, output_index]); - - if (result.rowCount === 0) { - res.status(404).send("Transaction not found"); - return; - } - - const obj = result.rows[0].out; - - if (obj == null) { - res.status(400).send("Output index out of bounds"); - return; - } - - res.send({ - output: { - address: obj.f1, - amount: obj.f2.toString(), - dataHash: obj.f3, - assets: extractAssets(obj.f4), - }, - }); - }; diff --git a/src/services/utxoAtPoint.ts b/src/services/utxoAtPoint.ts deleted file mode 100644 index a7690d79..00000000 --- a/src/services/utxoAtPoint.ts +++ /dev/null @@ -1,111 +0,0 @@ -import { Pool } from "pg"; -import { Request, Response } from "express"; - -import config from "config"; -import { - assertNever, - validateAddressesReq, - getAddressesByType, - extractAssets, -} from "../utils"; - -import { getBlock } from "../utils/queries/block"; - -const addressesRequestLimit: number = config.get("server.addressRequestLimit"); - -const utxoAtPointQuery = `SELECT tx_out.address, - tx_out.payment_cred, - encode(tx.hash::bytea, 'hex'::text) AS hash, - tx_out.index, - tx_out.value, - block.block_no AS "blockNumber", - ( - SELECT json_agg( - ROW ( - encode(multi_asset.policy::bytea, 'hex'::text), - encode(multi_asset.name::bytea, 'hex'::text), - ma_tx_out.quantity) - ) AS json_agg - FROM ma_tx_out - INNER JOIN multi_asset on ma_tx_out.ident = multi_asset.id - WHERE ma_tx_out.tx_out_id = tx_out.id - ) AS assets -FROM tx - JOIN tx_out ON tx.id = tx_out.tx_id - JOIN block ON block.id = tx.block_id -WHERE tx.valid_contract - AND block.block_no <= ($3)::word31type - AND NOT utxo_used_as_invalid_collateral(tx_out.tx_id, tx_out.index::smallint, ($3)::word31type) - AND NOT utxo_used_as_valid_input(tx_out.tx_id, tx_out.index::smallint, ($3)::word31type) - AND ( - tx_out.address = any(($1)::varchar array) - OR payment_cred = any(($2)::bytea array) - ) -ORDER BY tx.hash -LIMIT $4::word31type OFFSET $5::word31type; -`; - -export const utxoAtPoint = - (pool: Pool) => async (req: Request, res: Response) => { - if (!req.body || !req.body.addresses) { - throw new Error("error, no addresses."); - } - if (!req.body || !req.body.addresses) { - throw new Error("error, no addresses."); - } - if (!req.body.referenceBlockHash) { - throw new Error("error, missing the `referenceBlockHash`."); - } - - const page = parseInt(req.body.page); - const pageSize = parseInt(req.body.pageSize); - - if (isNaN(page) || isNaN(pageSize)) { - throw new Error("error, page and pageSize should be numbers."); - } - - if (page <= 0 ||
pageSize <= 0) { - throw new Error("error, page and pageSize should be positive integers."); - } - - const offset = (page - 1) * pageSize; - - const addressTypes = getAddressesByType(req.body.addresses); - const verifiedAddresses = validateAddressesReq( - addressesRequestLimit, - req.body.addresses - ); - switch (verifiedAddresses.kind) { - case "ok": { - const referenceBlock = await getBlock(pool)( - req.body.referenceBlockHash - ); - if (!referenceBlock) { - throw new Error("REFERENCE_POINT_BLOCK_NOT_FOUND"); - } - - const result = await pool.query(utxoAtPointQuery, [ - [...addressTypes.legacyAddr, ...addressTypes.bech32], - addressTypes.paymentCreds, - referenceBlock.number, - pageSize, - offset, - ]); - const utxos = result.rows.map((utxo) => ({ - utxo_id: `${utxo.hash}:${utxo.index}`, - tx_hash: utxo.hash, - tx_index: utxo.index, - receiver: utxo.address, - amount: utxo.value.toString(), - assets: extractAssets(utxo.assets), - block_num: utxo.blockNumber, - })); - res.send(utxos); - return; - } - case "error": - throw new Error(verifiedAddresses.errMsg); - default: - return assertNever(verifiedAddresses); - } - }; diff --git a/src/services/utxoDiffSincePoint.ts b/src/services/utxoDiffSincePoint.ts deleted file mode 100644 index fba25f98..00000000 --- a/src/services/utxoDiffSincePoint.ts +++ /dev/null @@ -1,433 +0,0 @@ -import config from "config"; - -import { Pool } from "pg"; -import { Request, Response } from "express"; - -import { getBlock } from "../utils/queries/block"; -import { getTransactionRowByHash } from "../utils/queries/transaction"; -import { - assertNever, - validateAddressesReq, - getAddressesByType, - extractAssets, -} from "../utils"; - -const addressesRequestLimit: number = config.get("server.addressRequestLimit"); - -enum DiffItemType { - INPUT = "input", - COLLATERAL = "collateral", - OUTPUT = "output", -} - -const extractBodyParameters = async ( - body: any -): Promise<{ - addresses: string[]; - untilBlockHash: string; - afterPoint: { - blockHash: string; - paginationPointType: DiffItemType | null; - // if paginationPointType is not null, these two fields must be present - txHash?: string; - /* - if paginationPointType is INPUT, this is `tx_in.id`; - if paginationPointType is COLLATERAL, this is `collateral_tx_in.id`; - if paginationPointType is OUTPUT, this is `tx_out.index`; - */ - paginationPointValue?: string; - }; - diffLimit: number; -}> => { - if (!body) { - throw new Error("error, missing request body."); - } - - const addresses: string[] = body.addresses; - if (!addresses || addresses.length === 0) { - throw new Error("error, no addresses."); - } - - const untilBlockHash: string = body.untilBlockHash; - if (!body.untilBlockHash) { - throw new Error("error, no untilBlockHash."); - } - - if (!body.diffLimit) { - throw new Error("error, no diffLimit."); - } - - const diffLimit: number = body.diffLimit; - - const afterPoint: { - blockHash: string; - paginationPointType: DiffItemType | null; - txHash?: string; - paginationPointValue?: string; - } = body.afterPoint; - - if (!afterPoint) { - throw new Error("error, missing afterPoint."); - } - - if (!afterPoint.blockHash) { - throw new Error("error, missing blockHash in afterPoint."); - } - - if (!afterPoint.paginationPointType) { - afterPoint.paginationPointType = null; - } else if ( - ![ - DiffItemType.INPUT, - DiffItemType.COLLATERAL, - DiffItemType.OUTPUT, - ].includes(afterPoint.paginationPointType) - ) { - throw new Error("error, unexpected paginationPointType in afterPoint."); - } - - if (
afterPoint.paginationPointType !== null && - typeof afterPoint.txHash !== "string" - ) { - throw new Error("error, missing txHash in afterPoint."); - } - - if ( - afterPoint.paginationPointType !== null && - typeof afterPoint.paginationPointValue !== "string" - ) { - throw new Error("error, missing paginationPointValue in afterPoint."); - } - - return { - addresses, - untilBlockHash, - afterPoint, - diffLimit, - }; -}; - -const buildSelectColumns = (diffItemType: DiffItemType) => { - let paginationPointColumn; - if (diffItemType === DiffItemType.INPUT) { - paginationPointColumn = "tx_in.id"; - } else if (diffItemType === DiffItemType.COLLATERAL) { - paginationPointColumn = "collateral_tx_in.id"; - } else { - paginationPointColumn = "tx_out.index"; - } - - return `SELECT tx_out.address - , encode(block.hash, 'hex') as "blockHash" - , tx_out.address - , tx_out.payment_cred - , encode(tx.hash,'hex') as hash - , tx_out.index - , tx_out.value - , block.block_no as "blockNumber" - , tx.block_index as "blockIndex" - , ( - select json_agg(ROW (encode(multi_asset."policy", 'hex'), encode(multi_asset."name", 'hex'), "quantity")) - from ma_tx_out - inner join multi_asset on ma_tx_out.ident = multi_asset.id - where ma_tx_out."tx_out_id" = tx_out.id - ) as assets - , '${diffItemType}' as "diffItemType" - , ${paginationPointColumn} as "paginationPointValue"`; -}; - -const buildSelectFromForInputs = () => { - return `FROM tx - INNER JOIN block ON tx.block_id = block.id - INNER JOIN tx_in ON tx.id = tx_in.tx_in_id - INNER JOIN tx_out - ON tx_out.tx_id = tx_in.tx_out_id - AND tx_out.index::smallint = tx_in.tx_out_index::smallint - INNER JOIN tx src_tx - ON tx_out.tx_id = src_tx.id`; -}; - -const buildSelectFromForCollaterals = () => { - return `FROM tx - INNER JOIN block ON tx.block_id = block.id - INNER JOIN collateral_tx_in ON tx.id = collateral_tx_in.tx_in_id - INNER JOIN tx_out - ON tx_out.tx_id = collateral_tx_in.tx_out_id - AND tx_out.index::smallint = collateral_tx_in.tx_out_index::smallint - INNER JOIN tx src_tx - ON tx_out.tx_id = src_tx.id`; -}; - -const buildSelectFromForOutputs = () => { - return `FROM tx - INNER JOIN block ON tx.block_id = block.id - INNER JOIN tx_out ON tx.id = tx_out.tx_id`; -}; - -const buildWhereClause = ( - validContract: boolean, - // for which diff item we are building the where clause - diffItemType: DiffItemType, - // the type of pagination point passed in by the client - paginationPoinType: DiffItemType | null -) => { - let linearizedOrderCond; - if (paginationPoinType === null) { - linearizedOrderCond = "(block.block_no > ($4)::word31type)"; - } else if (paginationPoinType === DiffItemType.INPUT) { - // the linear order is input < collateral < output - if (diffItemType === DiffItemType.INPUT) { - linearizedOrderCond = `( - /* following blocks */ - block.block_no > ($4)::word31type - OR ( - /* the same block, following txs */ - block.block_no = ($4)::word31type - AND tx.block_index > ($6)::word31type - ) OR ( - /* the same tx, following inputs */ - block.block_no = ($4)::word31type - AND tx.block_index = ($6)::word31type - AND tx_in.id > ($5)::integer - ) - )`; - } else { - // diffItemType === DiffItemType.COLLATERAL || diffItemType === DiffItemType.OUTPUT - linearizedOrderCond = `( - /* following blocks */ - block.block_no > ($4)::word31type - OR ( - /* the same block, following txs, - or the same tx (because collaterals and outputs follow inputs) - */ - block.block_no = ($4)::word31type - AND tx.block_index >= ($6)::word31type - ) - )`; - } - } else if 
(paginationPoinType === DiffItemType.COLLATERAL) { - if (diffItemType === DiffItemType.INPUT) { - linearizedOrderCond = `( - /* following blocks */ - block.block_no > ($4)::word31type - OR ( - /* the same block, following txs */ - block.block_no = ($4)::word31type - AND tx.block_index > ($6)::word31type - ) /* because collaterals follow inputs, inputs of the same tx are before the pagination point */ - )`; - } else if (diffItemType === DiffItemType.COLLATERAL) { - linearizedOrderCond = `( - /* following blocks */ - block.block_no > ($4)::word31type - OR ( - /* the same block, following txs */ - block.block_no = ($4)::word31type - AND tx.block_index > ($6)::word31type - ) OR ( - /* the same tx, following collaterals */ - block.block_no = ($4)::word31type - AND tx.block_index = ($6)::word31type - AND collateral_tx_in.id > ($5)::integer - ) - )`; - } else { - // diffItemType === DiffItemType.OUTPUT - linearizedOrderCond = `( - /* following blocks */ - block.block_no > ($4)::word31type - OR ( - /* the same block, following txs, - or the same tx (because outputs follow collaterals) - */ - block.block_no = ($4)::word31type - AND tx.block_index >= ($6)::word31type - ) - )`; - } - } else { - // paginationPoinType === DiffItemType.OUTPUT - if ( - diffItemType === DiffItemType.INPUT || - diffItemType === DiffItemType.COLLATERAL - ) { - linearizedOrderCond = `( - /* following blocks */ - block.block_no > ($4)::word31type - OR ( - /* the same block, following txs */ - block.block_no = ($4)::word31type - AND tx.block_index > ($6)::word31type - ) /* because inputs and collaterals follow outputs, - inputs and collaterals of the same tx are before the pagination point */ - )`; - } else { - // (diffItemType === DiffItemType.OUTPUT) - linearizedOrderCond = `( - /* following blocks */ - block.block_no > ($4)::word31type - OR ( - /* the same block, following txs */ - block.block_no = ($4)::word31type - AND tx.block_index > ($6)::word31type - ) OR ( - /* the same tx, following outputs */ - block.block_no = ($4)::word31type - AND tx.block_index = ($6)::word31type - AND tx_out.index > ($5)::txindex - ) - )`; - } - } - - return `WHERE ${validContract ?
"" : "NOT "}tx.valid_contract - AND block.block_no <= ($3)::word31type - AND ${linearizedOrderCond} - AND ( - tx_out.address = any(($1)::varchar array) - OR tx_out.payment_cred = any(($2)::bytea array) - )`; -}; - -const buildInputQuery = (paginationPoinType: DiffItemType | null) => { - return `${buildSelectColumns(DiffItemType.INPUT)} - , encode(src_tx.hash,'hex') as src_hash - ${buildSelectFromForInputs()} - ${buildWhereClause(true, DiffItemType.INPUT, paginationPoinType)}`; -}; - -const buildCollateralQuery = (paginationPoinType: DiffItemType | null) => { - return `${buildSelectColumns(DiffItemType.COLLATERAL)} - , encode(src_tx.hash,'hex') as src_hash - ${buildSelectFromForCollaterals()} - ${buildWhereClause(false, DiffItemType.COLLATERAL, paginationPoinType)}`; -}; - -const buildOutputQuery = (paginationPointType: DiffItemType | null) => { - return `${buildSelectColumns(DiffItemType.OUTPUT)} - , null as src_hash - ${buildSelectFromForOutputs()} - ${buildWhereClause(true, DiffItemType.OUTPUT, paginationPointType)}`; -}; - -const buildFullQuery = (paginationPoinType: DiffItemType | null) => { - return `SELECT * FROM ( - ${buildInputQuery(paginationPoinType)} - UNION ALL - ${buildCollateralQuery(paginationPoinType)} - UNION ALL - ${buildOutputQuery(paginationPoinType)} - ) as q - ORDER BY - q."blockNumber", - q."blockIndex", - CASE q."diffItemType" WHEN 'input' THEN 0 WHEN 'collateral' THEN 1 ELSE 2 END, - q."paginationPointValue" - LIMIT $${5 + (paginationPoinType !== null ? 2 : 0)}::word31type;`; -}; - -export const handleUtxoDiffSincePoint = - (pool: Pool) => async (req: Request, res: Response) => { - const { addresses, untilBlockHash, afterPoint, diffLimit } = - await extractBodyParameters(req.body); - - const untilBlock = await getBlock(pool)(untilBlockHash); - if (!untilBlock) { - throw new Error("REFERENCE_BESTBLOCK_NOT_FOUND"); - } - - const afterBlock = await getBlock(pool)(afterPoint.blockHash); - if (!afterBlock) { - throw new Error("REFERENCE_POINT_BLOCK_NOT_FOUND"); - } - - const addressTypes = getAddressesByType(addresses); - const verifiedAddresses = validateAddressesReq( - addressesRequestLimit, - addresses - ); - - const fullQuery = buildFullQuery(afterPoint.paginationPointType); - - switch (verifiedAddresses.kind) { - case "ok": { - const queryParameters: any[] = [ - [...addressTypes.legacyAddr, ...addressTypes.bech32], - addressTypes.paymentCreds, - untilBlock.number, - afterBlock.number, - ]; - - if (afterPoint.paginationPointType !== null) { - if (afterPoint.txHash == null) { - throw new Error("won't happen"); - } - const afterPointTx = await getTransactionRowByHash(pool)( - afterPoint.txHash - ); - if (!afterPointTx) { - throw new Error("afterPoint.txHash not found"); - } - - queryParameters.push(afterPoint.paginationPointValue); - queryParameters.push(afterPointTx.blockIndex); - } - - queryParameters.push(diffLimit); - - const result = await pool.query(fullQuery, queryParameters); - - const apiResponse = {} as any; - - if (result.rows.length === 0) { - apiResponse.diffItems = []; - res.send(apiResponse); - return; - } - - const linearized = [] as any[]; - for (const row of result.rows) { - if ( - [DiffItemType.INPUT, DiffItemType.COLLATERAL].includes( - row.diffItemType - ) - ) { - linearized.push({ - type: DiffItemType.INPUT, - id: `${row.src_hash}:${row.index}`, - amount: row.value, - }); - } else { - linearized.push({ - type: DiffItemType.OUTPUT, - id: `${row.hash}:${row.index}`, - receiver: row.address, - amount: row.value, - assets: extractAssets(row.assets), 
- block_num: row.blockNumber, - tx_hash: row.hash, - tx_index: row.index, - }); - } - } - - const lastRow = result.rows[result.rowCount - 1]; - - apiResponse.lastDiffPointSelected = { - blockHash: lastRow.blockHash, - txHash: lastRow.hash, - paginationPointType: lastRow.diffItemType, - paginationPointValue: lastRow.paginationPointValue, - }; - apiResponse.diffItems = linearized; - - res.send(apiResponse); - break; - } - case "error": - throw new Error(verifiedAddresses.errMsg); - default: - return assertNever(verifiedAddresses); - } - }; diff --git a/src/services/utxoForAddress.ts b/src/services/utxoForAddress.ts deleted file mode 100644 index b1623b40..00000000 --- a/src/services/utxoForAddress.ts +++ /dev/null @@ -1,57 +0,0 @@ -import { Pool } from "pg"; -import { Request, Response } from "express"; - -import config from "config"; -import { - assertNever, - validateAddressesReq, - getAddressesByType, - extractAssets, -} from "../utils"; - -const utxoForAddressQuery = ` - SELECT * - FROM valid_utxos_view - WHERE address = any(($1)::varchar array) - OR payment_cred = any(($2)::bytea array); -`; - -const addressesRequestLimit: number = config.get("server.addressRequestLimit"); - -export const utxoForAddresses = - (pool: Pool) => async (req: Request, res: Response) => { - if (!req.body || !req.body.addresses) { - throw new Error("error, no addresses."); - return; - } - const addressTypes = getAddressesByType(req.body.addresses); - const verifiedAddresses = validateAddressesReq( - addressesRequestLimit, - req.body.addresses - ); - switch (verifiedAddresses.kind) { - case "ok": { - const result = await pool.query(utxoForAddressQuery, [ - [...addressTypes.legacyAddr, ...addressTypes.bech32], - addressTypes.paymentCreds, - ]); - const utxos = result.rows.map((utxo) => ({ - utxo_id: `${utxo.hash}:${utxo.index}`, - tx_hash: utxo.hash, - tx_index: utxo.index, - receiver: utxo.address, - amount: utxo.value.toString(), - dataHash: utxo.data_hash, - assets: extractAssets(utxo.assets), - block_num: utxo.blockNumber, - })); - res.send(utxos); - return; - } - case "error": - throw new Error(verifiedAddresses.errMsg); - return; - default: - return assertNever(verifiedAddresses); - } - }; diff --git a/src/services/utxoSumForAddress.ts b/src/services/utxoSumForAddress.ts deleted file mode 100644 index 0971f498..00000000 --- a/src/services/utxoSumForAddress.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { errMsgs, UtilEither } from "../utils"; -import { Pool } from "pg"; -import { UtxoSumResponse } from "../Transactions/types"; - -export const askUtxoSumForAddresses = async ( - pool: Pool, - addresses: string[] -): Promise<UtilEither<UtxoSumResponse>> => { - // TODO: support for payment keys - const sqlQuery = ` - SELECT SUM(value) as value - FROM valid_utxos_view - WHERE address = any(($1)::varchar array) - `; - - const tokensQuery = ` - SELECT SUM(ma_utxo.quantity) amount, - encode(ma.policy, 'hex') as policy, - encode(ma.name, 'hex') as name - FROM valid_utxos_view utxo - INNER JOIN ma_tx_out ma_utxo ON utxo.id = ma_utxo.tx_out_id - INNER JOIN multi_asset ma ON ma_utxo.ident = ma.id - WHERE address = any(($1)::varchar array) - GROUP BY - ma.policy, - ma.name; - `; - - if (addresses.length == 0) return { kind: "error", errMsg: errMsgs.noValue }; - - try { - const res = await pool.query(sqlQuery, [addresses]); - const totalAda = res.rows.length > 0 ?
res.rows[0].value : "0"; - - const tokensRes = await pool.query(tokensQuery, [addresses]); - - return { - kind: "ok", - value: { - sum: totalAda, - tokensBalance: tokensRes.rows.map((r) => { - return { - amount: r.amount, - assetId: `${r.policy}.${r.name}`, - }; - }), - }, - }; - } catch (err: any) { - const errString = err.stack + ""; - return { - kind: "error", - errMsg: "askUtxoSumForAddresses error: " + errString, - }; - } -}; diff --git a/src/services/validateNft.ts b/src/services/validateNft.ts deleted file mode 100644 index 629fecf9..00000000 --- a/src/services/validateNft.ts +++ /dev/null @@ -1,152 +0,0 @@ -import config from "config"; -import { Pool } from "pg"; -import { Request, Response } from "express"; - -import AWS from "aws-sdk"; - -const query = `SELECT encode(asset.policy, 'hex') as "policy", - encode(asset.name, 'escape') as "name", - meta.key as "meta_label", - meta.json as "metadata" -FROM multi_asset asset - INNER JOIN ma_tx_mint mint on asset.id = mint.ident - INNER JOIN tx on mint.tx_id = tx.id - INNER JOIN tx_metadata meta on tx.id = meta.tx_id -WHERE asset.fingerprint = $1::text`; - -const getLambda = (): AWS.Lambda => { - return new AWS.Lambda({ - region: config.get("aws.region"), - credentials: { - accessKeyId: config.get("aws.accessKeyId"), - secretAccessKey: config.get("aws.secretAccessKey"), - }, - }); -}; - -const verifyExistingAnalysis = async ( - lambda: AWS.Lambda, - fingerprint: string, - envName: string -) => { - const nftValidatorLambdaNameTemplate: string = config.get( - "aws.lambda.nftValidator" - ); - const functionName = nftValidatorLambdaNameTemplate.replace( - "{envName}", - envName - ); - - const response = await lambda - .invoke({ - FunctionName: functionName, - InvocationType: "RequestResponse", - Payload: JSON.stringify({ - action: "Verify", - fingerprint: fingerprint, - }), - }) - .promise(); - - if (response.FunctionError) throw new Error(response.FunctionError); - if (!response.Payload) throw new Error("unexpected error"); - - return JSON.parse(response.Payload.toString()); -}; - -const sendNftForAnalysis = async ( - lambda: AWS.Lambda, - fingerprint: string, - metadataImage: string, - envName: string -): Promise<void> => { - const nftValidatorLambdaNameTemplate: string = config.get( - "aws.lambda.nftValidator" - ); - const functionName = nftValidatorLambdaNameTemplate.replace( - "{envName}", - envName - ); - - const response = await lambda - .invoke({ - FunctionName: functionName, - InvocationType: "Event", - Payload: JSON.stringify({ - action: "Validate", - fingerprint: fingerprint, - metadata: { - image: metadataImage, - }, - }), - }) - .promise(); - - if (response.FunctionError) throw new Error(response.FunctionError); -}; - -export const handleValidateNft = - (pool: Pool) => async (req: Request, res: Response) => { - const fingerprint: string = req.params.fingerprint; - if (!fingerprint) { - return res.status(400).send({ - error: "missing fingerprint", - }); - } - - const envName = req.body.envName ??
"dev"; - - const lambda = getLambda(); - const existingAnalysis = await verifyExistingAnalysis( - lambda, - fingerprint, - envName - ); - if (existingAnalysis !== "NOT_FOUND") { - return res.status(200).send(existingAnalysis); - } - - const result = await pool.query(query, [fingerprint]); - if (result.rowCount === 0) { - return res.status(404).send({ - error: "Not found", - }); - } - - const item = result.rows[0]; - if (item.meta_label !== "721") { - return res.status(409).send({ - error: `the asset was found, but it has an incorrect metadata label. Expected '721, but it is ${item.meta_label}`, - }); - } - - if (!item.metadata[item.policy]) { - return res.status(409).send({ - metadata: item.metadata, - error: `missing policy ('${item.policy}') field on metadata`, - }); - } - - if (!item.metadata[item.policy][item.name]) { - return res.status(409).send({ - metadata: item.metadata, - error: `missing name ('${item.name}') field on metadata`, - }); - } - - const metadata = item.metadata[item.policy][item.name]; - if (!metadata.image) { - return res.status(409).send({ - metadata: item.metadata, - error: "missing image field on metadata", - }); - } - - if (req.query.skipValidation) { - return res.status(204).send(); - } - - await sendNftForAnalysis(lambda, fingerprint, metadata.image, envName); - - return res.status(202).send(); - };