Skip to content

Commit

Permalink
feat: added config to run benchmarks
Browse files Browse the repository at this point in the history
  • Loading branch information
paulobressan committed Dec 5, 2023
1 parent cab4d92 commit ac139cc
Show file tree
Hide file tree
Showing 4 changed files with 227 additions and 0 deletions.
1 change: 1 addition & 0 deletions benchmark/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
.env
5 changes: 5 additions & 0 deletions benchmark/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Minimal pgbench runner image.
# NOTE(review): alpine:latest is unpinned — consider pinning a version tag for
# reproducible benchmark results across rebuilds.
FROM alpine:latest

# postgresql-contrib ships the pgbench binary; --no-cache keeps the image small.
RUN apk --no-cache add postgresql-contrib

# Any arguments given to `docker run` are passed straight to pgbench.
ENTRYPOINT [ "pgbench" ]
58 changes: 58 additions & 0 deletions benchmark/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# Benchmark

This folder contains the configuration to run benchmarks against a Postgres db-sync database. Docker is used to run the `pgbench` tool, and `bench.sql` holds a set of common queries.

## Compile docker image

To use the image, it first needs to be built:

```bash
docker build -t pgbench .
```

## Environment

pgbench needs some environment variables to connect to the database. Create a file named `.env` and set the variables below:

| Key | Value |
| ---------- | ----- |
| PGDATABASE | |
| PGHOST | |
| PGPORT | |
| PGUSER | |
| PGPASSWORD | |

## Run benchmark

To run the benchmark, run the Docker image built before, passing the required pgbench parameters:

```bash
docker run --env-file .env --network host --volume ./bench.sql:/bench.sql pgbench:latest -c 10 -T 5 -n -f /bench.sql
```

- `-c` number of concurrent clients
- `-T` execution time (seconds)
- `-n` skip vacuuming; required when running custom scripts
- `-f` path to the script file

More parameters are documented at
https://www.postgresql.org/docs/devel/pgbench.html

### Metrics example

Example output when the command finishes:

```
transaction type: /bench.sql
scaling factor: 1
query mode: simple
number of clients: 4
number of threads: 1
maximum number of tries: 1
duration: 10 s
number of transactions actually processed: 16
number of failed transactions: 0 (0.000%)
latency average = 1562.050 ms
initial connection time = 3951.848 ms
tps = 2.560738 (without initial connection time)
```
163 changes: 163 additions & 0 deletions benchmark/bench.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
--- Select latest epoch parameters
-- Single newest row of epoch_param: the protocol parameters currently in force.
select ep.*
from epoch_param as ep
order by ep.epoch_no desc
limit 1;

--- Select assets by policy and apply filters by metadata
-- Paginated list of assets minted under one fixed policy, joined with the
-- on-chain metadata of each mint transaction, keeping only assets whose
-- metadata JSON contains a 'type' value of 'Orc' two levels deep ($.*.*.type).
-- asset: every mint of the target policy, with readable name/policy encodings.
with asset as (
select
ma_tx_mint.tx_id,
encode(multi_asset.name, 'escape') as name,
encode(multi_asset.policy, 'hex') as policy,
multi_asset.fingerprint
from
multi_asset
inner join
ma_tx_mint on
ma_tx_mint.ident = multi_asset.id
where
multi_asset.policy = '\x8f80ebfaf62a8c33ae2adf047572604c74db8bc1daba2b43f9a65635'
-- metadata: tx_metadata rows belonging to the mint transactions found above.
),metadata as (
select
tx_metadata.tx_id,
tx_metadata.json as metadata
from
tx_metadata
where
tx_metadata.tx_id in (select tx_id from asset)
)
select
*,
-- total match count for pagination, computed alongside the page itself
count(*) over () as count
from
asset
inner join metadata on
asset.tx_id = metadata.tx_id
where
-- jsonb_path_query_array collects all $.*.*.type values into a jsonb array;
-- ?| matches when any of them equals 'Orc'
jsonb_path_query_array(metadata.metadata,'$.*.*.type') ?| array['Orc']
order by
asset.name asc
limit 20 offset 0;

--- Select total assets by policy from stake address
-- Total quantity of every asset under one policy currently held (unspent) by
-- a stake address, grouped into a single row per policy.
select
    sum(mto.quantity) as quantity,
    encode(ma.policy, 'hex') as policy
from utxo_view as uv
inner join stake_address as sa
    on sa.id = uv.stake_address_id
inner join ma_tx_out as mto
    on mto.tx_out_id = uv.id
inner join multi_asset as ma
    on ma.id = mto.ident
where
    sa."view" = 'stake1u90nkx5yw6qkpas3kxa0wcql93axph8fetw20l0j2ntszucgg4rr2'
    and ma.policy = '\xb7761c472eef3b6e0505441efaf940892bb59c01be96070b0a0a89b3'
group by ma.policy;

--- Select all assets from a stake address
-- Every asset currently held (via utxo_view) by a stake address, together
-- with the metadata of the asset's mint transaction, paginated 20 per page.
-- NOTE(review): offset 1 skips the first row — presumably a leftover from
-- testing pagination; confirm whether offset 0 was intended.
-- NOTE(review): an asset minted in more than one transaction joins to several
-- ma_tx_mint/tx_metadata rows and will appear multiple times — verify intended.
select
ma_tx_out.tx_out_id,
ma_tx_out.quantity,
encode(multi_asset.name, 'escape') as name,
encode(multi_asset.policy, 'hex') as policy,
multi_asset.fingerprint,
tx_metadata.json as metadata,
-- total match count for pagination, computed alongside the page itself
count(*) over () as count
from
utxo_view
inner join
stake_address on stake_address.id = utxo_view.stake_address_id
inner join
ma_tx_out on ma_tx_out.tx_out_id = utxo_view.id
inner join
multi_asset on multi_asset.id = ma_tx_out.ident
inner join
ma_tx_mint on ma_tx_mint.ident = multi_asset.id
inner join
tx_metadata on tx_metadata.tx_id = ma_tx_mint.tx_id
where
stake_address.view = 'stake1u90nkx5yw6qkpas3kxa0wcql93axph8fetw20l0j2ntszucgg4rr2'
order by
multi_asset.name asc
limit 20 offset 1;

--- Select all utxos from a stake address
-- Unspent outputs for a stake address: the left join to tx_in plus the
-- `tx_in.tx_in_id is null` filter keeps only outputs never consumed as an
-- input (the classic anti-join unspent-output pattern).
select
tx_out.id,
tx.hash,
tx_out.index,
tx_out.address,
tx_out.value
from
tx_out
left join
tx_in on tx_out.tx_id = tx_in.tx_out_id and tx_out.index::smallint = tx_in.tx_out_index::smallint
left join
tx on tx.id = tx_out.tx_id
left join
block on tx.block_id = block.id
inner join
stake_address on stake_address.id = tx_out.stake_address_id
where
-- NOTE(review): `block.epoch_no is not null` makes the left join to block
-- behave as an inner join, dropping outputs of blocks without an epoch —
-- presumably intentional (e.g. excluding epoch boundary blocks); confirm.
tx_in.tx_in_id is null and
block.epoch_no is not null and
stake_address.view = 'stake1u90nkx5yw6qkpas3kxa0wcql93axph8fetw20l0j2ntszucgg4rr2';

--- Select slot number of the most recent block
-- Slot of the highest-numbered block, i.e. the current chain tip.
select b.slot_no
from block as b
where b.block_no is not null
order by b.block_no desc
limit 1;

--- Select current valid pools
-- A pool counts as current when its most recent registration certificate
-- (max registered_tx_id per hash_id) exists and no retirement certificate
-- for that pool takes effect at or before the chain-tip epoch.
-- NOTE(review): a retirement followed by a later re-registration would still
-- exclude the pool here, since the NOT EXISTS does not compare certificate
-- ordering — confirm this matches the intended pool-status semantics.
select
*
from
pool_update
where
-- keep only each pool's latest registration certificate
registered_tx_id in (select max(registered_tx_id) from pool_update group by hash_id)
and
not exists(
select
*
from
pool_retire
where
pool_retire.hash_id = pool_update.hash_id
and
-- retiring_epoch <= tip epoch means the retirement has already applied
pool_retire.retiring_epoch <= (select max (epoch_no) from block)
);

--- Select the stake address for a given Shelley address
-- Resolves a payment address to the stake address recorded on its outputs.
select
    sa.id as stake_address_id,
    txo.address,
    sa.view as stake_address
from tx_out as txo
inner join stake_address as sa
    on txo.stake_address_id = sa.id
where txo.address = 'addr1q8u4wgd8qplhxpt4xm2l8yagy5ng7veurwrns2ysh03zuh2l8vdgga5pvrmprvd67asp7tr6vrwwnjku5l7ly4xhq9esr9h59t';

--- Select transaction outputs for specified transaction hash
-- All outputs of the transaction with the given hash (bytea hex literal).
select txo.*
from tx_out as txo
inner join tx
    on txo.tx_id = tx.id
where tx.hash = '\xabd21556d9bb817d436e33a5fa32619702633dc809e707a5297566e9d74d57c1';

0 comments on commit ac139cc

Please sign in to comment.