From 069327fb8239472d3dc5647cd316b9fb883c3756 Mon Sep 17 00:00:00 2001
From: Jelte Fennema-Nio
Date: Fri, 15 Nov 2024 13:37:31 +0100
Subject: [PATCH] Fix running of Q28 by not removing important backslashes from regex
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Q28 contains backslashes; the backslash in `\1` in particular is essential to
the meaning of the query. However, both backslashes in this query were
accidentally stripped by almost all benchmark scripts, because they use `read`
instead of `read -r`. This adds the `-r` flag to every script that did not
have it yet, so that `read` passes backslashes through as literal `\`
characters instead of treating them as escape sequences (a minimal
demonstration of the difference is sketched after the diff). With this change
the REGEXP_REPLACE call no longer always returns the literal string "1", but
(as the query intends) the referrer domain.

This change generally makes the query significantly slower to execute, because
the GROUP BY now produces many more groups. I'm not sure how this should be
listed in the results UI; it might make sense to ignore the query there.

This problem was found independently in #47 and #72, but fixed there in a
different way. I'm not sure the starrocks fix in #72 is actually correct,
though, because it seems to fix the issue twice: it replaces `\1` with `\\1`,
but it also adds the `-r` flag to `read`.

Apart from those issues, a few benchmarks were already using the `-r` flag:

```
❯ rg 'while read -r'
elasticsearch/run.sh
5:cat 'queries.sql' | while read -r QUERY; do

starrocks/run.sh
8:cat queries.sql | while read -r query; do

doris/run.sh
8:while read -r query; do

crunchy-bridge-for-analytics/run.sh
22: while read -r query; do
```
---
 alloydb/run.sh | 2 +-
 athena/run.sh | 2 +-
 aurora-mysql/run.sh | 2 +-
 aurora-postgresql/run.sh | 2 +-
 bigquery/run.sh | 2 +-
 byconity/run.sh | 2 +-
 bytehouse/run.sh | 2 +-
 chdb-parquet/run.sh | 2 +-
 chdb/run.sh | 2 +-
 citus/run.sh | 2 +-
 clickhouse-cloud/run.sh | 2 +-
 clickhouse-datalake/run.sh | 2 +-
 clickhouse-parquet/run.sh | 2 +-
 clickhouse-web/run.sh | 2 +-
 clickhouse/extended/latency.sh | 2 +-
 clickhouse/extended/throughput.sh | 2 +-
 clickhouse/run.sh | 2 +-
 cloudberry/run.sh | 2 +-
 cratedb/run.sh | 2 +-
 databend/run.sh | 2 +-
 datafusion/run.sh | 2 +-
 datafusion/run2.sh | 2 +-
 doris/queries.sql | 2 +-
 druid/run.sh | 2 +-
 duckdb-parquet/run.sh | 2 +-
 duckdb/run.sh | 2 +-
 generate-results.sh | 2 +-
 glaredb/benchmark.sh | 2 +-
 gravitons/generate-results.sh | 2 +-
 greenplum/run.sh | 2 +-
 hardware/benchmark-chyt.sh | 2 +-
 hardware/benchmark-cloud.sh | 2 +-
 hardware/benchmark-new.sh | 2 +-
 hardware/benchmark-yql.sh | 2 +-
 hardware/generate-results.sh | 2 +-
 hardware/hardware.sh | 2 +-
 heavyai/run.sh | 2 +-
 hydra/run.sh | 2 +-
 infobright/run.sh | 2 +-
 kinetica/run.sh | 2 +-
 mariadb-columnstore/run.sh | 2 +-
 mariadb/run.sh | 2 +-
 monetdb/run.sh | 2 +-
 motherduck/run.sh | 2 +-
 mysql-myisam/run.sh | 2 +-
 mysql/run.sh | 2 +-
 oxla/run.sh | 2 +-
 paradedb/run.sh | 2 +-
 pinot/run.sh | 2 +-
 postgresql-tuned/run.sh | 2 +-
 postgresql/run.sh | 2 +-
 questdb-partitioned/run.sh | 2 +-
 questdb/run.sh | 2 +-
 redshift-serverless/run.sh | 2 +-
 redshift/run.sh | 2 +-
 singlestore/run.sh | 2 +-
 singlestore/run_on_cloud.sh | 2 +-
 sqlite/run.sh | 2 +-
 tablespace/run.sh | 2 +-
 tembo-olap/run.sh | 2 +-
 timescaledb-compressed/run.sh | 2 +-
 timescaledb/run.sh | 2 +-
 umbra/run.sh | 2 +-
 versions/scripts/benchmarks.sh | 6 +++---
 versions/unified_scripts/benchmarks.sh | 2 +-
 vertica/run.sh | 2 +-
 66 files changed, 68
insertions(+), 68 deletions(-) diff --git a/alloydb/run.sh b/alloydb/run.sh index a2fe56c5e..a00c1f6ef 100755 --- a/alloydb/run.sh +++ b/alloydb/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/athena/run.sh b/athena/run.sh index f1ce446f0..5d58552e8 100755 --- a/athena/run.sh +++ b/athena/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do for i in $(seq 1 $TRIES); do aws athena --output json start-query-execution --query-execution-context 'Database=test' --result-configuration "OutputLocation=${OUTPUT}" --query-string "${query}" | jq '.QueryExecutionId' done diff --git a/aurora-mysql/run.sh b/aurora-mysql/run.sh index ee264bfaf..300b25b31 100755 --- a/aurora-mysql/run.sh +++ b/aurora-mysql/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do for i in $(seq 1 $TRIES); do mysql -h "${FQDN}" -u admin --password="${PASSWORD}" test -vvv -e "${query}" done; diff --git a/aurora-postgresql/run.sh b/aurora-postgresql/run.sh index 990f0341f..145d84735 100755 --- a/aurora-postgresql/run.sh +++ b/aurora-postgresql/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do echo "$query"; for i in $(seq 1 $TRIES); do psql -U postgres -h "${FQDN}" test -t -c '\timing' -c "$query" | grep 'Time' diff --git a/bigquery/run.sh b/bigquery/run.sh index 1a48f9a1c..90d95bb29 100755 --- a/bigquery/run.sh +++ b/bigquery/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do echo "$query"; for i in $(seq 1 $TRIES); do time bq query --use_legacy_sql=false --use_cache=false <<< "$query" diff --git a/byconity/run.sh b/byconity/run.sh index 0b54c15c0..720c67860 100755 --- a/byconity/run.sh +++ b/byconity/run.sh @@ -2,7 +2,7 @@ TRIES=3 QUERY_NUM=1 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do [ -z "$FQDN" ] && sync [ -z "$FQDN" ] && echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null diff --git a/bytehouse/run.sh b/bytehouse/run.sh index c00ea8ea0..a5e3e63d5 100644 --- a/bytehouse/run.sh +++ b/bytehouse/run.sh @@ -1,7 +1,7 @@ #!/bin/bash TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do for i in $(seq 1 $TRIES); do ./bytehouse-cli --user "$user" --account "$account" --password "$password" --region ap-southeast-1 --secure --warehouse "$warehouse" --database test --query "${query}" done diff --git a/chdb-parquet/run.sh b/chdb-parquet/run.sh index 64df8c608..02cb4f6d7 100755 --- a/chdb-parquet/run.sh +++ b/chdb-parquet/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null diff --git a/chdb/run.sh b/chdb/run.sh index 64df8c608..02cb4f6d7 100755 --- a/chdb/run.sh +++ b/chdb/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null diff --git a/citus/run.sh b/citus/run.sh index 7adee7c46..00d65ab2d 100755 --- a/citus/run.sh +++ b/citus/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/clickhouse-cloud/run.sh 
b/clickhouse-cloud/run.sh index 76b95722b..d24e6465d 100755 --- a/clickhouse-cloud/run.sh +++ b/clickhouse-cloud/run.sh @@ -2,7 +2,7 @@ TRIES=3 QUERY_NUM=1 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do echo -n "[" for i in $(seq 1 $TRIES); do clickhouse-client --host "${FQDN:=localhost}" --password "${PASSWORD:=}" ${PASSWORD:+--secure} --time --format=Null --query="$query" --progress 0 2>&1 | diff --git a/clickhouse-datalake/run.sh b/clickhouse-datalake/run.sh index 0fd7f5694..eb1f30f98 100755 --- a/clickhouse-datalake/run.sh +++ b/clickhouse-datalake/run.sh @@ -2,7 +2,7 @@ TRIES=3 QUERY_NUM=1 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do echo -n "[" for i in $(seq 1 $TRIES); do diff --git a/clickhouse-parquet/run.sh b/clickhouse-parquet/run.sh index 7423e6c21..86d94375b 100755 --- a/clickhouse-parquet/run.sh +++ b/clickhouse-parquet/run.sh @@ -2,7 +2,7 @@ TRIES=3 QUERY_NUM=1 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null diff --git a/clickhouse-web/run.sh b/clickhouse-web/run.sh index e450083a2..a2394eeaa 100755 --- a/clickhouse-web/run.sh +++ b/clickhouse-web/run.sh @@ -2,7 +2,7 @@ TRIES=3 QUERY_NUM=1 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do clickhouse-client --query "SYSTEM DROP FILESYSTEM CACHE" echo -n "[" diff --git a/clickhouse/extended/latency.sh b/clickhouse/extended/latency.sh index 8eda66f59..00a55b4a7 100755 --- a/clickhouse/extended/latency.sh +++ b/clickhouse/extended/latency.sh @@ -5,7 +5,7 @@ PASSWORD="$2" TRIES=10 -cat queries_latency.sql | while read query; do +cat queries_latency.sql | while read -r query; do echo "$query" clickhouse-local --query "SELECT format(\$\$ $query \$\$, c1) FROM file('random_counters.tsv') ORDER BY rand() LIMIT ${TRIES} FORMAT TSV" | clickhouse-benchmark --concurrency 10 --iterations "${TRIES}" --delay 0 --secure --host "$FQDN" --password "$PASSWORD" 2>&1 | grep -F '50.000%' diff --git a/clickhouse/extended/throughput.sh b/clickhouse/extended/throughput.sh index 333309134..1bd827012 100755 --- a/clickhouse/extended/throughput.sh +++ b/clickhouse/extended/throughput.sh @@ -5,7 +5,7 @@ PASSWORD="$2" TRIES=3 QUERY_NUM=1 -cat queries_throughput.sql | while read query; do +cat queries_throughput.sql | while read -r query; do echo -n "[" for i in $(seq 1 $TRIES); do RES=$(clickhouse-client --host "$FQDN" --password "$PASSWORD" --secure --time --format=Null --query="$query" 2>&1 ||:) diff --git a/clickhouse/run.sh b/clickhouse/run.sh index f8957fe48..bdee844f6 100755 --- a/clickhouse/run.sh +++ b/clickhouse/run.sh @@ -11,7 +11,7 @@ fi TRIES=3 QUERY_NUM=1 -cat queries"$SUFFIX".sql | while read query; do +cat queries"$SUFFIX".sql | while read -r query; do [ -z "$FQDN" ] && sync [ -z "$FQDN" ] && echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null diff --git a/cloudberry/run.sh b/cloudberry/run.sh index 1eb622de9..036d9d948 100644 --- a/cloudberry/run.sh +++ b/cloudberry/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/cratedb/run.sh b/cratedb/run.sh index 477f46328..d7ae899e5 100755 --- a/cratedb/run.sh +++ b/cratedb/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/databend/run.sh 
b/databend/run.sh index 9a2517799..8e53bbae5 100755 --- a/databend/run.sh +++ b/databend/run.sh @@ -2,7 +2,7 @@ TRIES=3 QUERY_NUM=1 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do [ -z "$FQDN" ] && sync [ -z "$FQDN" ] && echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null diff --git a/datafusion/run.sh b/datafusion/run.sh index a6cc85626..9dccb3878 100755 --- a/datafusion/run.sh +++ b/datafusion/run.sh @@ -19,7 +19,7 @@ fi TRIES=3 QUERY_NUM=1 echo $1 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null diff --git a/datafusion/run2.sh b/datafusion/run2.sh index 901badd57..d5a707427 100644 --- a/datafusion/run2.sh +++ b/datafusion/run2.sh @@ -1,7 +1,7 @@ #!/bin/bash QUERY_NUM=1 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null diff --git a/doris/queries.sql b/doris/queries.sql index 77ed25865..28dbd29db 100644 --- a/doris/queries.sql +++ b/doris/queries.sql @@ -26,7 +26,7 @@ SELECT SearchPhrase FROM hits WHERE SearchPhrase <> '' ORDER BY EventTime LIMIT SELECT SearchPhrase FROM hits WHERE SearchPhrase <> '' ORDER BY SearchPhrase LIMIT 10; SELECT SearchPhrase FROM hits WHERE SearchPhrase <> '' ORDER BY EventTime, SearchPhrase LIMIT 10; SELECT CounterID, AVG(length(URL)) AS l, COUNT(*) AS c FROM hits WHERE URL <> '' GROUP BY CounterID HAVING COUNT(*) > 100000 ORDER BY l DESC LIMIT 25; -SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\\1') AS k, AVG(length(Referer)) AS l, COUNT(*) AS c, MIN(Referer) FROM hits WHERE Referer <> '' GROUP BY k HAVING COUNT(*) > 100000 ORDER BY l DESC LIMIT 25; +SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS k, AVG(length(Referer)) AS l, COUNT(*) AS c, MIN(Referer) FROM hits WHERE Referer <> '' GROUP BY k HAVING COUNT(*) > 100000 ORDER BY l DESC LIMIT 25; SELECT SUM(ResolutionWidth), SUM(ResolutionWidth + 1), SUM(ResolutionWidth + 2), SUM(ResolutionWidth + 3), SUM(ResolutionWidth + 4), SUM(ResolutionWidth + 5), SUM(ResolutionWidth + 6), SUM(ResolutionWidth + 7), SUM(ResolutionWidth + 8), SUM(ResolutionWidth + 9), SUM(ResolutionWidth + 10), SUM(ResolutionWidth + 11), SUM(ResolutionWidth + 12), SUM(ResolutionWidth + 13), SUM(ResolutionWidth + 14), SUM(ResolutionWidth + 15), SUM(ResolutionWidth + 16), SUM(ResolutionWidth + 17), SUM(ResolutionWidth + 18), SUM(ResolutionWidth + 19), SUM(ResolutionWidth + 20), SUM(ResolutionWidth + 21), SUM(ResolutionWidth + 22), SUM(ResolutionWidth + 23), SUM(ResolutionWidth + 24), SUM(ResolutionWidth + 25), SUM(ResolutionWidth + 26), SUM(ResolutionWidth + 27), SUM(ResolutionWidth + 28), SUM(ResolutionWidth + 29), SUM(ResolutionWidth + 30), SUM(ResolutionWidth + 31), SUM(ResolutionWidth + 32), SUM(ResolutionWidth + 33), SUM(ResolutionWidth + 34), SUM(ResolutionWidth + 35), SUM(ResolutionWidth + 36), SUM(ResolutionWidth + 37), SUM(ResolutionWidth + 38), SUM(ResolutionWidth + 39), SUM(ResolutionWidth + 40), SUM(ResolutionWidth + 41), SUM(ResolutionWidth + 42), SUM(ResolutionWidth + 43), SUM(ResolutionWidth + 44), SUM(ResolutionWidth + 45), SUM(ResolutionWidth + 46), SUM(ResolutionWidth + 47), SUM(ResolutionWidth + 48), SUM(ResolutionWidth + 49), SUM(ResolutionWidth + 50), SUM(ResolutionWidth + 51), SUM(ResolutionWidth + 52), SUM(ResolutionWidth + 53), SUM(ResolutionWidth + 54), SUM(ResolutionWidth + 55), SUM(ResolutionWidth + 56), SUM(ResolutionWidth + 57), SUM(ResolutionWidth + 
58), SUM(ResolutionWidth + 59), SUM(ResolutionWidth + 60), SUM(ResolutionWidth + 61), SUM(ResolutionWidth + 62), SUM(ResolutionWidth + 63), SUM(ResolutionWidth + 64), SUM(ResolutionWidth + 65), SUM(ResolutionWidth + 66), SUM(ResolutionWidth + 67), SUM(ResolutionWidth + 68), SUM(ResolutionWidth + 69), SUM(ResolutionWidth + 70), SUM(ResolutionWidth + 71), SUM(ResolutionWidth + 72), SUM(ResolutionWidth + 73), SUM(ResolutionWidth + 74), SUM(ResolutionWidth + 75), SUM(ResolutionWidth + 76), SUM(ResolutionWidth + 77), SUM(ResolutionWidth + 78), SUM(ResolutionWidth + 79), SUM(ResolutionWidth + 80), SUM(ResolutionWidth + 81), SUM(ResolutionWidth + 82), SUM(ResolutionWidth + 83), SUM(ResolutionWidth + 84), SUM(ResolutionWidth + 85), SUM(ResolutionWidth + 86), SUM(ResolutionWidth + 87), SUM(ResolutionWidth + 88), SUM(ResolutionWidth + 89) FROM hits; SELECT SearchEngineID, ClientIP, COUNT(*) AS c, SUM(IsRefresh), AVG(ResolutionWidth) FROM hits WHERE SearchPhrase <> '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10; SELECT WatchID, ClientIP, COUNT(*) AS c, SUM(IsRefresh), AVG(ResolutionWidth) FROM hits WHERE SearchPhrase <> '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10; diff --git a/druid/run.sh b/druid/run.sh index faa88431b..2ffb72838 100755 --- a/druid/run.sh +++ b/druid/run.sh @@ -1,7 +1,7 @@ #!/bin/bash TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync for i in $(seq 1 100); do CHECK=$(curl -o /dev/null -w '%{http_code}' -s -XPOST -H'Content-Type: application/json' http://localhost:8888/druid/v2/sql/ -d @check.json }) diff --git a/duckdb-parquet/run.sh b/duckdb-parquet/run.sh index 64df8c608..02cb4f6d7 100755 --- a/duckdb-parquet/run.sh +++ b/duckdb-parquet/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null diff --git a/duckdb/run.sh b/duckdb/run.sh index 64df8c608..02cb4f6d7 100755 --- a/duckdb/run.sh +++ b/duckdb/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null diff --git a/generate-results.sh b/generate-results.sh index e4dfad1f3..4952c614f 100755 --- a/generate-results.sh +++ b/generate-results.sh @@ -14,7 +14,7 @@ fi sed '/^const data = \[$/q' index.html FIRST=1 - LANG="" ls -1 */results/*.json | while read file + LANG="" ls -1 */results/*.json | while read -r file do [[ $file =~ ^(hardware|versions|gravitons)/ ]] && continue; diff --git a/glaredb/benchmark.sh b/glaredb/benchmark.sh index 902c66654..8eb96608a 100755 --- a/glaredb/benchmark.sh +++ b/glaredb/benchmark.sh @@ -7,7 +7,7 @@ curl https://glaredb.com/install.sh | sh wget https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/hits_compatible/athena/hits.parquet -cat queries.sql | while read query +cat queries.sql | while read -r query do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/gravitons/generate-results.sh b/gravitons/generate-results.sh index 6ff8efb4e..e359b771d 100755 --- a/gravitons/generate-results.sh +++ b/gravitons/generate-results.sh @@ -7,7 +7,7 @@ sed '/^const data = \[$/q' index.html FIRST=1 - ls -1 results/*.json | while read file + ls -1 results/*.json | while read -r file do [ "${FIRST}" = "0" ] && echo -n ',' jq --compact-output ". 
+= {\"source\": \"${file}\"}" "${file}" diff --git a/greenplum/run.sh b/greenplum/run.sh index 1eb622de9..036d9d948 100755 --- a/greenplum/run.sh +++ b/greenplum/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/hardware/benchmark-chyt.sh b/hardware/benchmark-chyt.sh index 778ce4f86..895400c7d 100755 --- a/hardware/benchmark-chyt.sh +++ b/hardware/benchmark-chyt.sh @@ -4,7 +4,7 @@ QUERIES_FILE="queries.sql" TABLE=$1 TRIES=3 -cat "$QUERIES_FILE" | sed "s|{table}|\"${TABLE}\"|g" | while read query; do +cat "$QUERIES_FILE" | sed "s|{table}|\"${TABLE}\"|g" | while read -r query; do echo -n "[" for i in $(seq 1 $TRIES); do diff --git a/hardware/benchmark-cloud.sh b/hardware/benchmark-cloud.sh index 01376e400..dc85a0708 100755 --- a/hardware/benchmark-cloud.sh +++ b/hardware/benchmark-cloud.sh @@ -20,7 +20,7 @@ fi QUERY_ID_PREFIX="benchmark_$RANDOM" QUERY_NUM=1 -cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read query +cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read -r query do for i in $(seq 1 $TRIES) do diff --git a/hardware/benchmark-new.sh b/hardware/benchmark-new.sh index 0c4cad6e5..9ab480425 100755 --- a/hardware/benchmark-new.sh +++ b/hardware/benchmark-new.sh @@ -15,7 +15,7 @@ else exit 1 fi -cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read query; do +cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null diff --git a/hardware/benchmark-yql.sh b/hardware/benchmark-yql.sh index 7d30d39e7..184bfd760 100755 --- a/hardware/benchmark-yql.sh +++ b/hardware/benchmark-yql.sh @@ -4,7 +4,7 @@ QUERIES_FILE="queries.sql" TABLE=$1 TRIES=3 -cat "$QUERIES_FILE" | sed "s|{table}|\"${TABLE}\"|g" | while read query; do +cat "$QUERIES_FILE" | sed "s|{table}|\"${TABLE}\"|g" | while read -r query; do echo -n "[" for i in $(seq 1 $TRIES); do diff --git a/hardware/generate-results.sh b/hardware/generate-results.sh index d18c02049..d242d0681 100755 --- a/hardware/generate-results.sh +++ b/hardware/generate-results.sh @@ -14,7 +14,7 @@ fi sed '/^const data = \[$/q' index.html FIRST=1 - ls -1 results/*.json | while read file + ls -1 results/*.json | while read -r file do [ "${FIRST}" = "0" ] && echo -n ',' jq --compact-output ". 
+= {\"source\": \"${file}\"}" "${file}" diff --git a/hardware/hardware.sh b/hardware/hardware.sh index 2e656a7b3..512498ad7 100755 --- a/hardware/hardware.sh +++ b/hardware/hardware.sh @@ -63,7 +63,7 @@ echo >result.csv QUERY_NUM=1 -cat "$QUERIES_FILE" | sed "s/{table}/hits/g" | while read query; do +cat "$QUERIES_FILE" | sed "s/{table}/hits/g" | while read -r query; do sync if [ "${OS}" = "Darwin" ] then diff --git a/heavyai/run.sh b/heavyai/run.sh index 97387bee4..2a264a7b4 100755 --- a/heavyai/run.sh +++ b/heavyai/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/hydra/run.sh b/hydra/run.sh index ad95151ab..d6b36853b 100755 --- a/hydra/run.sh +++ b/hydra/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/infobright/run.sh b/infobright/run.sh index 36e5fa167..78ee1db19 100755 --- a/infobright/run.sh +++ b/infobright/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/kinetica/run.sh b/kinetica/run.sh index 968c8c6fd..13e03d14c 100755 --- a/kinetica/run.sh +++ b/kinetica/run.sh @@ -4,7 +4,7 @@ export KI_PWD=admin TRIES=3 QUERY_NUM=1 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do [ -z "$FQDN" ] && sync [ -z "$FQDN" ] && echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null diff --git a/mariadb-columnstore/run.sh b/mariadb-columnstore/run.sh index b917a815d..55534aa39 100755 --- a/mariadb-columnstore/run.sh +++ b/mariadb-columnstore/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/mariadb/run.sh b/mariadb/run.sh index 5f80a539b..def657cf6 100755 --- a/mariadb/run.sh +++ b/mariadb/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/monetdb/run.sh b/monetdb/run.sh index c1eadeab2..c84e86027 100755 --- a/monetdb/run.sh +++ b/monetdb/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/motherduck/run.sh b/motherduck/run.sh index d71b63120..e519ca985 100755 --- a/motherduck/run.sh +++ b/motherduck/run.sh @@ -1,5 +1,5 @@ #!/bin/bash -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do ./query.py <<< "${query}" done diff --git a/mysql-myisam/run.sh b/mysql-myisam/run.sh index 31f31682f..c39b3ead2 100755 --- a/mysql-myisam/run.sh +++ b/mysql-myisam/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/mysql/run.sh b/mysql/run.sh index 31f31682f..c39b3ead2 100755 --- a/mysql/run.sh +++ b/mysql/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/oxla/run.sh b/oxla/run.sh index b222b6cd8..5110faa2b 100755 --- a/oxla/run.sh +++ b/oxla/run.sh @@ -2,7 +2,7 @@ TRIES=3 rm result.txt 2>/dev/null -cat queries.sql | while read query; do +cat queries.sql | while read -r query; 
do sync echo 3 | sudo tee /proc/sys/vm/drop_caches 1>/dev/null diff --git a/paradedb/run.sh b/paradedb/run.sh index 1113eacc7..c89a45e36 100755 --- a/paradedb/run.sh +++ b/paradedb/run.sh @@ -3,7 +3,7 @@ TRIES=3 export PGPASSWORD='postgres' -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/pinot/run.sh b/pinot/run.sh index 9434c6ee8..5f5ea4976 100755 --- a/pinot/run.sh +++ b/pinot/run.sh @@ -1,7 +1,7 @@ #!/bin/bash TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null echo -n "[" diff --git a/postgresql-tuned/run.sh b/postgresql-tuned/run.sh index ad95151ab..d6b36853b 100755 --- a/postgresql-tuned/run.sh +++ b/postgresql-tuned/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/postgresql/run.sh b/postgresql/run.sh index ad95151ab..d6b36853b 100755 --- a/postgresql/run.sh +++ b/postgresql/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/questdb-partitioned/run.sh b/questdb-partitioned/run.sh index c3e581efc..0159343fd 100755 --- a/questdb-partitioned/run.sh +++ b/questdb-partitioned/run.sh @@ -6,7 +6,7 @@ questdb/bin/questdb.sh stop questdb/bin/questdb.sh start sleep 5 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/questdb/run.sh b/questdb/run.sh index c3e581efc..0159343fd 100755 --- a/questdb/run.sh +++ b/questdb/run.sh @@ -6,7 +6,7 @@ questdb/bin/questdb.sh stop questdb/bin/questdb.sh start sleep 5 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/redshift-serverless/run.sh b/redshift-serverless/run.sh index cafec743c..f4ec7705b 100755 --- a/redshift-serverless/run.sh +++ b/redshift-serverless/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do echo "$query"; for i in $(seq 1 $TRIES); do psql -h "${FQDN}" -U awsuser -d dev -p 5439 -t -c 'SET enable_result_cache_for_session = off' -c '\timing' -c "$query" | grep 'Time' diff --git a/redshift/run.sh b/redshift/run.sh index cafec743c..f4ec7705b 100755 --- a/redshift/run.sh +++ b/redshift/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do echo "$query"; for i in $(seq 1 $TRIES); do psql -h "${FQDN}" -U awsuser -d dev -p 5439 -t -c 'SET enable_result_cache_for_session = off' -c '\timing' -c "$query" | grep 'Time' diff --git a/singlestore/run.sh b/singlestore/run.sh index 1ccaeffed..223d18b3c 100755 --- a/singlestore/run.sh +++ b/singlestore/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/singlestore/run_on_cloud.sh b/singlestore/run_on_cloud.sh index b3245e697..3622e86a6 100755 --- a/singlestore/run_on_cloud.sh +++ b/singlestore/run_on_cloud.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync mysql -u admin -h $ENDPOINT -P 3306 --default-auth=mysql_native_password --database=test -vvv -e "${query}" diff --git 
a/sqlite/run.sh b/sqlite/run.sh index e3eccc1cc..fee79bf7f 100755 --- a/sqlite/run.sh +++ b/sqlite/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/tablespace/run.sh b/tablespace/run.sh index e5382d4a3..9107bb353 100644 --- a/tablespace/run.sh +++ b/tablespace/run.sh @@ -4,7 +4,7 @@ TRIES=3 HOSTNAME="" PASSWORD="" -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/tembo-olap/run.sh b/tembo-olap/run.sh index f13c1b13c..bb936f810 100755 --- a/tembo-olap/run.sh +++ b/tembo-olap/run.sh @@ -5,7 +5,7 @@ PASSWORD=$2 TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do echo "$query"; for i in $(seq 1 $TRIES); do psql "host=$HOSTNAME port=5432 dbname=test user=postgres password=$PASSWORD sslmode=require" -t -c '\timing' -c "$query" | grep 'Time' diff --git a/timescaledb-compressed/run.sh b/timescaledb-compressed/run.sh index 198ab5461..493a3bff9 100755 --- a/timescaledb-compressed/run.sh +++ b/timescaledb-compressed/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/timescaledb/run.sh b/timescaledb/run.sh index 84edae1f3..d9a6cd459 100755 --- a/timescaledb/run.sh +++ b/timescaledb/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches diff --git a/umbra/run.sh b/umbra/run.sh index 2329d3a61..1f904fe92 100755 --- a/umbra/run.sh +++ b/umbra/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches umbra/bin/server -createSSLFiles -certFile db/umbra.cert -keyFile db/umbra.pem -address 0.0.0.0 db/umbra.db &> umbra.log & diff --git a/versions/scripts/benchmarks.sh b/versions/scripts/benchmarks.sh index 4e6baff42..435bb72da 100644 --- a/versions/scripts/benchmarks.sh +++ b/versions/scripts/benchmarks.sh @@ -21,7 +21,7 @@ ${CLICKHOUSE_CLIENT} --query 'SELECT version();' echo "Brown Benchmark:" -cat "$BROWN_QUERIES_FILE" | while read query; do +cat "$BROWN_QUERIES_FILE" | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null @@ -36,7 +36,7 @@ done echo "SSB Benchmark:" -cat "$SSB_QUERIES_FILE" | while read query; do +cat "$SSB_QUERIES_FILE" | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null @@ -52,7 +52,7 @@ done echo "ClickHouse Benchmark:" -cat "$CH_QUERIES_FILE" | sed "s/{table}/${CH_TABLE}/g" | while read query; do +cat "$CH_QUERIES_FILE" | sed "s/{table}/${CH_TABLE}/g" | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null diff --git a/versions/unified_scripts/benchmarks.sh b/versions/unified_scripts/benchmarks.sh index 141743bc7..5dccb5303 100755 --- a/versions/unified_scripts/benchmarks.sh +++ b/versions/unified_scripts/benchmarks.sh @@ -7,7 +7,7 @@ TRIES=5 for i in {1..10}; do ${CLICKHOUSE_CLIENT} --query 'SELECT version();' > /dev/null && break || sleep 1; done -cat "all_queries.sql" | while read query; do +cat "all_queries.sql" | while read -r query; do [ -z "$HOST" ] && sync if [ -z "$HOST" ]; then echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null; diff --git a/vertica/run.sh b/vertica/run.sh index 7638dbb02..ffb78d3dc 
100755 --- a/vertica/run.sh +++ b/vertica/run.sh @@ -2,7 +2,7 @@ TRIES=3 -cat queries.sql | while read query; do +cat queries.sql | while read -r query; do sync echo 3 | sudo tee /proc/sys/vm/drop_caches
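
A note on the `read` vs `read -r` behaviour described in the commit message: the sketch below is not part of the patch, and the abbreviated `q28` query string is only illustrative; it simply shows how the back-reference gets mangled without `-r`.

```
#!/bin/bash
# Minimal sketch, not part of the patch: why `read -r` matters for Q28.
# The query text is abbreviated; only the regex and the '\1' back-reference matter here.
q28="SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*\$', '\1') AS k FROM hits"

echo "without -r (backslash escapes are consumed: '\.' -> '.', '\1' -> '1'):"
printf '%s\n' "$q28" | while read query; do echo "$query"; done

echo "with -r (line is read verbatim, the back-reference survives):"
printf '%s\n' "$q28" | while read -r query; do echo "$query"; done
```

In the first loop the replacement string arrives as a literal `1`, so the un-fixed scripts grouped nearly every referrer under the single key "1"; in the second loop `\1` reaches the database intact and the query groups by referrer domain as intended. (ShellCheck flags the un-fixed pattern as SC2162: "read without -r will mangle backslashes".)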