From 68619246d78d37afea86146c3db751e24e7cd30b Mon Sep 17 00:00:00 2001 From: susan-pgedge Date: Mon, 26 Aug 2024 16:04:25 +0000 Subject: [PATCH 01/48] Updated script in cluster-init.py to remove version specific content --- t/cluster-init.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/t/cluster-init.py b/t/cluster-init.py index 7d173ba..e6afd04 100644 --- a/t/cluster-init.py +++ b/t/cluster-init.py @@ -18,12 +18,13 @@ repuser=os.getenv("EDGE_REPUSER","susan") repset=os.getenv("EDGE_REPSET","demo-repset") spockpath=os.getenv("EDGE_SPOCK_PATH") +spockver=("EDGE_SPOCK_DEFAULT_VER","3.3.6") +spockpinver=("EDGE_SPOCK_PINNED_VER","3.3.6") dbname=os.getenv("EDGE_DB","lcdb") cwd=os.getcwd() num_nodes=3 - #print("*"*100) print(f"home_dir = {home_dir}\n") @@ -43,21 +44,11 @@ data["node_groups"][1]["path"] = new_path_1 data["node_groups"][2]["path"] = new_path_2 - - -#with open(f"{cluster_dir}/{cluster_name}.json", 'r') as file: -# data = json.load(file) -# #print(data) -# data["node_groups"][0]["nodes"][0]["path"] = new_path_0 -# data["node_groups"][1]["nodes"][0]["path"] = new_path_1 -# data["node_groups"][2]["nodes"][0]["path"] = new_path_2 - newdata = json.dumps(data, indent=4) with open(f"{cluster_dir}/{cluster_name}.json", 'w') as file: file.write(newdata) - command = (f"cluster init {cluster_name}") init=util_test.run_nc_cmd("This command should initialize a cluster based on the json file", command, f"{home_dir}") print(f"init = {init.stdout}\n") From 2a3bbbb26a464b21836a5e84dc5f54c3728a6af0 Mon Sep 17 00:00:00 2001 From: susan-pgedge Date: Mon, 26 Aug 2024 18:53:20 +0000 Subject: [PATCH 02/48] Working on spock_4.0 schedule and test cases --- schedule_files/spock_4.0 | 33 +++++-- t/cluster-init-force-spock-version.py | 2 +- t/spock_exception_table_case1.py | 118 ++++++++++++++++++++++++++ t/spock_exception_table_case2.py | 118 ++++++++++++++++++++++++++ t/spock_exception_table_case3.py | 118 ++++++++++++++++++++++++++ 5 files changed, 379 insertions(+), 10 deletions(-) create mode 100644 t/spock_exception_table_case1.py create mode 100644 t/spock_exception_table_case2.py create mode 100644 t/spock_exception_table_case3.py diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index 467df5b..9f1dbce 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -1,19 +1,34 @@ ## Set up a two node cluster -t/020_nodectl_install_pgedge.pl +#t/020_nodectl_install_pgedge.pl ## Setup scripts for lower level directory -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +#t/8000a_env_setup_pgedge_node1.pl +#t/8001a_env_setup_pgedge_node2.pl +#t/8000b_install_pgedge_node1.pl +#t/8001b_install_pgedge_node2.pl ## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +#t/8051_env_create_node1.pl +#t/8052_env_create_node2.pl + +## Spock repair mode functionality +#t/spock_repair_function.py + +## Remove components, Clean environment and free ports +#t/8998_env_remove_pgedge_node1.pl +#t/8999_env_remove_pgedge_node2.pl -## Spock 4.0 Scripts -t/spock_repair_function.py +# Delete the nc directory and pgpass file +#t/9998_remove_nc_and_pgpass_dirs.py + +## Set up a two node cluster +t/020_nodectl_install_pgedge.pl +t/cluster-init.py +## Exception table functionality +t/spock_exception_table_case1.py +t/spock_exception_table_case2.py +t/spock_exception_table_case3.py ## Remove components, Clean environment and free ports t/8998_env_remove_pgedge_node1.pl diff --git 
a/t/cluster-init-force-spock-version.py b/t/cluster-init-force-spock-version.py index 369881d..b875b36 100644 --- a/t/cluster-init-force-spock-version.py +++ b/t/cluster-init-force-spock-version.py @@ -64,7 +64,7 @@ # Needle and Haystack # Confirm the command worked by looking for: -if "\nSyntaxError" not in str(init.stdout) or init.returncode == 1: +if "[FAILED]" not in str(init.stdout) or init.returncode == 1: util_test.EXIT_PASS() else: diff --git a/t/spock_exception_table_case1.py b/t/spock_exception_table_case1.py new file mode 100644 index 0000000..f6b9362 --- /dev/null +++ b/t/spock_exception_table_case1.py @@ -0,0 +1,118 @@ +import sys, os, util_test,subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") + +port2=port1+1 +print(port2) + +## pgbench-install on n1 +## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset +cmd_node = f"app pgbench-install {dbname} -r default" +res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n1") +print(f"The installation on n1 returns: {res}") +print("*"*100) + +## pgbench-install in n2 +## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset +cmd_node = f"app pgbench-install {dbname} -r default" +res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n2") +print(f"The installation on n2 returns: {res}") +print("*"*100) + +## Use needle/haystack to confirm pgbench is installed on n1 +## confirm with SELECT * FROM spock.tables. +row = util_test.read_psql("SELECT * FROM spock.tables",host,dbname,port1,pw,usr).strip("[]") +check=util_test.contains((row),"default") +print(f"The n1 check returns: {row}") +print("*"*100) + +## Use needle/haystack to confirm pgbench is installed on n2. +## confirm with SELECT * FROM pgbench_branches on n2. +row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +#check=util_test.contains((row),"default") +print(f"The n2 check returns: {row}") +print("*"*100) + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n1 that will not be replicated to n2 + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO pgbench_branches VALUES (2, 70000, null); +END $$; +""" + +print(anon_block) + +row = util_test.write_psql(f"{anon_block}",host,dbname,port1,pw,usr) +print(row) + +## Look for our row on n1 and n2: + +row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +print(row1) + +row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +print(row2) + +print("*"*100) + +## Update the record that is out of sync, forcing a record into the exception table... 
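+## (Why this forces an exception entry: bid = 2 exists only on n1 because the insert above ran
+## in repair mode and was not replicated, so when this update reaches n2 the apply worker finds
+## no matching row; the failed change should land in spock.exception_log on n2 instead of
+## stalling replication, which is what the checks below look for.)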
+row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'hi' WHERE bid = 2",host,dbname,port1,pw,usr) +print(f"The update to bid 2 returns: {row}") +print("*"*100) + +## Read from the spock.exception_log; +row = util_test.read_psql("SELECT * FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") +print(f"SELECT * FROM spock.exception_log returns: {row}") +print("*"*100) + +## Demonstrate that replication continues on n1 +row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'bye' WHERE bid = 1",host,dbname,port1,pw,usr) +print(f"The update to bid 1 on n1 returns: {row}") +print("*"*100) + +## Show that the row update made it to n1 without causing a death spiral: +row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr).strip("[]") +print(f"On n1, pgbench branches contains: {row}") +print("*"*100) + +## Show that the row update made it to n2 without a death spiral: +row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr).strip("[]") +print(f"On n2, pgbench branches contains: {row}") +print("*"*100) + +## Read from the spock.exception_log; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr) +print(f"SELECT * FROM spock.exception_log returns: {row}") +print("*"*100) + + +if '"value": 2, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + diff --git a/t/spock_exception_table_case2.py b/t/spock_exception_table_case2.py new file mode 100644 index 0000000..f6b9362 --- /dev/null +++ b/t/spock_exception_table_case2.py @@ -0,0 +1,118 @@ +import sys, os, util_test,subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") + +port2=port1+1 +print(port2) + +## pgbench-install on n1 +## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset +cmd_node = f"app pgbench-install {dbname} -r default" +res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n1") +print(f"The installation on n1 returns: {res}") +print("*"*100) + +## pgbench-install in n2 +## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset +cmd_node = f"app pgbench-install {dbname} -r default" +res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n2") +print(f"The installation on n2 returns: {res}") +print("*"*100) + +## Use needle/haystack to confirm pgbench is installed on n1 +## confirm with SELECT * FROM spock.tables. +row = util_test.read_psql("SELECT * FROM spock.tables",host,dbname,port1,pw,usr).strip("[]") +check=util_test.contains((row),"default") +print(f"The n1 check returns: {row}") +print("*"*100) + +## Use needle/haystack to confirm pgbench is installed on n2. 
+## confirm with SELECT * FROM pgbench_branches on n2. +row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +#check=util_test.contains((row),"default") +print(f"The n2 check returns: {row}") +print("*"*100) + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n1 that will not be replicated to n2 + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO pgbench_branches VALUES (2, 70000, null); +END $$; +""" + +print(anon_block) + +row = util_test.write_psql(f"{anon_block}",host,dbname,port1,pw,usr) +print(row) + +## Look for our row on n1 and n2: + +row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +print(row1) + +row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +print(row2) + +print("*"*100) + +## Update the record that is out of sync, forcing a record into the exception table... +row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'hi' WHERE bid = 2",host,dbname,port1,pw,usr) +print(f"The update to bid 2 returns: {row}") +print("*"*100) + +## Read from the spock.exception_log; +row = util_test.read_psql("SELECT * FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") +print(f"SELECT * FROM spock.exception_log returns: {row}") +print("*"*100) + +## Demonstrate that replication continues on n1 +row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'bye' WHERE bid = 1",host,dbname,port1,pw,usr) +print(f"The update to bid 1 on n1 returns: {row}") +print("*"*100) + +## Show that the row update made it to n1 without causing a death spiral: +row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr).strip("[]") +print(f"On n1, pgbench branches contains: {row}") +print("*"*100) + +## Show that the row update made it to n2 without a death spiral: +row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr).strip("[]") +print(f"On n2, pgbench branches contains: {row}") +print("*"*100) + +## Read from the spock.exception_log; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr) +print(f"SELECT * FROM spock.exception_log returns: {row}") +print("*"*100) + + +if '"value": 2, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + diff --git a/t/spock_exception_table_case3.py b/t/spock_exception_table_case3.py new file mode 100644 index 0000000..f6b9362 --- /dev/null +++ b/t/spock_exception_table_case3.py @@ -0,0 +1,118 @@ +import sys, os, util_test,subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") + +port2=port1+1 +print(port2) + +## pgbench-install on n1 +## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset 
+cmd_node = f"app pgbench-install {dbname} -r default" +res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n1") +print(f"The installation on n1 returns: {res}") +print("*"*100) + +## pgbench-install in n2 +## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset +cmd_node = f"app pgbench-install {dbname} -r default" +res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n2") +print(f"The installation on n2 returns: {res}") +print("*"*100) + +## Use needle/haystack to confirm pgbench is installed on n1 +## confirm with SELECT * FROM spock.tables. +row = util_test.read_psql("SELECT * FROM spock.tables",host,dbname,port1,pw,usr).strip("[]") +check=util_test.contains((row),"default") +print(f"The n1 check returns: {row}") +print("*"*100) + +## Use needle/haystack to confirm pgbench is installed on n2. +## confirm with SELECT * FROM pgbench_branches on n2. +row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +#check=util_test.contains((row),"default") +print(f"The n2 check returns: {row}") +print("*"*100) + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n1 that will not be replicated to n2 + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO pgbench_branches VALUES (2, 70000, null); +END $$; +""" + +print(anon_block) + +row = util_test.write_psql(f"{anon_block}",host,dbname,port1,pw,usr) +print(row) + +## Look for our row on n1 and n2: + +row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +print(row1) + +row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +print(row2) + +print("*"*100) + +## Update the record that is out of sync, forcing a record into the exception table... 
+row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'hi' WHERE bid = 2",host,dbname,port1,pw,usr) +print(f"The update to bid 2 returns: {row}") +print("*"*100) + +## Read from the spock.exception_log; +row = util_test.read_psql("SELECT * FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") +print(f"SELECT * FROM spock.exception_log returns: {row}") +print("*"*100) + +## Demonstrate that replication continues on n1 +row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'bye' WHERE bid = 1",host,dbname,port1,pw,usr) +print(f"The update to bid 1 on n1 returns: {row}") +print("*"*100) + +## Show that the row update made it to n1 without causing a death spiral: +row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr).strip("[]") +print(f"On n1, pgbench branches contains: {row}") +print("*"*100) + +## Show that the row update made it to n2 without a death spiral: +row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr).strip("[]") +print(f"On n2, pgbench branches contains: {row}") +print("*"*100) + +## Read from the spock.exception_log; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr) +print(f"SELECT * FROM spock.exception_log returns: {row}") +print("*"*100) + + +if '"value": 2, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + From de9fa88097f2a3305e8a65e90fd9672602dfe9b6 Mon Sep 17 00:00:00 2001 From: susan-pgedge Date: Tue, 27 Aug 2024 16:42:16 +0000 Subject: [PATCH 03/48] Adding case2 script for exception table --- t/spock_exception_table_case2.py | 66 +++++++++++++++++++------------- 1 file changed, 40 insertions(+), 26 deletions(-) diff --git a/t/spock_exception_table_case2.py b/t/spock_exception_table_case2.py index f6b9362..7c3d727 100644 --- a/t/spock_exception_table_case2.py +++ b/t/spock_exception_table_case2.py @@ -50,64 +50,78 @@ print(f"The n2 check returns: {row}") print("*"*100) +## Add two rows that should be replicated from n1 to n2: + +row = util_test.write_psql("INSERT INTO pgbench_branches VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +print(f"We inserted bid 11 on n1: {row}") +print("*"*100) + +row = util_test.write_psql("INSERT INTO pgbench_branches VALUES(22, 22000, null)",host,dbname,port1,pw,usr) +print(f"We inserted bid 22 on n1: {row}") +print("*"*100) + +## Look for our rows on n1 and n2: + +row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +print(f"Node n1 should contain bid 1/11/22: {row1}") + +row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +print(f"Node n2 should contain bid 1/11/22: {row2}") + +print("*"*100) + ## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will -## add a row to n1 that will not be replicated to n2 +## add a row to n2 that will not be replicated to n1: anon_block = """ DO $$ BEGIN PERFORM spock.repair_mode('True'); - INSERT INTO pgbench_branches VALUES (2, 70000, null); + INSERT INTO pgbench_branches VALUES (33, 33000, null); END $$; """ print(anon_block) - -row = util_test.write_psql(f"{anon_block}",host,dbname,port1,pw,usr) +row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) print(row) -## Look for our row on n1 and n2: +## Check the rows on n1 and n2: row1 = util_test.read_psql("SELECT * FROM 
pgbench_branches",host,dbname,port1,pw,usr) -print(row1) +print(f"We're in repair mode - n1 now contains 1/11/22: {row1}") row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) -print(row2) +print(f"We're in repair mode - n2 now contains 1/11/22/33: {row2}") print("*"*100) -## Update the record that is out of sync, forcing a record into the exception table... -row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'hi' WHERE bid = 2",host,dbname,port1,pw,usr) -print(f"The update to bid 2 returns: {row}") +## Node n2 has three rows; bid 33 is not replicated to n1, so an update should end up in the exception log table: +row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'trouble' WHERE bid = 33",host,dbname,port2,pw,usr) +print(f"We're in repair mode - the update to bid 33 on n2 returns: {row}") print("*"*100) -## Read from the spock.exception_log; -row = util_test.read_psql("SELECT * FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") +## Read from the spock.exception_log on n1; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr).strip("[]") print(f"SELECT * FROM spock.exception_log returns: {row}") print("*"*100) -## Demonstrate that replication continues on n1 -row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'bye' WHERE bid = 1",host,dbname,port1,pw,usr) -print(f"The update to bid 1 on n1 returns: {row}") -print("*"*100) - -## Show that the row update made it to n1 without causing a death spiral: -row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr).strip("[]") -print(f"On n1, pgbench branches contains: {row}") +## Demonstrate that replication continues +row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'replication check' WHERE bid = 11",host,dbname,port2,pw,usr) +print(f"The update to bid 11 on n1 returns: {row}") print("*"*100) -## Show that the row update made it to n2 without a death spiral: +## Show that the row update made it to n2 without causing a death spiral: row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr).strip("[]") -print(f"On n2, pgbench branches contains: {row}") +print(f"bid 11 should be updated on n2, pgbench branches contains: {row}") print("*"*100) -## Read from the spock.exception_log; -row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr) -print(f"SELECT * FROM spock.exception_log returns: {row}") +## Read from the spock.exception_log on n1 (the update from of bid3 should be here); +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr) +print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") print("*"*100) -if '"value": 2, "attname": "bid", "atttype": "int4"' in str(row): +if '"value": 33, "attname": "bid", "atttype": "int4"' in str(row): util_test.EXIT_PASS() else: From 023b8b73af456d49f053a7d766b84f3acbd94ae1 Mon Sep 17 00:00:00 2001 From: susan-pgedge Date: Tue, 27 Aug 2024 19:09:24 +0000 Subject: [PATCH 04/48] Updated spock_exception_table_case3.py for documented exception --- schedule_files/spock_4.0 | 28 +++++++----- t/spock_exception_table_case3.py | 75 ++++++++++++++++++++++---------- 2 files changed, 70 insertions(+), 33 deletions(-) diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index 9f1dbce..acd9b10 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -17,22 +17,30 @@ ## Remove components, Clean 
environment and free ports #t/8998_env_remove_pgedge_node1.pl #t/8999_env_remove_pgedge_node2.pl +#t/9998_remove_nc_and_pgpass_dirs.py + -# Delete the nc directory and pgpass file +## Exception table functionality - case1 +#t/020_nodectl_install_pgedge.pl +#t/cluster-init.py +#t/spock_exception_table_case1.py +#t/8998_env_remove_pgedge_node1.pl +#t/8999_env_remove_pgedge_node2.pl #t/9998_remove_nc_and_pgpass_dirs.py -## Set up a two node cluster +## Exception table functionality - case2 +#t/020_nodectl_install_pgedge.pl +#t/cluster-init.py +#t/spock_exception_table_case2.py +#t/8998_env_remove_pgedge_node1.pl +#t/8999_env_remove_pgedge_node2.pl +#t/9998_remove_nc_and_pgpass_dirs.py + +## Exception table functionality - case3 t/020_nodectl_install_pgedge.pl t/cluster-init.py - -## Exception table functionality -t/spock_exception_table_case1.py -t/spock_exception_table_case2.py t/spock_exception_table_case3.py - -## Remove components, Clean environment and free ports t/8998_env_remove_pgedge_node1.pl t/8999_env_remove_pgedge_node2.pl - -# Delete the nc directory and pgpass file t/9998_remove_nc_and_pgpass_dirs.py + diff --git a/t/spock_exception_table_case3.py b/t/spock_exception_table_case3.py index f6b9362..0d032c6 100644 --- a/t/spock_exception_table_case3.py +++ b/t/spock_exception_table_case3.py @@ -50,64 +50,93 @@ print(f"The n2 check returns: {row}") print("*"*100) +## Add one row that should be replicated from n1 to n2: + +row = util_test.write_psql("INSERT INTO pgbench_branches VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +print(f"We inserted bid 11 on n1: {row}") +print("*"*100) + +## Look for our rows on n1 and n2: + +row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +print(f"Node n1 should contain bid 1/11: {row1}") + +row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +print(f"Node n2 should contain bid 1/11: {row2}") + +print("*"*100) + ## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will -## add a row to n1 that will not be replicated to n2 +## add a row to n2 that will not be replicated to n1: anon_block = """ DO $$ BEGIN PERFORM spock.repair_mode('True'); - INSERT INTO pgbench_branches VALUES (2, 70000, null); + INSERT INTO pgbench_branches VALUES (22, 22000, null); END $$; """ print(anon_block) - -row = util_test.write_psql(f"{anon_block}",host,dbname,port1,pw,usr) +row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) print(row) -## Look for our row on n1 and n2: +## Check the rows on n1 and n2: row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) -print(row1) +print(f"We're in repair mode - n1 now contains 1/11: {row1}") row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) -print(row2) +print(f"We're in repair mode - n2 now contains 1/11/22: {row2}") print("*"*100) -## Update the record that is out of sync, forcing a record into the exception table... 
-row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'hi' WHERE bid = 2",host,dbname,port1,pw,usr) -print(f"The update to bid 2 returns: {row}") +## Add a row to n1 that has the same bid as the row we added on n2; we're still in repair mode: + +row = util_test.write_psql("INSERT INTO pgbench_branches VALUES(22, 99000, null)",host,dbname,port1,pw,usr) +print(f"We just tried to insert bid 22 on n1 - this should fail, but it doesn't: {row}") print("*"*100) -## Read from the spock.exception_log; -row = util_test.read_psql("SELECT * FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") -print(f"SELECT * FROM spock.exception_log returns: {row}") +## Look for our rows on n1 and n2: + +row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +print(f"Node n1 should contain bid 1/11: {row1}") + +row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +print(f"Node n2 should contain bid 1/11/22: {row2}") + + +## Check the results from the statement above, and you can see the duplicate primary key error +## is not being caught. Fix this when the patch is in. + + +## Read from the spock.exception_log on n1; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n1 returns an empty result set: {row}") print("*"*100) -## Demonstrate that replication continues on n1 -row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'bye' WHERE bid = 1",host,dbname,port1,pw,usr) -print(f"The update to bid 1 on n1 returns: {row}") +## Read from the spock.exception_log on n2; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns the replication error: {row}") print("*"*100) -## Show that the row update made it to n1 without causing a death spiral: +## Show that the row update hasn't caused a death spiral: row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr).strip("[]") -print(f"On n1, pgbench branches contains: {row}") +print(f" n1 pgbench branches contains: {row}") print("*"*100) -## Show that the row update made it to n2 without a death spiral: +## Show that the row update hasn't caused a death spiral: row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr).strip("[]") -print(f"On n2, pgbench branches contains: {row}") +print(f" n2 pgbench branches contains: {row}") print("*"*100) -## Read from the spock.exception_log; +## Read from the spock.exception_log on n2 for our needle/haystack step: row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr) -print(f"SELECT * FROM spock.exception_log returns: {row}") +print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns: {row}") print("*"*100) -if '"value": 2, "attname": "bid", "atttype": "int4"' in str(row): +if '"value": 22, "attname": "bid", "atttype": "int4"' in str(row): util_test.EXIT_PASS() else: From 203011b837e392f417222fcc2e5c09eb00aff867 Mon Sep 17 00:00:00 2001 From: Cady Motyka Date: Tue, 27 Aug 2024 16:50:46 -0400 Subject: [PATCH 05/48] First pass: standard setup/cleanup and one spock ver --- schedule_files/ace_basic | 8 +-- schedule_files/ace_functionality | 8 +-- schedule_files/ace_long | 8 +-- schedule_files/auto_ddl_schedule | 10 ++- schedule_files/cluster_schedule | 14 ++--- schedule_files/db_schedule | 15 
++--- schedule_files/filtering_schedule | 13 ++-- schedule_files/long-test | 42 +++++-------- schedule_files/northwind_schedule | 14 ++--- schedule_files/pgbench_schedule | 13 ++-- schedule_files/repset_tests | 11 ++-- schedule_files/service_schedule | 8 +-- schedule_files/short-test | 12 ++-- schedule_files/snowflake | 16 ++--- schedule_files/spock_4.0 | 14 ++--- schedule_files/sub_tests | 11 ++-- schedule_files/um_schedule | 7 ++- ...oderemove.py => cleanup_01_node_remove.py} | 0 t/cleanup_02_remove.py | 47 ++++++++++++++ ...02_pgremove.py => cleanup_03_remove_nc.py} | 0 t/cluster-init-force-spock-version.py | 3 +- t/lib/config.env | 6 +- .../020_nodectl_install_pgedge.pl | 0 .../020cf_nodectl_install_pgedge.pl | 0 t/{ => maybe_delete}/100_setup_script.pl | 0 t/{ => maybe_delete}/105_remove_pgedge.pl | 0 ...rvice_breakdown_with_control_check_pg16.pl | 0 t/{ => maybe_delete}/5001_cluster_build.py | 0 .../5002_build_cluster_with_n_nodes.py | 0 t/{ => maybe_delete}/5003_sub_env.py | 0 t/{ => maybe_delete}/5004_node_list.py | 0 t/{ => maybe_delete}/5005_rep_list.py | 0 t/{ => maybe_delete}/5006_sub_drop.py | 0 t/{ => maybe_delete}/5007_repset_drop.py | 0 t/{ => maybe_delete}/5008_node_drop.py | 0 .../8000a_env_setup_pgedge_node1.pl | 0 .../8000b_install_pgedge_node1.pl | 0 .../8001a_env_setup_pgedge_node2.pl | 0 .../8001b_install_pgedge_node2.pl | 0 .../8999_env_remove_pgedge_node2.pl | 0 .../8999b_env_remove_pgedge_node3.pl | 0 .../902_create_cluster-local_n1.pl | 0 .../903_create_cluster-local_n2.pl | 0 t/{ => maybe_delete}/906_check_node_one.pl | 0 t/{ => maybe_delete}/998_breakdown_cluster.pl | 0 .../9998_remove_nc_and_pgpass_dirs.py | 0 t/{ => maybe_delete}/999_gsg_breakdown.pl | 0 t/setup_02_setup.py | 61 +++++++++++++++++++ ...nodecreate.py => setup_03_node_install.py} | 0 ...p_03_noderun.py => setup_04_node_setup.py} | 7 ++- 50 files changed, 208 insertions(+), 140 deletions(-) rename t/{cleanup_01_noderemove.py => cleanup_01_node_remove.py} (100%) create mode 100644 t/cleanup_02_remove.py rename t/{cleanup_02_pgremove.py => cleanup_03_remove_nc.py} (100%) rename t/{ => maybe_delete}/020_nodectl_install_pgedge.pl (100%) rename t/{ => maybe_delete}/020cf_nodectl_install_pgedge.pl (100%) rename t/{ => maybe_delete}/100_setup_script.pl (100%) rename t/{ => maybe_delete}/105_remove_pgedge.pl (100%) rename t/{ => maybe_delete}/2990_service_breakdown_with_control_check_pg16.pl (100%) rename t/{ => maybe_delete}/5001_cluster_build.py (100%) rename t/{ => maybe_delete}/5002_build_cluster_with_n_nodes.py (100%) rename t/{ => maybe_delete}/5003_sub_env.py (100%) rename t/{ => maybe_delete}/5004_node_list.py (100%) rename t/{ => maybe_delete}/5005_rep_list.py (100%) rename t/{ => maybe_delete}/5006_sub_drop.py (100%) rename t/{ => maybe_delete}/5007_repset_drop.py (100%) rename t/{ => maybe_delete}/5008_node_drop.py (100%) rename t/{ => maybe_delete}/8000a_env_setup_pgedge_node1.pl (100%) rename t/{ => maybe_delete}/8000b_install_pgedge_node1.pl (100%) rename t/{ => maybe_delete}/8001a_env_setup_pgedge_node2.pl (100%) rename t/{ => maybe_delete}/8001b_install_pgedge_node2.pl (100%) rename t/{ => maybe_delete}/8999_env_remove_pgedge_node2.pl (100%) rename t/{ => maybe_delete}/8999b_env_remove_pgedge_node3.pl (100%) rename t/{ => maybe_delete}/902_create_cluster-local_n1.pl (100%) rename t/{ => maybe_delete}/903_create_cluster-local_n2.pl (100%) rename t/{ => maybe_delete}/906_check_node_one.pl (100%) rename t/{ => maybe_delete}/998_breakdown_cluster.pl (100%) rename t/{ => 
maybe_delete}/9998_remove_nc_and_pgpass_dirs.py (100%) rename t/{ => maybe_delete}/999_gsg_breakdown.pl (100%) create mode 100644 t/setup_02_setup.py rename t/{setup_02_nodecreate.py => setup_03_node_install.py} (100%) rename t/{setup_03_noderun.py => setup_04_node_setup.py} (91%) diff --git a/schedule_files/ace_basic b/schedule_files/ace_basic index cf080e2..9fa9b13 100644 --- a/schedule_files/ace_basic +++ b/schedule_files/ace_basic @@ -1,6 +1,6 @@ t/setup_01_install.py -t/setup_02_nodecreate.py -t/setup_03_noderun.py +t/setup_03_node_install.py +t/setup_04_node_setup.py t/cluster_1_gen_json.py t/ace_01_setup.py @@ -19,5 +19,5 @@ t/ace_60_table_repair.py t/ace_61_table_repair_errors.py t/ace_99_cleanup.py -t/cleanup_01_noderemove.py -t/cleanup_02_pgremove.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/ace_functionality b/schedule_files/ace_functionality index 53adc70..f774bfc 100644 --- a/schedule_files/ace_functionality +++ b/schedule_files/ace_functionality @@ -1,6 +1,6 @@ t/setup_01_install.py -t/setup_02_nodecreate.py -t/setup_03_noderun.py +t/setup_03_node_install.py +t/setup_04_node_setup.py t/cluster_1_gen_json.py t/ace_70_functionality.py @@ -9,5 +9,5 @@ t/ace_72_edge_cases.py t/ace_73_bigloop.py t/ace_74_diff_files.py -t/cleanup_01_noderemove.py -t/cleanup_02_pgremove.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/ace_long b/schedule_files/ace_long index b8d4260..4d1e36f 100644 --- a/schedule_files/ace_long +++ b/schedule_files/ace_long @@ -1,6 +1,6 @@ t/setup_01_install.py -t/setup_02_nodecreate.py -t/setup_03_noderun.py +t/setup_03_node_install.py +t/setup_04_node_setup.py t/cluster_1_gen_json.py t/spock_1_setup.py @@ -31,5 +31,5 @@ t/ace_73_bigloop.py t/ace_74_diff_files.py t/ace_99_cleanup.py -t/cleanup_01_noderemove.py -t/cleanup_02_pgremove.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/auto_ddl_schedule b/schedule_files/auto_ddl_schedule index 4940ce4..f347b1f 100644 --- a/schedule_files/auto_ddl_schedule +++ b/schedule_files/auto_ddl_schedule @@ -1,10 +1,9 @@ ## # setup scripts ## -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py ## # node creation @@ -64,5 +63,4 @@ t/8087_env_node_drop_n2.pl ## # uninstall pgedge ## -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl +t/cleanup_01_node_remove.py diff --git a/schedule_files/cluster_schedule b/schedule_files/cluster_schedule index 869783b..0d855f5 100644 --- a/schedule_files/cluster_schedule +++ b/schedule_files/cluster_schedule @@ -1,5 +1,5 @@ ## Set up tests for a two node cluster -t/020_nodectl_install_pgedge.pl +t/setup_01_install.py t/cluster-create-json.py t/cluster-json-validate.py t/cluster-json-invalid-file.py @@ -12,16 +12,14 @@ t/cluster-init-bad-json.py t/cluster-replication-check.py ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py ## Multi-node cluster tests -t/020_nodectl_install_pgedge.pl +t/setup_01_install.py t/multi-db_cluster_setup.py t/multi-db_cluster_exercise_ace.py ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py 
+t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/db_schedule b/schedule_files/db_schedule index c0acf64..3e327de 100644 --- a/schedule_files/db_schedule +++ b/schedule_files/db_schedule @@ -1,10 +1,8 @@ ## # -t/020_nodectl_install_pgedge.pl -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py t/8051_env_create_node1.pl t/8052_env_create_node2.pl @@ -19,8 +17,5 @@ t/db-guc-set-no-reload.py t/db-guc-set-invalid-value.py ##Teardown Scripts -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py - - +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/filtering_schedule b/schedule_files/filtering_schedule index 7b84a8c..f95798f 100644 --- a/schedule_files/filtering_schedule +++ b/schedule_files/filtering_schedule @@ -1,12 +1,10 @@ ## Set up a two node cluster -t/020_nodectl_install_pgedge.pl +t/setup_01_install.py #t/300_setup_script.pl ## Setup scripts for lower level directory -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_03_node_install.py +t/setup_04_node_setup.py ## t/8051_env_create_node1.pl t/8052_env_create_node2.pl @@ -20,8 +18,7 @@ t/row_filtering.pl t/partition_filtering.pl ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl +t/cleanup_01_node_remove.py # Delete the nc directory and pgpass file -t/9998_remove_nc_and_pgpass_dirs.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/long-test b/schedule_files/long-test index 0e687ae..189c268 100644 --- a/schedule_files/long-test +++ b/schedule_files/long-test @@ -1,6 +1,6 @@ ## Set up a two node cluster -t/020_nodectl_install_pgedge.pl -t/300_setup_script.pl +t/setup_01_install.py +t/setup_02_setup.py #t/get_info.py ## Test Service Module @@ -271,27 +271,22 @@ t/spock_7_negative_list.py t/spock_8_negative_create.py ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py ## Run the multi-db cluster tests -t/020_nodectl_install_pgedge.pl +t/setup_01_install.py t/multi-db_cluster_setup.py t/multi-db_cluster_exercise_ace.py ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py ## Setup for Filtering Tests - -t/020_nodectl_install_pgedge.pl -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py t/8051_env_create_node1.pl t/8052_env_create_node2.pl t/6000_setup_sub_create_n1n2_n1.pl @@ -304,16 +299,13 @@ t/row_filtering.pl t/partition_filtering.pl ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl +t/cleanup_01_node_remove.py ## Setup for Snowflake Tests -t/020_nodectl_install_pgedge.pl -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_01_install.py 
+t/setup_03_node_install.py +t/setup_04_node_setup.py t/8051_env_create_node1.pl t/8052_env_create_node2.pl @@ -324,8 +316,6 @@ t/snowflake_script.py t/snowflake_spock_cmds.py ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py - +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/northwind_schedule b/schedule_files/northwind_schedule index e72cc0e..6025202 100644 --- a/schedule_files/northwind_schedule +++ b/schedule_files/northwind_schedule @@ -1,13 +1,10 @@ ## -t/020_nodectl_install_pgedge.pl -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py t/8051_env_create_node1.pl t/8052_env_create_node2.pl - #northwind-install commands # t/northwind-install.py @@ -29,6 +26,5 @@ t/northwind-remove_no_dbname.py t/northwind-remove_invalid_dbname.py ##Teardown Scripts -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/pgbench_schedule b/schedule_files/pgbench_schedule index 358bdf5..db0e852 100644 --- a/schedule_files/pgbench_schedule +++ b/schedule_files/pgbench_schedule @@ -1,9 +1,7 @@ ## -t/020_nodectl_install_pgedge.pl -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py t/8051_env_create_node1.pl t/8052_env_create_node2.pl @@ -33,6 +31,5 @@ t/pgbench-validate_invalid_dbname.py t/pgbench-remove_no_dbname.py ##Teardown Scripts -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/repset_tests b/schedule_files/repset_tests index 5360422..675ccbf 100644 --- a/schedule_files/repset_tests +++ b/schedule_files/repset_tests @@ -1,10 +1,9 @@ ## # setup scripts ## -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py ## # node creation ## @@ -134,5 +133,5 @@ t/spock_repset_create_error_3.py ## # uninstall pgedge ## -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/service_schedule b/schedule_files/service_schedule index 595d5fc..0a69bca 100644 --- a/schedule_files/service_schedule +++ b/schedule_files/service_schedule @@ -1,6 +1,6 @@ ## Set up a two node cluster -t/020_nodectl_install_pgedge.pl -t/300_setup_script.pl +t/setup_01_install.py +t/setup_02_setup.py ## Test Service Module t/service_reload_component.pl @@ -14,5 +14,5 @@ t/service_enable_error.pl t/399_um_breakdown_script.pl -# 9998X.py to deletes the nc and pgpass directory, etc. 
-t/9998_remove_nc_and_pgpass_dirs.py +t/cleanup_02_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/short-test b/schedule_files/short-test index 83dd9e7..4239e9d 100644 --- a/schedule_files/short-test +++ b/schedule_files/short-test @@ -1,8 +1,7 @@ -t/020_nodectl_install_pgedge.pl -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py +t/cluster_1_gen_json.py t/spock_1_setup.py t/spock_2_node_create.py @@ -10,4 +9,5 @@ t/spock_3_sub_create.py t/spock_4_repset_add_table.py t/spock_5_cofirm_replication.py -t/spock_99_cleanup.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/snowflake b/schedule_files/snowflake index 2b432c9..de782e4 100644 --- a/schedule_files/snowflake +++ b/schedule_files/snowflake @@ -1,11 +1,8 @@ ## Set up a two node cluster -t/020_nodectl_install_pgedge.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py - -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl t/8051_env_create_node1.pl t/8052_env_create_node2.pl @@ -14,10 +11,7 @@ t/snowflake_script.py t/snowflake_spock_cmds.py ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl +t/cleanup_01_node_remove.py # Delete the nc directory and pgpass file -t/9998_remove_nc_and_pgpass_dirs.py - - +t/cleanup_03_remove_nc.py diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index 467df5b..6b14dd2 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -1,13 +1,10 @@ ## Set up a two node cluster -t/020_nodectl_install_pgedge.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py ## Setup scripts for lower level directory -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl -## t/8051_env_create_node1.pl t/8052_env_create_node2.pl @@ -16,8 +13,7 @@ t/spock_repair_function.py ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl +t/cleanup_01_node_remove.py # Delete the nc directory and pgpass file -t/9998_remove_nc_and_pgpass_dirs.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/sub_tests b/schedule_files/sub_tests index 9414fde..6e7b76e 100644 --- a/schedule_files/sub_tests +++ b/schedule_files/sub_tests @@ -1,10 +1,9 @@ ## # setup scripts ## -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py ## # error tests @@ -48,5 +47,5 @@ t/spock_sub_create_synch_all_n2.py ## # uninstall pgedge ## -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/um_schedule b/schedule_files/um_schedule index 9b7e004..7f42a55 100644 --- a/schedule_files/um_schedule +++ b/schedule_files/um_schedule @@ -1,9 +1,10 @@ -t/020_nodectl_install_pgedge.pl -t/300_setup_script.pl +t/setup_01_install.py +t/setup_02_setup.py t/um1_install_available_components.py #t/um_install_available_components.py #t/um_update_available_components.py #t/um_remove_available_components.py -t/9998_remove_nc_and_pgpass_dirs.py 
+t/cleanup_02_remove.py +t/cleanup_03_remove_nc.py diff --git a/t/cleanup_01_noderemove.py b/t/cleanup_01_node_remove.py similarity index 100% rename from t/cleanup_01_noderemove.py rename to t/cleanup_01_node_remove.py diff --git a/t/cleanup_02_remove.py b/t/cleanup_02_remove.py new file mode 100644 index 0000000..9d25202 --- /dev/null +++ b/t/cleanup_02_remove.py @@ -0,0 +1,47 @@ +import sys, os, util_test, subprocess +import json + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() + +ncdir = os.getenv("NC_DIR") +homedir = os.getenv("EDGE_HOME_DIR") +clusterdir = os.getenv('EDGE_CLUSTER_DIR') +numnodes = int(os.getenv('EDGE_NODES')) +pgname = os.getenv('EDGE_COMPONENT') + +## First Cleanup Script- Removes Nodes + +nodedir = os.path.join(clusterdir, f"nc", "pgedge") + +cmd_node = f"remove {pgname} --rm-data" +res=util_test.run_nc_cmd("Remove", cmd_node, nodedir) +util_test.printres(res) +if res.returncode != 0: + util_test.exit_message(f"Couldn't remove nc node") + +modules = { + pgname: False, + f"snowflake-{pgname}": False, + f"spock33-{pgname}": False +} + +cmd_node = f"um list" +res=util_test.run_nc_cmd("List", cmd_node, nodedir) +util_test.printres(res) + +for line in res.stdout.strip().split("\\n"): + for key in modules.keys(): + if key in line and "Installed" in line: + modules[key] = True + +for key in modules.keys(): + if modules[key]: + util_test.exit_message(f"Faild, module {key} still installed") + else: + print(f"Module {key} was removed") + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) diff --git a/t/cleanup_02_pgremove.py b/t/cleanup_03_remove_nc.py similarity index 100% rename from t/cleanup_02_pgremove.py rename to t/cleanup_03_remove_nc.py diff --git a/t/cluster-init-force-spock-version.py b/t/cluster-init-force-spock-version.py index 369881d..b6a18e1 100644 --- a/t/cluster-init-force-spock-version.py +++ b/t/cluster-init-force-spock-version.py @@ -18,8 +18,7 @@ repuser=os.getenv("EDGE_REPUSER","susan") repset=os.getenv("EDGE_REPSET","demo-repset") spockpath=os.getenv("EDGE_SPOCK_PATH") -spockver=("EDGE_SPOCK_DEFAULT_VER","3.3.6") -spockpinver=("EDGE_SPOCK_PINNED_VER","3.3.6") +spockver=("EDGE_SPOCK_VER","3.3.6") dbname=os.getenv("EDGE_DB","lcdb") cwd=os.getcwd() diff --git a/t/lib/config.env b/t/lib/config.env index 5631b36..c6a664e 100644 --- a/t/lib/config.env +++ b/t/lib/config.env @@ -30,11 +30,9 @@ export EDGE_REPUSER=`whoami` export EDGE_INST_VERSION=16 export EDGE_COMPONENT="pg$EDGE_INST_VERSION" -# spock version to install, if pinned_ver has a value, it will be prioritised over default_ver -# keep pinned_ver empty if you want to use spocks default version +# spock_ver empty if you want to use spocks default version # As of 1st August 2024, spock40 is the default pinned version -export EDGE_SPOCK_DEFAULT_VER="4.0" -export EDGE_SPOCK_PINNED_VER="" +export EDGE_SPOCK_VER="" export EDGE_CLI="pgedge" diff --git a/t/020_nodectl_install_pgedge.pl b/t/maybe_delete/020_nodectl_install_pgedge.pl similarity index 100% rename from t/020_nodectl_install_pgedge.pl rename to t/maybe_delete/020_nodectl_install_pgedge.pl diff --git a/t/020cf_nodectl_install_pgedge.pl b/t/maybe_delete/020cf_nodectl_install_pgedge.pl similarity index 100% rename from t/020cf_nodectl_install_pgedge.pl rename to t/maybe_delete/020cf_nodectl_install_pgedge.pl diff --git a/t/100_setup_script.pl b/t/maybe_delete/100_setup_script.pl similarity index 100% rename from t/100_setup_script.pl rename to 
t/maybe_delete/100_setup_script.pl diff --git a/t/105_remove_pgedge.pl b/t/maybe_delete/105_remove_pgedge.pl similarity index 100% rename from t/105_remove_pgedge.pl rename to t/maybe_delete/105_remove_pgedge.pl diff --git a/t/2990_service_breakdown_with_control_check_pg16.pl b/t/maybe_delete/2990_service_breakdown_with_control_check_pg16.pl similarity index 100% rename from t/2990_service_breakdown_with_control_check_pg16.pl rename to t/maybe_delete/2990_service_breakdown_with_control_check_pg16.pl diff --git a/t/5001_cluster_build.py b/t/maybe_delete/5001_cluster_build.py similarity index 100% rename from t/5001_cluster_build.py rename to t/maybe_delete/5001_cluster_build.py diff --git a/t/5002_build_cluster_with_n_nodes.py b/t/maybe_delete/5002_build_cluster_with_n_nodes.py similarity index 100% rename from t/5002_build_cluster_with_n_nodes.py rename to t/maybe_delete/5002_build_cluster_with_n_nodes.py diff --git a/t/5003_sub_env.py b/t/maybe_delete/5003_sub_env.py similarity index 100% rename from t/5003_sub_env.py rename to t/maybe_delete/5003_sub_env.py diff --git a/t/5004_node_list.py b/t/maybe_delete/5004_node_list.py similarity index 100% rename from t/5004_node_list.py rename to t/maybe_delete/5004_node_list.py diff --git a/t/5005_rep_list.py b/t/maybe_delete/5005_rep_list.py similarity index 100% rename from t/5005_rep_list.py rename to t/maybe_delete/5005_rep_list.py diff --git a/t/5006_sub_drop.py b/t/maybe_delete/5006_sub_drop.py similarity index 100% rename from t/5006_sub_drop.py rename to t/maybe_delete/5006_sub_drop.py diff --git a/t/5007_repset_drop.py b/t/maybe_delete/5007_repset_drop.py similarity index 100% rename from t/5007_repset_drop.py rename to t/maybe_delete/5007_repset_drop.py diff --git a/t/5008_node_drop.py b/t/maybe_delete/5008_node_drop.py similarity index 100% rename from t/5008_node_drop.py rename to t/maybe_delete/5008_node_drop.py diff --git a/t/8000a_env_setup_pgedge_node1.pl b/t/maybe_delete/8000a_env_setup_pgedge_node1.pl similarity index 100% rename from t/8000a_env_setup_pgedge_node1.pl rename to t/maybe_delete/8000a_env_setup_pgedge_node1.pl diff --git a/t/8000b_install_pgedge_node1.pl b/t/maybe_delete/8000b_install_pgedge_node1.pl similarity index 100% rename from t/8000b_install_pgedge_node1.pl rename to t/maybe_delete/8000b_install_pgedge_node1.pl diff --git a/t/8001a_env_setup_pgedge_node2.pl b/t/maybe_delete/8001a_env_setup_pgedge_node2.pl similarity index 100% rename from t/8001a_env_setup_pgedge_node2.pl rename to t/maybe_delete/8001a_env_setup_pgedge_node2.pl diff --git a/t/8001b_install_pgedge_node2.pl b/t/maybe_delete/8001b_install_pgedge_node2.pl similarity index 100% rename from t/8001b_install_pgedge_node2.pl rename to t/maybe_delete/8001b_install_pgedge_node2.pl diff --git a/t/8999_env_remove_pgedge_node2.pl b/t/maybe_delete/8999_env_remove_pgedge_node2.pl similarity index 100% rename from t/8999_env_remove_pgedge_node2.pl rename to t/maybe_delete/8999_env_remove_pgedge_node2.pl diff --git a/t/8999b_env_remove_pgedge_node3.pl b/t/maybe_delete/8999b_env_remove_pgedge_node3.pl similarity index 100% rename from t/8999b_env_remove_pgedge_node3.pl rename to t/maybe_delete/8999b_env_remove_pgedge_node3.pl diff --git a/t/902_create_cluster-local_n1.pl b/t/maybe_delete/902_create_cluster-local_n1.pl similarity index 100% rename from t/902_create_cluster-local_n1.pl rename to t/maybe_delete/902_create_cluster-local_n1.pl diff --git a/t/903_create_cluster-local_n2.pl b/t/maybe_delete/903_create_cluster-local_n2.pl similarity index 100% rename 
from t/903_create_cluster-local_n2.pl rename to t/maybe_delete/903_create_cluster-local_n2.pl diff --git a/t/906_check_node_one.pl b/t/maybe_delete/906_check_node_one.pl similarity index 100% rename from t/906_check_node_one.pl rename to t/maybe_delete/906_check_node_one.pl diff --git a/t/998_breakdown_cluster.pl b/t/maybe_delete/998_breakdown_cluster.pl similarity index 100% rename from t/998_breakdown_cluster.pl rename to t/maybe_delete/998_breakdown_cluster.pl diff --git a/t/9998_remove_nc_and_pgpass_dirs.py b/t/maybe_delete/9998_remove_nc_and_pgpass_dirs.py similarity index 100% rename from t/9998_remove_nc_and_pgpass_dirs.py rename to t/maybe_delete/9998_remove_nc_and_pgpass_dirs.py diff --git a/t/999_gsg_breakdown.pl b/t/maybe_delete/999_gsg_breakdown.pl similarity index 100% rename from t/999_gsg_breakdown.pl rename to t/maybe_delete/999_gsg_breakdown.pl diff --git a/t/setup_02_setup.py b/t/setup_02_setup.py new file mode 100644 index 0000000..7208756 --- /dev/null +++ b/t/setup_02_setup.py @@ -0,0 +1,61 @@ +import sys, os, util_test, subprocess +import json + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() + +ncdir = os.getenv("NC_DIR") +homedir = os.getenv("EDGE_HOME_DIR") +clusterdir = os.getenv('EDGE_CLUSTER_DIR') +numnodes = int(os.getenv('EDGE_NODES')) +clicommand = os.getenv('EDGE_CLI') +pgusn = os.getenv('EDGE_USERNAME') +pgpsw = os.getenv('EDGE_PASSWORD') +dbname = os.getenv('EDGE_DB') +startport = int(os.getenv('EDGE_START_PORT')) +pgversion = os.getenv('EDGE_INST_VERSION') +pgname = os.getenv('EDGE_COMPONENT') +spockver = os.getenv('EDGE_SPOCK_VER') + +## Second Setup Script- Setup pgEdge Single Instance for Testing + +os.chdir(os.path.join(f"nc", "pgedge")) + +# Deletes copydir +cmd_node = f"./{clicommand} setup -U {pgusn} -P {pgpsw} -d {dbname} -p {startport} --pg_ver {pgversion}" + +if spockver: + cmd_node = f"{cmd_node} --spock_ver \"{spockver}\"" +res=subprocess.run(cmd_node, shell=True, capture_output=True, text=True) +util_test.printres(res) +if res.returncode == 1: + util_test.exit_message(f"Faild {cmd_node}") +if "already installed" in res.stdout: + print("PG Already Running on Node") + +modules = { + pgname: False, + f"snowflake-{pgname}": False, + f"spock33-{pgname}": False +} + +cmd_node = f"./{clicommand} um list" +res=subprocess.run(cmd_node, shell=True, capture_output=True, text=True) +util_test.printres(res) + +for line in res.stdout.strip().split("\\n"): + for key in modules.keys(): + if key in line and "Installed" in line: + modules[key] = True + +for key in modules.keys(): + if modules[key]: + print(f"Module {key} is installed") + else: + util_test.exit_message(f"Faild, module {key} not installed") + +os.chdir("../..") +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) diff --git a/t/setup_02_nodecreate.py b/t/setup_03_node_install.py similarity index 100% rename from t/setup_02_nodecreate.py rename to t/setup_03_node_install.py diff --git a/t/setup_03_noderun.py b/t/setup_04_node_setup.py similarity index 91% rename from t/setup_03_noderun.py rename to t/setup_04_node_setup.py index 95f8308..e43ca15 100644 --- a/t/setup_03_noderun.py +++ b/t/setup_04_node_setup.py @@ -18,7 +18,7 @@ startport = int(os.getenv('EDGE_START_PORT')) pgversion = os.getenv('EDGE_INST_VERSION') pgname = os.getenv('EDGE_COMPONENT') -spockver = os.getenv('EDGE_SPOCK_DEFAULT_VER') +spockver = os.getenv('EDGE_SPOCK_VER') ## Third Setup Script- Turns on Nodes for Testing @@ -27,7 +27,10 @@ 
os.chdir(os.path.join(f"n{n}", "pgedge")) # Deletes copydir - cmd_node = f"./{clicommand} setup -U {pgusn} -P {pgpsw} -d {dbname} -p {startport + n - 1} --pg_ver {pgversion} --spock_ver \"{spockver}\"" + cmd_node = f"./{clicommand} setup -U {pgusn} -P {pgpsw} -d {dbname} -p {startport + n - 1} --pg_ver {pgversion}" + if spockver: + cmd_node = f"{cmd_node} --spock_ver \"{spockver}\"" + res=subprocess.run(cmd_node, shell=True, capture_output=True, text=True) util_test.printres(res) if res.returncode == 1: From f925286c13fc907a9e802c3d5cea460f83d4f0ba Mon Sep 17 00:00:00 2001 From: susan-pgedge Date: Tue, 27 Aug 2024 21:01:46 +0000 Subject: [PATCH 06/48] Stashing WIP --- schedule_files/spock_4.0 | 9 ++ t/spock_exception_table_case4.py | 161 +++++++++++++++++++++++++++++++ 2 files changed, 170 insertions(+) create mode 100644 t/spock_exception_table_case4.py diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index acd9b10..6ff278b 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -44,3 +44,12 @@ t/8998_env_remove_pgedge_node1.pl t/8999_env_remove_pgedge_node2.pl t/9998_remove_nc_and_pgpass_dirs.py +## Exception table functionality - case4 +t/020_nodectl_install_pgedge.pl +t/cluster-init.py +t/spock_exception_table_case3.py +t/8998_env_remove_pgedge_node1.pl +t/8999_env_remove_pgedge_node2.pl +t/9998_remove_nc_and_pgpass_dirs.py + + diff --git a/t/spock_exception_table_case4.py b/t/spock_exception_table_case4.py new file mode 100644 index 0000000..94b8966 --- /dev/null +++ b/t/spock_exception_table_case4.py @@ -0,0 +1,161 @@ +import sys, os, util_test,subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") + +port2=port1+1 +print(port2) + +## pgbench-install on n1 +## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset +cmd_node = f"app pgbench-install {dbname} -r default" +res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n1") +print(f"The installation on n1 returns: {res}") +print("*"*100) + +## pgbench-install in n2 +## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset +cmd_node = f"app pgbench-install {dbname} -r default" +res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n2") +print(f"The installation on n2 returns: {res}") +print("*"*100) + +## Use needle/haystack to confirm pgbench is installed on n1 +## confirm with SELECT * FROM spock.tables. +row = util_test.read_psql("SELECT * FROM spock.tables",host,dbname,port1,pw,usr).strip("[]") +check=util_test.contains((row),"default") +print(f"The n1 check returns: {row}") +print("*"*100) + +## Use needle/haystack to confirm pgbench is installed on n2. +## confirm with SELECT * FROM pgbench_branches on n2. 
+row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +#check=util_test.contains((row),"default") +print(f"The n2 check returns: {row}") +print("*"*100) + +## In this example, n1 has one row in our sample table (foo) with columns a, b, and c. +## Node n2 has one row in our sample table (foo) with columns a and b. + +## Remove a column from the pgbench_branches table on n2: + +row = util_test.write_psql("ALTER TABLE pgbench_branches DROP COLUMN filler CASCADE",host,dbname,port2,pw,usr) +print(f"We just removed the filler column from the pgbench_branches table on n2: {row}") +print("*"*100) + + +print("We're going to start here tomorrow") + + + +## Add one row that should be replicated from n1 to n2: + +row = util_test.write_psql("INSERT INTO pgbench_branches VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +print(f"We inserted bid 11 on n1: {row}") +print("*"*100) + +## Look for our rows on n1 and n2: + +row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +print(f"Node n1 should contain bid 1/11: {row1}") + +row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +print(f"Node n2 should contain bid 1/11: {row2}") + +print("*"*100) + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n2 that will not be replicated to n1: + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO pgbench_branches VALUES (22, 22000, null); +END $$; +""" + +print(anon_block) +row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) +print(row) + +## Check the rows on n1 and n2: + +row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +print(f"We're in repair mode - n1 now contains 1/11: {row1}") + +row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +print(f"We're in repair mode - n2 now contains 1/11/22: {row2}") + +print("*"*100) + +## Add a row to n1 that has the same bid as the row we added on n2; we're still in repair mode: + +row = util_test.write_psql("INSERT INTO pgbench_branches VALUES(22, 99000, null)",host,dbname,port1,pw,usr) +print(f"We just tried to insert bid 22 on n1 - this should fail, but it doesn't: {row}") +print("*"*100) + +## Look for our rows on n1 and n2: + +row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +print(f"Node n1 should contain bid 1/11: {row1}") + +row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +print(f"Node n2 should contain bid 1/11/22: {row2}") + + +## Check the results from the statement above, and you can see the duplicate primary key error +## is not being caught. Fix this when the patch is in. 
+ + +## Read from the spock.exception_log on n1; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n1 returns an empty result set: {row}") +print("*"*100) + +## Read from the spock.exception_log on n2; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns the replication error: {row}") +print("*"*100) + +## Show that the row update hasn't caused a death spiral: +row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr).strip("[]") +print(f" n1 pgbench branches contains: {row}") +print("*"*100) + +## Show that the row update hasn't caused a death spiral: +row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr).strip("[]") +print(f" n2 pgbench branches contains: {row}") +print("*"*100) + +## Read from the spock.exception_log on n2 for our needle/haystack step: +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr) +print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns: {row}") +print("*"*100) + + +if '"value": 22, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + From 1ae3ad3194dd30681435c900ab35f7014d396f4b Mon Sep 17 00:00:00 2001 From: susan-pgedge Date: Thu, 29 Aug 2024 18:52:39 +0000 Subject: [PATCH 07/48] Adding files for three exception table cases from documentation and schedule file --- schedule_files/spock_4.0 | 51 ++------------- t/spock_exception_table_case1.py | 105 ++++++++++++++++++------------ t/spock_exception_table_case2.py | 107 ++++++++++++++++++++----------- t/spock_exception_table_case3.py | 101 +++++++++++++++++++---------- 4 files changed, 206 insertions(+), 158 deletions(-) diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index 6ff278b..88f7613 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -1,53 +1,16 @@ ## Set up a two node cluster -#t/020_nodectl_install_pgedge.pl - -## Setup scripts for lower level directory -#t/8000a_env_setup_pgedge_node1.pl -#t/8001a_env_setup_pgedge_node2.pl -#t/8000b_install_pgedge_node1.pl -#t/8001b_install_pgedge_node2.pl -## -#t/8051_env_create_node1.pl -#t/8052_env_create_node2.pl - -## Spock repair mode functionality -#t/spock_repair_function.py - -## Remove components, Clean environment and free ports -#t/8998_env_remove_pgedge_node1.pl -#t/8999_env_remove_pgedge_node2.pl -#t/9998_remove_nc_and_pgpass_dirs.py - - -## Exception table functionality - case1 -#t/020_nodectl_install_pgedge.pl -#t/cluster-init.py -#t/spock_exception_table_case1.py -#t/8998_env_remove_pgedge_node1.pl -#t/8999_env_remove_pgedge_node2.pl -#t/9998_remove_nc_and_pgpass_dirs.py - -## Exception table functionality - case2 -#t/020_nodectl_install_pgedge.pl -#t/cluster-init.py -#t/spock_exception_table_case2.py -#t/8998_env_remove_pgedge_node1.pl -#t/8999_env_remove_pgedge_node2.pl -#t/9998_remove_nc_and_pgpass_dirs.py - -## Exception table functionality - case3 t/020_nodectl_install_pgedge.pl t/cluster-init.py -t/spock_exception_table_case3.py -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py -## Exception table functionality - case4 -t/020_nodectl_install_pgedge.pl 
-t/cluster-init.py +## Spock repair mode functionality +t/spock_repair_function.py +t/spock_exception_table_case99.py +t/spock_exception_table_case1.py +t/spock_exception_table_case2.py t/spock_exception_table_case3.py + +## Clean up scenario t/8998_env_remove_pgedge_node1.pl t/8999_env_remove_pgedge_node2.pl t/9998_remove_nc_and_pgpass_dirs.py diff --git a/t/spock_exception_table_case1.py b/t/spock_exception_table_case1.py index f6b9362..01312e2 100644 --- a/t/spock_exception_table_case1.py +++ b/t/spock_exception_table_case1.py @@ -22,34 +22,64 @@ port2=port1+1 print(port2) -## pgbench-install on n1 -## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset -cmd_node = f"app pgbench-install {dbname} -r default" -res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n1") -print(f"The installation on n1 returns: {res}") -print("*"*100) - -## pgbench-install in n2 -## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset -cmd_node = f"app pgbench-install {dbname} -r default" -res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n2") -print(f"The installation on n2 returns: {res}") print("*"*100) +nc_dir=os.getenv("NC_DIR","nc") +print(nc_dir) +home_dir = os.getenv("EDGE_HOME_DIR") +print(home_dir) -## Use needle/haystack to confirm pgbench is installed on n1 -## confirm with SELECT * FROM spock.tables. -row = util_test.read_psql("SELECT * FROM spock.tables",host,dbname,port1,pw,usr).strip("[]") -check=util_test.contains((row),"default") -print(f"The n1 check returns: {row}") +# Check the information from cluster list-nodes. +# +command = (f"cluster list-nodes demo") +res=util_test.run_nc_cmd("Exercise the list-nodes command", command, f"{home_dir}") +print(f"Command: {command}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Setup on n1: +## Create a table: +command1 = "CREATE TABLE case1 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" +row1 = util_test.write_psql(command1,host,dbname,port1,pw,usr) + +## Add a row: +command2 = "INSERT INTO case1 VALUES (1, 11111, 'filler')" +print(f"{command2}") +row2 = util_test.write_psql(command2,host,dbname,port1,pw,usr) + +## Add it to the default repset: +command3 = f"spock repset-add-table default case1 {dbname}" +res3=util_test.run_cmd("Adding our table to the default repset", command3, f"{cluster_dir}/n1") +print(f"The repset-add-table command on n1 returns: {res3}") + +print("*"*100) + +## Setup on n2: +## Create a table: +command4 = "CREATE TABLE case1 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" +row4 = util_test.write_psql(command4,host,dbname,port2,pw,usr) + +## Add a row: +command5 = "INSERT INTO case1 VALUES (1, 11111, 'filler')" +row5 = util_test.write_psql(command5,host,dbname,port2,pw,usr) + +## Add it to the default repset: +command6 = f"spock repset-add-table default case1 {dbname}" +res6=util_test.run_cmd("Adding our table to the default repset", command6, f"{cluster_dir}/n2") +print(f"The repset-add-table command on n2 returns: {res6}") + +print("*"*100) + +## Confirm with SELECT * FROM spock.tables. 
+row7 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port1,pw,usr) +print(f"The n1 select * from spock.tables returns: {row7}") print("*"*100) -## Use needle/haystack to confirm pgbench is installed on n2. -## confirm with SELECT * FROM pgbench_branches on n2. -row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) -#check=util_test.contains((row),"default") -print(f"The n2 check returns: {row}") +## Confirm with SELECT * FROM spock.tables on n2. +row8 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port2,pw,usr) +print(f"The n2 select * from spock.tables returns: {row8}") print("*"*100) + ## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will ## add a row to n1 that will not be replicated to n2 @@ -57,56 +87,49 @@ DO $$ BEGIN PERFORM spock.repair_mode('True'); - INSERT INTO pgbench_branches VALUES (2, 70000, null); + INSERT INTO case1 VALUES (2, 70000, null); END $$; """ print(anon_block) - row = util_test.write_psql(f"{anon_block}",host,dbname,port1,pw,usr) print(row) ## Look for our row on n1 and n2: -row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +row1 = util_test.read_psql("SELECT * FROM case1",host,dbname,port1,pw,usr) print(row1) -row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +row2 = util_test.read_psql("SELECT * FROM case1",host,dbname,port2,pw,usr) print(row2) print("*"*100) ## Update the record that is out of sync, forcing a record into the exception table... -row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'hi' WHERE bid = 2",host,dbname,port1,pw,usr) -print(f"The update to bid 2 returns: {row}") -print("*"*100) - -## Read from the spock.exception_log; -row = util_test.read_psql("SELECT * FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") -print(f"SELECT * FROM spock.exception_log returns: {row}") +row = util_test.write_psql("UPDATE case1 SET filler = 'hi' WHERE bid = 2",host,dbname,port1,pw,usr) +#print(f"The update to bid 2 returns: {row}") print("*"*100) ## Demonstrate that replication continues on n1 -row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'bye' WHERE bid = 1",host,dbname,port1,pw,usr) -print(f"The update to bid 1 on n1 returns: {row}") +row = util_test.write_psql("UPDATE case1 SET filler = 'bye' WHERE bid = 1",host,dbname,port1,pw,usr) +#print(f"The update to bid 1 on n1 returns: {row}") print("*"*100) ## Show that the row update made it to n1 without causing a death spiral: -row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr).strip("[]") -print(f"On n1, pgbench branches contains: {row}") +row = util_test.read_psql("SELECT * FROM case1",host,dbname,port1,pw,usr) +print(f"On n1, our table contains: {row}") print("*"*100) ## Show that the row update made it to n2 without a death spiral: -row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr).strip("[]") -print(f"On n2, pgbench branches contains: {row}") +row = util_test.read_psql("SELECT * FROM case1",host,dbname,port2,pw,usr) +print(f"On n2, our table contains: {row}") print("*"*100) ## Read from the spock.exception_log; -row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr) +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case1';",host,dbname,port2,pw,usr) print(f"SELECT * FROM spock.exception_log returns: {row}") 
print("*"*100) - if '"value": 2, "attname": "bid", "atttype": "int4"' in str(row): util_test.EXIT_PASS() diff --git a/t/spock_exception_table_case2.py b/t/spock_exception_table_case2.py index 7c3d727..9c00003 100644 --- a/t/spock_exception_table_case2.py +++ b/t/spock_exception_table_case2.py @@ -22,50 +22,80 @@ port2=port1+1 print(port2) -## pgbench-install on n1 -## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset -cmd_node = f"app pgbench-install {dbname} -r default" -res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n1") -print(f"The installation on n1 returns: {res}") -print("*"*100) - -## pgbench-install in n2 -## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset -cmd_node = f"app pgbench-install {dbname} -r default" -res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n2") -print(f"The installation on n2 returns: {res}") print("*"*100) +nc_dir=os.getenv("NC_DIR","nc") +print(nc_dir) +home_dir = os.getenv("EDGE_HOME_DIR") +print(home_dir) -## Use needle/haystack to confirm pgbench is installed on n1 -## confirm with SELECT * FROM spock.tables. -row = util_test.read_psql("SELECT * FROM spock.tables",host,dbname,port1,pw,usr).strip("[]") -check=util_test.contains((row),"default") -print(f"The n1 check returns: {row}") +# Check the information from cluster list-nodes. +# +command = (f"cluster list-nodes demo") +res=util_test.run_nc_cmd("Exercise the list-nodes command", command, f"{home_dir}") +print(f"Command: {command}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Setup on n1: +## Create a table: +command1 = "CREATE TABLE case2 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" +row1 = util_test.write_psql(command1,host,dbname,port1,pw,usr) + +## Add a row: +command2 = "INSERT INTO case2 VALUES (1, 11111, 'filler')" +print(f"{command2}") +row2 = util_test.write_psql(command2,host,dbname,port1,pw,usr) + +## Add it to the default repset: +command3 = f"spock repset-add-table default case2 {dbname}" +res3=util_test.run_cmd("Adding our table to the default repset", command3, f"{cluster_dir}/n1") +print(f"The repset-add-table command on n1 returns: {res3}") + +print("*"*100) + +## Setup on n2: +## Create a table: +command4 = "CREATE TABLE case2 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" +row4 = util_test.write_psql(command4,host,dbname,port2,pw,usr) + +## Add a row: +command5 = "INSERT INTO case2 VALUES (1, 11111, 'filler')" +row5 = util_test.write_psql(command5,host,dbname,port2,pw,usr) + +## Add it to the default repset: +command6 = f"spock repset-add-table default case2 {dbname}" +res6=util_test.run_cmd("Adding our table to the default repset", command6, f"{cluster_dir}/n2") +print(f"The repset-add-table command on n2 returns: {res6}") + +print("*"*100) + +## Confirm with SELECT * FROM spock.tables. +row7 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port1,pw,usr) +print(f"The n1 select * from spock.tables returns: {row7}") print("*"*100) -## Use needle/haystack to confirm pgbench is installed on n2. -## confirm with SELECT * FROM pgbench_branches on n2. 
-row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) -#check=util_test.contains((row),"default") -print(f"The n2 check returns: {row}") +## Confirm with SELECT * FROM spock.tables on n2. +row8 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port2,pw,usr) +print(f"The n2 select * from spock.tables returns: {row8}") print("*"*100) + ## Add two rows that should be replicated from n1 to n2: -row = util_test.write_psql("INSERT INTO pgbench_branches VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +row = util_test.write_psql("INSERT INTO case2 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) print(f"We inserted bid 11 on n1: {row}") print("*"*100) -row = util_test.write_psql("INSERT INTO pgbench_branches VALUES(22, 22000, null)",host,dbname,port1,pw,usr) +row = util_test.write_psql("INSERT INTO case2 VALUES(22, 22000, null)",host,dbname,port1,pw,usr) print(f"We inserted bid 22 on n1: {row}") print("*"*100) ## Look for our rows on n1 and n2: -row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +row1 = util_test.read_psql("SELECT * FROM case2",host,dbname,port1,pw,usr) print(f"Node n1 should contain bid 1/11/22: {row1}") -row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +row2 = util_test.read_psql("SELECT * FROM case2",host,dbname,port2,pw,usr) print(f"Node n2 should contain bid 1/11/22: {row2}") print("*"*100) @@ -77,7 +107,7 @@ DO $$ BEGIN PERFORM spock.repair_mode('True'); - INSERT INTO pgbench_branches VALUES (33, 33000, null); + INSERT INTO case2 VALUES (33, 33000, null); END $$; """ @@ -87,36 +117,36 @@ ## Check the rows on n1 and n2: -row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +row1 = util_test.read_psql("SELECT * FROM case2",host,dbname,port1,pw,usr) print(f"We're in repair mode - n1 now contains 1/11/22: {row1}") -row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +row2 = util_test.read_psql("SELECT * FROM case2",host,dbname,port2,pw,usr) print(f"We're in repair mode - n2 now contains 1/11/22/33: {row2}") print("*"*100) ## Node n2 has three rows; bid 33 is not replicated to n1, so an update should end up in the exception log table: -row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'trouble' WHERE bid = 33",host,dbname,port2,pw,usr) +row = util_test.write_psql("UPDATE case2 SET filler = 'trouble' WHERE bid = 33",host,dbname,port2,pw,usr) print(f"We're in repair mode - the update to bid 33 on n2 returns: {row}") print("*"*100) ## Read from the spock.exception_log on n1; -row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr).strip("[]") -print(f"SELECT * FROM spock.exception_log returns: {row}") -print("*"*100) +#row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr).strip("[]") +#print(f"SELECT * FROM spock.exception_log returns: {row}") +#print("*"*100) ## Demonstrate that replication continues -row = util_test.write_psql("UPDATE pgbench_branches SET filler = 'replication check' WHERE bid = 11",host,dbname,port2,pw,usr) +row = util_test.write_psql("UPDATE case2 SET filler = 'replication check' WHERE bid = 11",host,dbname,port2,pw,usr) print(f"The update to bid 11 on n1 returns: {row}") print("*"*100) ## Show that the row update made it to n2 without causing a death spiral: -row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr).strip("[]") 
-print(f"bid 11 should be updated on n2, pgbench branches contains: {row}") +row = util_test.read_psql("SELECT * FROM case2",host,dbname,port2,pw,usr).strip("[]") +print(f"bid 11 should be updated on n2, case2 contains: {row}") print("*"*100) -## Read from the spock.exception_log on n1 (the update from of bid3 should be here); -row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr) +## Read from the spock.exception_log on n1 (the update of bid3 should be here); +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case2';",host,dbname,port1,pw,usr) print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") print("*"*100) @@ -129,4 +159,3 @@ util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) - diff --git a/t/spock_exception_table_case3.py b/t/spock_exception_table_case3.py index 0d032c6..22d6c26 100644 --- a/t/spock_exception_table_case3.py +++ b/t/spock_exception_table_case3.py @@ -22,46 +22,79 @@ port2=port1+1 print(port2) -## pgbench-install on n1 -## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset -cmd_node = f"app pgbench-install {dbname} -r default" -res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n1") -print(f"The installation on n1 returns: {res}") -print("*"*100) - -## pgbench-install in n2 -## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset -cmd_node = f"app pgbench-install {dbname} -r default" -res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n2") -print(f"The installation on n2 returns: {res}") print("*"*100) +nc_dir=os.getenv("NC_DIR","nc") +print(nc_dir) +home_dir = os.getenv("EDGE_HOME_DIR") +print(home_dir) -## Use needle/haystack to confirm pgbench is installed on n1 -## confirm with SELECT * FROM spock.tables. -row = util_test.read_psql("SELECT * FROM spock.tables",host,dbname,port1,pw,usr).strip("[]") -check=util_test.contains((row),"default") -print(f"The n1 check returns: {row}") +# Check the information from cluster list-nodes. 
+# +command = (f"cluster list-nodes demo") +res=util_test.run_nc_cmd("Exercise the list-nodes command", command, f"{home_dir}") +print(f"Command: {command}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Setup on n1: +## Create a table: +command1 = "CREATE TABLE case3 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" +row1 = util_test.write_psql(command1,host,dbname,port1,pw,usr) +#print(f"The create table statement on n1 returns: {row1}") + +## Add a row: +command2 = "INSERT INTO case3 VALUES (1, 11111, 'filler')" +print(f"{command2}") +row2 = util_test.write_psql(command2,host,dbname,port1,pw,usr) +#print(f"The insert statement on n1 returns: {row2}") + +## Add it to the default repset: +command3 = f"spock repset-add-table default case3 {dbname}" +res3=util_test.run_cmd("Adding our table to the default repset", command3, f"{cluster_dir}/n1") +print(f"The repset-add-table command on n1 returns: {res3}") + +print("*"*100) + +## Setup on n2: +## Create a table: +command4 = "CREATE TABLE case3 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" +row4 = util_test.write_psql(command4,host,dbname,port2,pw,usr) +#print(f"The create table statement on n2 returns: {row4}") + +## Add a row: +command5 = "INSERT INTO case3 VALUES (1, 11111, 'filler')" +row5 = util_test.write_psql(command5,host,dbname,port2,pw,usr) +#print(f"The insert statement on n2 returns: {row5}") + +## Add it to the default repset: +command6 = f"spock repset-add-table default case3 {dbname}" +res6=util_test.run_cmd("Adding our table to the default repset", command6, f"{cluster_dir}/n2") +print(f"The repset-add-table command on n2 returns: {res6}") + +print("*"*100) + +## Confirm with SELECT relname FROM spock.tables. +row7 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port1,pw,usr) +print(f"The n1 select * from spock.tables returns: {row7}") print("*"*100) -## Use needle/haystack to confirm pgbench is installed on n2. -## confirm with SELECT * FROM pgbench_branches on n2. -row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) -#check=util_test.contains((row),"default") -print(f"The n2 check returns: {row}") +## Confirm with SELECT relname FROM spock.tables on n2. 
+row8 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port2,pw,usr) +print(f"The n2 select * from spock.tables returns: {row8}") print("*"*100) ## Add one row that should be replicated from n1 to n2: -row = util_test.write_psql("INSERT INTO pgbench_branches VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +row = util_test.write_psql("INSERT INTO case3 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) print(f"We inserted bid 11 on n1: {row}") print("*"*100) ## Look for our rows on n1 and n2: -row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +row1 = util_test.read_psql("SELECT * FROM case3",host,dbname,port1,pw,usr) print(f"Node n1 should contain bid 1/11: {row1}") -row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +row2 = util_test.read_psql("SELECT * FROM case3",host,dbname,port2,pw,usr) print(f"Node n2 should contain bid 1/11: {row2}") print("*"*100) @@ -73,7 +106,7 @@ DO $$ BEGIN PERFORM spock.repair_mode('True'); - INSERT INTO pgbench_branches VALUES (22, 22000, null); + INSERT INTO case3 VALUES (22, 22000, null); END $$; """ @@ -83,26 +116,26 @@ ## Check the rows on n1 and n2: -row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +row1 = util_test.read_psql("SELECT * FROM case3",host,dbname,port1,pw,usr) print(f"We're in repair mode - n1 now contains 1/11: {row1}") -row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +row2 = util_test.read_psql("SELECT * FROM case3",host,dbname,port2,pw,usr) print(f"We're in repair mode - n2 now contains 1/11/22: {row2}") print("*"*100) ## Add a row to n1 that has the same bid as the row we added on n2; we're still in repair mode: -row = util_test.write_psql("INSERT INTO pgbench_branches VALUES(22, 99000, null)",host,dbname,port1,pw,usr) +row = util_test.write_psql("INSERT INTO case3 VALUES(22, 99000, null)",host,dbname,port1,pw,usr) print(f"We just tried to insert bid 22 on n1 - this should fail, but it doesn't: {row}") print("*"*100) ## Look for our rows on n1 and n2: -row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) +row1 = util_test.read_psql("SELECT * FROM case3",host,dbname,port1,pw,usr) print(f"Node n1 should contain bid 1/11: {row1}") -row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) +row2 = util_test.read_psql("SELECT * FROM case3",host,dbname,port2,pw,usr) print(f"Node n2 should contain bid 1/11/22: {row2}") @@ -121,17 +154,17 @@ print("*"*100) ## Show that the row update hasn't caused a death spiral: -row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr).strip("[]") +row = util_test.read_psql("SELECT * FROM case3",host,dbname,port1,pw,usr).strip("[]") print(f" n1 pgbench branches contains: {row}") print("*"*100) ## Show that the row update hasn't caused a death spiral: -row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr).strip("[]") +row = util_test.read_psql("SELECT * FROM case3",host,dbname,port2,pw,usr).strip("[]") print(f" n2 pgbench branches contains: {row}") print("*"*100) ## Read from the spock.exception_log on n2 for our needle/haystack step: -row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr) +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case3';",host,dbname,port2,pw,usr) print(f"SELECT remote_new_tup FROM spock.exception_log on n2 
returns: {row}") print("*"*100) From 37d113704584e328088a0b11ff82ad7373d97ca4 Mon Sep 17 00:00:00 2001 From: susan-pgedge Date: Tue, 3 Sep 2024 17:29:04 +0000 Subject: [PATCH 08/48] Updates to spock_4.0 schedule file/exception_table_case4.py, and t/util_test.py (added function to enable autoddl) --- schedule_files/spock_4.0 | 3 +- t/spock_exception_table_case4.py | 178 +++++++++++++------------------ t/util_test.py | 53 +++++++++ 3 files changed, 129 insertions(+), 105 deletions(-) diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index 88f7613..000c33e 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -5,10 +5,11 @@ t/cluster-init.py ## Spock repair mode functionality t/spock_repair_function.py -t/spock_exception_table_case99.py +#t/spock_exception_table_case99.py t/spock_exception_table_case1.py t/spock_exception_table_case2.py t/spock_exception_table_case3.py +t/spock_exception_table_case4.py ## Clean up scenario t/8998_env_remove_pgedge_node1.pl diff --git a/t/spock_exception_table_case4.py b/t/spock_exception_table_case4.py index 94b8966..2ef44c0 100644 --- a/t/spock_exception_table_case4.py +++ b/t/spock_exception_table_case4.py @@ -1,4 +1,4 @@ -import sys, os, util_test,subprocess +import sys, os, util_test, subprocess, time ## Print Script print(f"Starting - {os.path.basename(__file__)}") @@ -18,139 +18,109 @@ repset=os.getenv("EDGE_REPSET","demo-repset") spockpath=os.getenv("EDGE_SPOCK_PATH") dbname=os.getenv("EDGE_DB","lcdb") +seconds=int(os.getenv("EDGE_SLEEP")) -port2=port1+1 +port2 = port1+1 print(port2) - -## pgbench-install on n1 -## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset -cmd_node = f"app pgbench-install {dbname} -r default" -res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n1") -print(f"The installation on n1 returns: {res}") -print("*"*100) - -## pgbench-install in n2 -## CONFIRM that if a database name and repset name are provided, pgbench is installed as expected and the transactions are added to the repset -cmd_node = f"app pgbench-install {dbname} -r default" -res=util_test.run_cmd("running pgbench-install including repsetname", cmd_node, f"{cluster_dir}/n2") -print(f"The installation on n2 returns: {res}") +nc_dir=os.getenv("NC_DIR","nc") +home_dir = os.getenv("EDGE_HOME_DIR") + +## Check the information from cluster list-nodes. +command = (f"cluster list-nodes demo") +res=util_test.run_nc_cmd("Exercise the list-nodes command", command, f"{home_dir}") +print(f"Command: {command}") +print(f"The list-nodes command returns = {res}\n") print("*"*100) -## Use needle/haystack to confirm pgbench is installed on n1 -## confirm with SELECT * FROM spock.tables. 
-row = util_test.read_psql("SELECT * FROM spock.tables",host,dbname,port1,pw,usr).strip("[]") -check=util_test.contains((row),"default") -print(f"The n1 check returns: {row}") +## Setup on n1 +## Create a table with three columns: +command1 = "CREATE TABLE case4 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" +row1 = util_test.write_psql(command1,host,dbname,port1,pw,usr) +#print(f"The create table statement on n1 returns: {row1}") + +## Add a row: +command2 = "INSERT INTO case4 VALUES (1, 11111, 'filler')" +print(f"{command2}") +row2 = util_test.write_psql(command2,host,dbname,port1,pw,usr) +#print(f"The insert statement on n1 returns: {row2}") + +## Add it to the default repset: +command3 = f"spock repset-add-table default case4 {dbname}" +res3=util_test.run_cmd("Adding our table to the default repset", command3, f"{cluster_dir}/n1") +print(f"The repset-add-table command on n1 returns: {res3}") print("*"*100) -## Use needle/haystack to confirm pgbench is installed on n2. -## confirm with SELECT * FROM pgbench_branches on n2. -row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) -#check=util_test.contains((row),"default") -print(f"The n2 check returns: {row}") +## Setup on n2 +## Create a table with the same name as the table on n1, but with two columns: +command4 = "CREATE TABLE case4 (bid integer PRIMARY KEY, bbalance integer)" +row4 = util_test.write_psql(command4,host,dbname,port2,pw,usr) +#print(f"The create table statement on n2 returns: {row4}") + +## Add a row: +command5 = "INSERT INTO case4 VALUES (1, 11111)" +row5 = util_test.write_psql(command5,host,dbname,port2,pw,usr) +#print(f"The insert statement on n2 returns: {row5}") + +## Add it to the default repset: +command6 = f"spock repset-add-table default case4 {dbname}" +res6=util_test.run_cmd("Adding our table to the default repset", command6, f"{cluster_dir}/n2") +print(f"The repset-add-table command on n2 returns: {res6}") print("*"*100) -## In this example, n1 has one row in our sample table (foo) with columns a, b, and c. -## Node n2 has one row in our sample table (foo) with columns a and b. - -## Remove a column from the pgbench_branches table on n2: - -row = util_test.write_psql("ALTER TABLE pgbench_branches DROP COLUMN filler CASCADE",host,dbname,port2,pw,usr) -print(f"We just removed the filler column from the pgbench_branches table on n2: {row}") +## Confirm with SELECT * FROM spock.tables. +row7 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port1,pw,usr) +print(f"The n1 select * from spock.tables returns: {row7}") print("*"*100) - -print("We're going to start here tomorrow") - - - -## Add one row that should be replicated from n1 to n2: - -row = util_test.write_psql("INSERT INTO pgbench_branches VALUES(11, 11000, null)",host,dbname,port1,pw,usr) -print(f"We inserted bid 11 on n1: {row}") +## Check the values in case4 on n1. +row7 = util_test.read_psql("SELECT * FROM case4;",host,dbname,port1,pw,usr) +print(f"The n1 select * from case4 returns: {row7}") print("*"*100) -## Look for our rows on n1 and n2: - -row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) -print(f"Node n1 should contain bid 1/11: {row1}") - -row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) -print(f"Node n2 should contain bid 1/11: {row2}") +## Confirm with SELECT * FROM spock.tables on n2. 
+row8 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port2,pw,usr) +print(f"The n2 select * from spock.tables returns: {row8}") print("*"*100) -## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will -## add a row to n2 that will not be replicated to n1: - -anon_block = """ -DO $$ -BEGIN - PERFORM spock.repair_mode('True'); - INSERT INTO pgbench_branches VALUES (22, 22000, null); -END $$; -""" - -print(anon_block) -row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) -print(row) - -## Check the rows on n1 and n2: - -row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) -print(f"We're in repair mode - n1 now contains 1/11: {row1}") - -row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) -print(f"We're in repair mode - n2 now contains 1/11/22: {row2}") - +## Check the values in case4 on n2. +row7 = util_test.read_psql("SELECT * FROM case4;",host,dbname,port2,pw,usr) +print(f"The n2 select * from case4 returns: {row7}") print("*"*100) -## Add a row to n1 that has the same bid as the row we added on n2; we're still in repair mode: - -row = util_test.write_psql("INSERT INTO pgbench_branches VALUES(22, 99000, null)",host,dbname,port1,pw,usr) -print(f"We just tried to insert bid 22 on n1 - this should fail, but it doesn't: {row}") -print("*"*100) - -## Look for our rows on n1 and n2: - -row1 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr) -print(f"Node n1 should contain bid 1/11: {row1}") - -row2 = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr) -print(f"Node n2 should contain bid 1/11/22: {row2}") - - -## Check the results from the statement above, and you can see the duplicate primary key error -## is not being caught. Fix this when the patch is in. 
+## Enable AutoDDL (uses connection that allows ALTER SYSTEM SET) and reload configuration: +n1enable = util_test.enable_autoddl(host, dbname, port1, pw, usr) +n2enable = util_test.enable_autoddl(host, dbname, port2, pw, usr) -## Read from the spock.exception_log on n1; -row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr).strip("[]") -print(f"SELECT remote_new_tup FROM spock.exception_log on n1 returns an empty result set: {row}") +## Check our variable values; +row = util_test.read_psql("SELECT name, setting FROM pg_settings WHERE NAME LIKE 'spock.%'",host,dbname,port1,pw,usr) +print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") print("*"*100) -## Read from the spock.exception_log on n2; -row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") -print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns the replication error: {row}") +## Check our variable values; +row = util_test.read_psql("SELECT name, setting FROM pg_settings WHERE NAME LIKE 'spock.%'",host,dbname,port2,pw,usr) +print(f"SELECT * FROM spock.exception_log on n2 returns: {row}") print("*"*100) -## Show that the row update hasn't caused a death spiral: -row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port1,pw,usr).strip("[]") -print(f" n1 pgbench branches contains: {row}") +## Drop the filler column from n1: +command1 = "ALTER TABLE case4 DROP COLUMN filler" +row1 = util_test.write_psql(command1,host,dbname,port1,pw,usr) +#print(f"We just dropped the filler column from n1: {row1}") print("*"*100) -## Show that the row update hasn't caused a death spiral: -row = util_test.read_psql("SELECT * FROM pgbench_branches",host,dbname,port2,pw,usr).strip("[]") -print(f" n2 pgbench branches contains: {row}") +## Read from the spock.exception_log; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'queue'",host,dbname,port1,pw,usr) +print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") print("*"*100) -## Read from the spock.exception_log on n2 for our needle/haystack step: -row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr) -print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns: {row}") +## Read from the spock.exception_log; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'queue';",host,dbname,port2,pw,usr) +print(f"SELECT * FROM spock.exception_log on n2 returns: {row}") print("*"*100) -if '"value": 22, "attname": "bid", "atttype": "int4"' in str(row): +if 'ALTER TABLE case4 DROP COLUMN filler' in str(row): util_test.EXIT_PASS() else: diff --git a/t/util_test.py b/t/util_test.py index 1b5eebb..7b71634 100644 --- a/t/util_test.py +++ b/t/util_test.py @@ -1,5 +1,7 @@ import sys, os, psycopg, json, subprocess, shutil, re, csv from dotenv import load_dotenv +from psycopg import sql + def EXIT_PASS(): print("pass") @@ -23,6 +25,57 @@ def exit_message(p_msg, p_rc=1): sys.exit(p_rc) +# ************************************************************************************************************** +## Enable AutoDDL +# ************************************************************************************************************** +# To call this function, pass a connection string: +# command = util_test.enable_autoddl(host, dbname, port, pw, usr) + +## Get a connection - this connection sets autocommit to True and returns 
authentication error information
+
+def get_autoddl_conn(host,dbname,port,pw,usr):
+    try:
+        conn = psycopg.connect(dbname=dbname, user=usr, host=host, port=port, password=pw)
+        conn.autocommit = True
+        print("Your connection is established, with autocommit = True")
+        return conn
+
+    except Exception as e:
+        conn = None
+        print("The connection attempt failed")
+        return conn
+
+##############################
+
+def enable_autoddl(host, dbname, port, pw, usr):
+    try:
+        # Connect to the PostgreSQL database
+
+        conn = get_autoddl_conn(host,dbname,port,pw,usr)
+        cur = conn.cursor()
+        # We'll execute the following commands:
+
+        cur.execute("ALTER SYSTEM SET spock.enable_ddl_replication = on")
+        cur.execute("ALTER SYSTEM SET spock.include_ddl_repset = on")
+        cur.execute("ALTER SYSTEM SET spock.allow_ddl_from_functions = on")
+
+        # Then, reload the PostgreSQL configuration:
+        cur.execute("SELECT pg_reload_conf()")
+        print("PostgreSQL configuration reloaded.")
+
+        # Close the cursor and connection
+        cur.close()
+        conn.close()
+
+    except Exception as e:
+        print(f"An error occurred: {e}")
+
+
+
+
+
+
+
# **************************************************************************************************************
## Run a pgEdge command
# **************************************************************************************************************
From 80f76a32c5b58191322ce85be1c38585730b41d1 Mon Sep 17 00:00:00 2001
From: susan-pgedge
Date: Wed, 4 Sep 2024 18:44:46 +0000
Subject: [PATCH 09/48] Updated spock_node_create_unhappy_path tests to remove code that removes node (that interferes with correct error)

---
 t/spock_node_create_no_dbname.py | 11 -----------
 t/spock_node_create_no_dns.py | 11 -----------
 t/spock_node_create_no_node_name.py | 11 -----------
 t/spock_node_create_no_repset_user.py | 11 -----------
 4 files changed, 44 deletions(-)

diff --git a/t/spock_node_create_no_dbname.py b/t/spock_node_create_no_dbname.py
index 04cbe47..90d532a 100644
--- a/t/spock_node_create_no_dbname.py
+++ b/t/spock_node_create_no_dbname.py
@@ -20,17 +20,6 @@
spockpath=os.getenv("EDGE_SPOCK_PATH")
dbname=os.getenv("EDGE_DB","lcdb")
#
-# Check for "n1", and drop it if it exists; then we'll use spock node-create to create errors. This way we can play the tests out of order.
-#
-check_value = util_test.read_psql("select * from spock.node;",host,dbname,port,pw,usr).strip("[]")
-print(f"Check value is: {check_value}")
-
-if "n1" in str(check_value):
-    drop_node = f"spock node-drop n1 {dbname}"
-    drop=util_test.run_cmd("Run spock node-drop.", drop_node, f"{cluster_dir}/n1")
-    print(f"Print drop.stdout here: - {drop.stdout}")
-print("*"*100)
-
# Invoke spock node-create, but don't specify a database name:
command = f"spock node-create n1 'host={host} user={repuser} dbname={dbname}'"
diff --git a/t/spock_node_create_no_dns.py b/t/spock_node_create_no_dns.py
index 9240a8d..98bd301 100644
--- a/t/spock_node_create_no_dns.py
+++ b/t/spock_node_create_no_dns.py
@@ -20,17 +20,6 @@
spockpath=os.getenv("EDGE_SPOCK_PATH")
dbname=os.getenv("EDGE_DB","lcdb")
#
-# Check for "n1", and drop it if it exists; then we'll use spock node-create to create errors. This way we can play the tests out of order.
-# -check_value = util_test.read_psql("select * from spock.node;",host,dbname,port,pw,usr).strip("[]") -print(f"Check value is: {check_value}") - -if "n1" in str(check_value): - drop_node = f"spock node-drop n1 {dbname}" - drop=util_test.run_cmd("Run spock node-drop.", drop_node, f"{cluster_dir}/n1") - print(f"Print drop.stdout here: - {drop.stdout}") -print("*"*100) - # Invoke spock node-create, but don't specify a node name: command = f"spock node-create n1 {dbname}" diff --git a/t/spock_node_create_no_node_name.py b/t/spock_node_create_no_node_name.py index 55c0cdd..d11815b 100644 --- a/t/spock_node_create_no_node_name.py +++ b/t/spock_node_create_no_node_name.py @@ -19,17 +19,6 @@ repset=os.getenv("EDGE_REPSET","demo-repset") spockpath=os.getenv("EDGE_SPOCK_PATH") dbname=os.getenv("EDGE_DB","lcdb") -# -# Check for "n1", and drop it if it exists; then we'll use spock node-create to create errors. This way we can play the tests out of order. -# -check_value = util_test.read_psql("select * from spock.node;",host,dbname,port,pw,usr).strip("[]") -print(f"Check value is: {check_value}") - -if "n1" in str(check_value): - drop_node = f"spock node-drop n1 {dbname}" - drop=util_test.run_cmd("Run spock node-drop.", drop_node, f"{cluster_dir}/n1") - print(f"Print drop.stdout here: - {drop.stdout}") -print("*"*100) # Invoke spock node-create, but don't specify a node name: diff --git a/t/spock_node_create_no_repset_user.py b/t/spock_node_create_no_repset_user.py index b03cec8..d93da35 100644 --- a/t/spock_node_create_no_repset_user.py +++ b/t/spock_node_create_no_repset_user.py @@ -20,17 +20,6 @@ spockpath=os.getenv("EDGE_SPOCK_PATH") dbname=os.getenv("EDGE_DB","lcdb") # -# Check for "n1", and drop it if it exists; then we'll use spock node-create to create errors. This way we can play the tests out of order. 
-#
-check_value = util_test.read_psql("select * from spock.node;",host,dbname,port,pw,usr).strip("[]")
-print(f"Check value is: {check_value}")
-
-if "n1" in str(check_value):
-    drop_node = f"spock node-drop n1 {dbname}"
-    drop=util_test.run_cmd("Run spock node-drop.", drop_node, f"{cluster_dir}/n1")
-    print(f"Print drop.stdout here: - {drop.stdout}")
-print("*"*100)
-
# Invoke spock node-create, but don't specify a node name:
command = f"spock node-create n1 'host={host} user={usr} dbname={dbname}' {dbname}"
From 08942406f37bc38c21f0cb061f3ae7630dd31ead Mon Sep 17 00:00:00 2001
From: Cady Motyka
Date: Tue, 17 Sep 2024 08:35:05 -0400
Subject: [PATCH 10/48] Extra checking on spock ver

---
 t/setup_02_setup.py | 7 ++++++-
 t/setup_04_node_setup.py | 8 +++++++-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/t/setup_02_setup.py b/t/setup_02_setup.py
index 7208756..42b62a1 100644
--- a/t/setup_02_setup.py
+++ b/t/setup_02_setup.py
@@ -39,7 +39,7 @@
modules = {
    pgname: False,
    f"snowflake-{pgname}": False,
-    f"spock33-{pgname}": False
+    f"spock": False
}

cmd_node = f"./{clicommand} um list"
@@ -50,6 +50,11 @@
    for key in modules.keys():
        if key in line and "Installed" in line:
            modules[key] = True
+            if key == "spock" and spockver:
+                if spockver in line:
+                    print(f"Correct spock ver {spockver} is installed")
+                else:
+                    util_test.exit_message(f"Failed, wrong spock ver {spockver} installed")

for key in modules.keys():
    if modules[key]:
diff --git a/t/setup_04_node_setup.py b/t/setup_04_node_setup.py
index e43ca15..1c4257f 100644
--- a/t/setup_04_node_setup.py
+++ b/t/setup_04_node_setup.py
@@ -42,7 +42,7 @@
modules = {
    pgname: False,
    f"snowflake-{pgname}": False,
-    f"spock33-{pgname}": False
+    f"spock": False
}

cmd_node = f"./{clicommand} um list"
@@ -53,6 +53,12 @@
    for key in modules.keys():
        if key in line and "Installed" in line:
            modules[key] = True
+            if key == "spock" and spockver:
+                if spockver in line:
+                    print(f"Correct spock ver {spockver} is installed")
+                else:
+                    util_test.exit_message(f"Failed, wrong spock ver {spockver} installed")
+

for key in modules.keys():
    if modules[key]:
From 783649904a7eb558a34911a56c1023467ecbe3bd Mon Sep 17 00:00:00 2001
From: Cady Motyka
Date: Tue, 17 Sep 2024 12:09:38 -0400
Subject: [PATCH 11/48] Refactor node/sub create and node/sub drop

---
 schedule_files/auto_ddl_schedule | 11 ++------
 schedule_files/db_schedule | 3 +-
 schedule_files/filtering_schedule | 7 ++---
 schedule_files/northwind_schedule | 3 +-
 schedule_files/pgbench_schedule | 3 +-
 schedule_files/repset_tests | 28 +------------------
 schedule_files/snowflake | 3 +-
 schedule_files/spock_4.0 | 3 +-
 t/cleanup_01_node_remove.py | 2 +-
 t/cleanup_02_remove.py | 2 +-
 .../6000_setup_sub_create_n1n2_n1.pl | 0
 .../6001_setup_sub_create_n2n1_n2.pl | 0
 .../600_cluster_setup_script_v15.pl | 0
 .../8002a_env_setup_pgedge_node3.pl | 0
 .../8002b_install_pgedge_node3.pl | 0
 t/{ => maybe_delete}/8051_env_create_node1.pl | 0
 t/{ => maybe_delete}/8052_env_create_node2.pl | 0
 17 files changed, 13 insertions(+), 52 deletions(-)
 rename t/{ => maybe_delete}/6000_setup_sub_create_n1n2_n1.pl (100%)
 rename t/{ => maybe_delete}/6001_setup_sub_create_n2n1_n2.pl (100%)
 rename t/{ => maybe_delete}/600_cluster_setup_script_v15.pl (100%)
 rename t/{ => maybe_delete}/8002a_env_setup_pgedge_node3.pl (100%)
 rename t/{ => maybe_delete}/8002b_install_pgedge_node3.pl (100%)
 rename t/{ => maybe_delete}/8051_env_create_node1.pl (100%)
 rename t/{ => maybe_delete}/8052_env_create_node2.pl (100%)

diff --git
a/schedule_files/auto_ddl_schedule b/schedule_files/auto_ddl_schedule index f347b1f..5d46d52 100644 --- a/schedule_files/auto_ddl_schedule +++ b/schedule_files/auto_ddl_schedule @@ -8,13 +8,11 @@ t/setup_04_node_setup.py ## # node creation ## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +t/spock_2_node_create.py ## # sub-create ## -t/6000_setup_sub_create_n1n2_n1.pl -t/6001_setup_sub_create_n2n1_n2.pl +t/spock_3_sub_create.py ## # enable autoDDL GUCS ## @@ -55,10 +53,7 @@ t/auto_ddl/6666c_all_objects_validate_n1.sql ## t/6910_teardown_autoddl_gucs_off_n1.pl t/6911_teardown_autoddl_gucs_off_n2.pl -t/8082_env_sub_drop_n1.pl -t/8083_env_sub_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl +t/spock_6_drop.py ## # uninstall pgedge diff --git a/schedule_files/db_schedule b/schedule_files/db_schedule index 3e327de..6222d09 100644 --- a/schedule_files/db_schedule +++ b/schedule_files/db_schedule @@ -3,8 +3,7 @@ t/setup_01_install.py t/setup_03_node_install.py t/setup_04_node_setup.py -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +t/spock_2_node_create.py t/db-guc-show.py t/db-guc-show-no-guc.py diff --git a/schedule_files/filtering_schedule b/schedule_files/filtering_schedule index f95798f..4ee866e 100644 --- a/schedule_files/filtering_schedule +++ b/schedule_files/filtering_schedule @@ -5,11 +5,8 @@ t/setup_01_install.py ## Setup scripts for lower level directory t/setup_03_node_install.py t/setup_04_node_setup.py -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl -t/6000_setup_sub_create_n1n2_n1.pl -t/6001_setup_sub_create_n2n1_n2.pl +t/spock_2_node_create.py +t/spock_3_sub_create.py #Filtering scripts diff --git a/schedule_files/northwind_schedule b/schedule_files/northwind_schedule index 6025202..4f29fad 100644 --- a/schedule_files/northwind_schedule +++ b/schedule_files/northwind_schedule @@ -2,8 +2,7 @@ t/setup_01_install.py t/setup_03_node_install.py t/setup_04_node_setup.py -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +t/spock_2_node_create.py #northwind-install commands # diff --git a/schedule_files/pgbench_schedule b/schedule_files/pgbench_schedule index db0e852..c911923 100644 --- a/schedule_files/pgbench_schedule +++ b/schedule_files/pgbench_schedule @@ -2,8 +2,7 @@ t/setup_01_install.py t/setup_03_node_install.py t/setup_04_node_setup.py -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +t/spock_2_node_create.py ## pgbench t/pgbench-install.py diff --git a/schedule_files/repset_tests b/schedule_files/repset_tests index 675ccbf..09b9302 100644 --- a/schedule_files/repset_tests +++ b/schedule_files/repset_tests @@ -7,8 +7,7 @@ t/setup_04_node_setup.py ## # node creation ## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +t/spock_2_node_create.py ## # repset replicateDelete=False test cases ## @@ -26,13 +25,6 @@ t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8084_env_table_drop_n1.pl t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -## -# node creation -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl ## # repset replicateInsert=False test cases ## @@ -50,13 +42,7 @@ t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8084_env_table_drop_n1.pl t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -## -# node creation ## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl ## # repset replicateTruncate=False test cases ## @@ -74,13 +60,7 @@ t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8084_env_table_drop_n1.pl 
t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl ## -# node creation -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl ## # repset replicateUpdate=False test cases ## @@ -98,12 +78,6 @@ t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8084_env_table_drop_n1.pl t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -# node creation -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl ## # spock node-add and node-drop test cases ## diff --git a/schedule_files/snowflake b/schedule_files/snowflake index de782e4..88042e1 100644 --- a/schedule_files/snowflake +++ b/schedule_files/snowflake @@ -3,8 +3,7 @@ t/setup_01_install.py t/setup_03_node_install.py t/setup_04_node_setup.py -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +t/spock_2_node_create.py t/snowflake.py t/snowflake_script.py diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index 6b14dd2..461c621 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -5,8 +5,7 @@ t/setup_03_node_install.py t/setup_04_node_setup.py ## Setup scripts for lower level directory -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +t/spock_2_node_create.py ## Spock 4.0 Scripts t/spock_repair_function.py diff --git a/t/cleanup_01_node_remove.py b/t/cleanup_01_node_remove.py index 6f5ed64..f0aacd9 100644 --- a/t/cleanup_01_node_remove.py +++ b/t/cleanup_01_node_remove.py @@ -29,7 +29,7 @@ modules = { pgname: False, f"snowflake-{pgname}": False, - f"spock33-{pgname}": False + f"spock": False } cmd_node = f"um list" diff --git a/t/cleanup_02_remove.py b/t/cleanup_02_remove.py index 9d25202..013000b 100644 --- a/t/cleanup_02_remove.py +++ b/t/cleanup_02_remove.py @@ -26,7 +26,7 @@ modules = { pgname: False, f"snowflake-{pgname}": False, - f"spock33-{pgname}": False + f"spock": False } cmd_node = f"um list" diff --git a/t/6000_setup_sub_create_n1n2_n1.pl b/t/maybe_delete/6000_setup_sub_create_n1n2_n1.pl similarity index 100% rename from t/6000_setup_sub_create_n1n2_n1.pl rename to t/maybe_delete/6000_setup_sub_create_n1n2_n1.pl diff --git a/t/6001_setup_sub_create_n2n1_n2.pl b/t/maybe_delete/6001_setup_sub_create_n2n1_n2.pl similarity index 100% rename from t/6001_setup_sub_create_n2n1_n2.pl rename to t/maybe_delete/6001_setup_sub_create_n2n1_n2.pl diff --git a/t/600_cluster_setup_script_v15.pl b/t/maybe_delete/600_cluster_setup_script_v15.pl similarity index 100% rename from t/600_cluster_setup_script_v15.pl rename to t/maybe_delete/600_cluster_setup_script_v15.pl diff --git a/t/8002a_env_setup_pgedge_node3.pl b/t/maybe_delete/8002a_env_setup_pgedge_node3.pl similarity index 100% rename from t/8002a_env_setup_pgedge_node3.pl rename to t/maybe_delete/8002a_env_setup_pgedge_node3.pl diff --git a/t/8002b_install_pgedge_node3.pl b/t/maybe_delete/8002b_install_pgedge_node3.pl similarity index 100% rename from t/8002b_install_pgedge_node3.pl rename to t/maybe_delete/8002b_install_pgedge_node3.pl diff --git a/t/8051_env_create_node1.pl b/t/maybe_delete/8051_env_create_node1.pl similarity index 100% rename from t/8051_env_create_node1.pl rename to t/maybe_delete/8051_env_create_node1.pl diff --git a/t/8052_env_create_node2.pl b/t/maybe_delete/8052_env_create_node2.pl similarity index 100% rename from t/8052_env_create_node2.pl rename to t/maybe_delete/8052_env_create_node2.pl From 6704665d53884cd78425b197e438788f527495c1 Mon Sep 17 00:00:00 2001 From: Cady Motyka Date: Tue, 17 Sep 2024 12:16:36 -0400 Subject: [PATCH 12/48] Moving back files 
to t --- .../6000_setup_sub_create_n1n2_n1.pl | 0 .../6001_setup_sub_create_n2n1_n2.pl | 0 t/{maybe_delete => }/8051_env_create_node1.pl | 0 t/{maybe_delete => }/8052_env_create_node2.pl | 0 .../600_cluster_setup_script_v15.pl | 56 ------------------- 5 files changed, 56 deletions(-) rename t/{maybe_delete => }/6000_setup_sub_create_n1n2_n1.pl (100%) rename t/{maybe_delete => }/6001_setup_sub_create_n2n1_n2.pl (100%) rename t/{maybe_delete => }/8051_env_create_node1.pl (100%) rename t/{maybe_delete => }/8052_env_create_node2.pl (100%) delete mode 100644 t/maybe_delete/600_cluster_setup_script_v15.pl diff --git a/t/maybe_delete/6000_setup_sub_create_n1n2_n1.pl b/t/6000_setup_sub_create_n1n2_n1.pl similarity index 100% rename from t/maybe_delete/6000_setup_sub_create_n1n2_n1.pl rename to t/6000_setup_sub_create_n1n2_n1.pl diff --git a/t/maybe_delete/6001_setup_sub_create_n2n1_n2.pl b/t/6001_setup_sub_create_n2n1_n2.pl similarity index 100% rename from t/maybe_delete/6001_setup_sub_create_n2n1_n2.pl rename to t/6001_setup_sub_create_n2n1_n2.pl diff --git a/t/maybe_delete/8051_env_create_node1.pl b/t/8051_env_create_node1.pl similarity index 100% rename from t/maybe_delete/8051_env_create_node1.pl rename to t/8051_env_create_node1.pl diff --git a/t/maybe_delete/8052_env_create_node2.pl b/t/8052_env_create_node2.pl similarity index 100% rename from t/maybe_delete/8052_env_create_node2.pl rename to t/8052_env_create_node2.pl diff --git a/t/maybe_delete/600_cluster_setup_script_v15.pl b/t/maybe_delete/600_cluster_setup_script_v15.pl deleted file mode 100644 index 4534998..0000000 --- a/t/maybe_delete/600_cluster_setup_script_v15.pl +++ /dev/null @@ -1,56 +0,0 @@ -# This test case runs the command: -# ./nodectl cluster create-local demo 2 --pg 15 -# - -use strict; -use warnings; -use lib './t/lib'; -use contains; -use File::Which; -use IPC::Cmd qw(run); -use Try::Tiny; -use JSON; - -# -# Move into the pgedge directory. -# - chdir("./pgedge"); - -# -# First, we use nodectl to create a two-node cluster named demo; the nodes are named n1/n2 (default names), -# the database is named lcdb (default), and it is owned by lcdb (default). At this point, lcdb is not added -# to the .pgpass file. -# - -my $cmd = qq(./nodectl cluster local-create demo 2 --pg 15); -print("cmd = $cmd\n"); -my ($success, $error_message, $full_buf, $stdout_buf, $stderr_buf)= IPC::Cmd::run(command => $cmd, verbose => 0); - -# -# Print statements -# - -print("full_buf = @$full_buf\n"); -print("stderr_buf = @$stderr_buf\n"); - -print("This s/b a 2 node cluster named demo, owned by lcdb, with a db named lcdb. The nodes are named n1/n2.\n"); -print("Right now, they're running on 6432 and 6433\n"); - -# -# Then, we retrieve the Postgres version (the component) number from nodectl in json form... -# this is to catch cases where more than one copy of Postgres is running. 
-# -my $json = `./nc --json info pg15`; -my $out = decode_json($json); -my $component = $out->[0]->{"component"}; -print("The cluster is running = {$component}\n"); - -if(contains($component, "pg15")) -{ -exit(0); -} -else -{ -exit(1); -} - From 951a9aea6079f56bab062cbfb28131b77976a5ca Mon Sep 17 00:00:00 2001 From: Cloud User Date: Tue, 17 Sep 2024 17:24:34 +0000 Subject: [PATCH 13/48] Cleanup spock_4.0 schedule file --- schedule_files/spock_4.0 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index 0a95355..faa39ce 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -4,7 +4,7 @@ t/setup_01_install.py t/cluster-init.py ## Setup scripts for lower level directory -t/spock_2_node_create.py +#t/spock_2_node_create.py ## Spock 4.0 Scripts t/spock_repair_function.py @@ -19,4 +19,4 @@ t/spock_exception_table_case4.py t/cleanup_01_node_remove.py # Delete the nc directory and pgpass file -t/cleanup_03_remove_nc.py \ No newline at end of file +t/cleanup_03_remove_nc.py From 1162f6e2c236aa6e151393691e3f93ae35f63b43 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Tue, 17 Sep 2024 17:40:47 +0000 Subject: [PATCH 14/48] Cleanup um_schedule schedule file --- schedule_files/um_schedule | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/schedule_files/um_schedule b/schedule_files/um_schedule index 7f42a55..9eefaf1 100644 --- a/schedule_files/um_schedule +++ b/schedule_files/um_schedule @@ -1,10 +1,10 @@ t/setup_01_install.py t/setup_02_setup.py -t/um1_install_available_components.py -#t/um_install_available_components.py -#t/um_update_available_components.py -#t/um_remove_available_components.py +#t/um1_install_available_components.py +t/um_install_available_components.py +t/um_update_available_components.py +t/um_remove_available_components.py -t/cleanup_02_remove.py +#t/cleanup_02_remove.py t/cleanup_03_remove_nc.py From b822c2f04d75a2c55fd87f5a861f4549c9c217e4 Mon Sep 17 00:00:00 2001 From: Cady Motyka Date: Wed, 18 Sep 2024 13:13:03 -0400 Subject: [PATCH 15/48] rm metrics check issue --- t/spock_2_node_create.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/t/spock_2_node_create.py b/t/spock_2_node_create.py index 5aa2746..adc32df 100644 --- a/t/spock_2_node_create.py +++ b/t/spock_2_node_create.py @@ -28,13 +28,6 @@ def run(): util_test.exit_message(f"Fail - {os.path.basename(__file__)} - Node Create", 1) port = port + 1 - ## Metrics Check Test - cmd_node = f"spock metrics-check {db}" - res=util_test.run_cmd("Metrics Check", cmd_node, f"{cluster_dir}/n1") - print(res) - if res.returncode == 1 or "mount_point" not in res.stdout: - util_test.exit_message(f"Fail - {os.path.basename(__file__)} - Metrics Check", 1) - if __name__ == "__main__": ## Print Script print(f"Starting - {os.path.basename(__file__)}") From 17572075d324ce04cb718453bc847976e55043a4 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Wed, 18 Sep 2024 17:59:50 +0000 Subject: [PATCH 16/48] Updated needle to fix result set in column_filtering.pl --- t/column_filtering.pl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/t/column_filtering.pl b/t/column_filtering.pl index 0bdc264..1f062e4 100644 --- a/t/column_filtering.pl +++ b/t/column_filtering.pl @@ -198,11 +198,9 @@ exit(1); } -## Needle and Haystack - Note - this case was erroneously passing before. 
I've updated the 'needle' to include the content -# that should be returned when column filtering works, but the number of spaces in the NULL column may need to be adjusted -# when the fix is in: +## Needle and Haystack -if(contains(@$stdout_buf7[0], "8 | | Alice | Adams | 18 Austin Blvd | Austin, TX | | US |")) +if(contains(@$stdout_buf7[0], "8 | | Alice | Adams | 18 Austin Blvd")) { exit(0); } From aebeac45992b0cfa38ed4f74e9ecdf37f63afe23 Mon Sep 17 00:00:00 2001 From: Cady Motyka Date: Wed, 18 Sep 2024 14:30:14 -0400 Subject: [PATCH 17/48] fixing cleanup --- schedule_files/service_schedule | 1 - t/cleanup_01_node_remove.py | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/schedule_files/service_schedule b/schedule_files/service_schedule index 0a69bca..195d30f 100644 --- a/schedule_files/service_schedule +++ b/schedule_files/service_schedule @@ -14,5 +14,4 @@ t/service_enable_error.pl t/399_um_breakdown_script.pl -t/cleanup_02_remove.py t/cleanup_03_remove_nc.py diff --git a/t/cleanup_01_node_remove.py b/t/cleanup_01_node_remove.py index f0aacd9..9cbdcb1 100644 --- a/t/cleanup_01_node_remove.py +++ b/t/cleanup_01_node_remove.py @@ -25,6 +25,8 @@ util_test.printres(res) if res.returncode != 0: util_test.exit_message(f"Couldn't remove node {n}") + cmd_node = f"remove backrest" + res=util_test.run_nc_cmd("Remove", cmd_node, nodedir) modules = { pgname: False, From a5bc4a4bb53cc0531610c69e141c58b76531806e Mon Sep 17 00:00:00 2001 From: Cloud User Date: Tue, 24 Sep 2024 13:52:23 +0000 Subject: [PATCH 18/48] Updated cluster-init.py to set Spock version to blank (interpreted as use the default) --- t/cluster-init.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/t/cluster-init.py b/t/cluster-init.py index e6afd04..5a6f5d4 100644 --- a/t/cluster-init.py +++ b/t/cluster-init.py @@ -18,13 +18,14 @@ repuser=os.getenv("EDGE_REPUSER","susan") repset=os.getenv("EDGE_REPSET","demo-repset") spockpath=os.getenv("EDGE_SPOCK_PATH") -spockver=("EDGE_SPOCK_DEFAULT_VER","3.3.6") -spockpinver=("EDGE_SPOCK_PINNED_VER","3.3.6") +spockver=os.getenv("EDGE_SPOCK_VER","4.0.1") dbname=os.getenv("EDGE_DB","lcdb") cwd=os.getcwd() num_nodes=3 + + #print("*"*100) print(f"home_dir = {home_dir}\n") @@ -32,6 +33,8 @@ res=util_test.run_nc_cmd("This command should create a json file that defines a cluster", command, f"{home_dir}") print(f"res = {res}\n") +new_ver = (f"{spockver}") +print(new_ver) new_path_0 = (f"{cwd}/{cluster_dir}/n1") new_path_1 = (f"{cwd}/{cluster_dir}/n2") new_path_2 = (f"{cwd}/{cluster_dir}/n3") @@ -40,6 +43,7 @@ with open(f"{cluster_dir}/{cluster_name}.json", 'r') as file: data = json.load(file) #print(data) + data["pgedge"]["spock"]["spock_version"] = new_ver data["node_groups"][0]["path"] = new_path_0 data["node_groups"][1]["path"] = new_path_1 data["node_groups"][2]["path"] = new_path_2 @@ -48,6 +52,7 @@ with open(f"{cluster_dir}/{cluster_name}.json", 'w') as file: file.write(newdata) +print(newdata) command = (f"cluster init {cluster_name}") init=util_test.run_nc_cmd("This command should initialize a cluster based on the json file", command, f"{home_dir}") From 0f94434ee910e0408f723b6d66f4a0974b999b1f Mon Sep 17 00:00:00 2001 From: Cloud User Date: Wed, 25 Sep 2024 14:29:00 +0000 Subject: [PATCH 19/48] Tweaked spock_exception_table_case1.py to add clause that should trigger the server crash if it exists --- t/spock_exception_table_case1.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/t/spock_exception_table_case1.py 
b/t/spock_exception_table_case1.py index 01312e2..a4ba27b 100644 --- a/t/spock_exception_table_case1.py +++ b/t/spock_exception_table_case1.py @@ -125,13 +125,20 @@ print(f"On n2, our table contains: {row}") print("*"*100) +## Query the spock.exception_log; adding this command to cover error in 4.0.4 where a query on the wrong node caused a server crash. +row1 = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case1';",host,dbname,port1,pw,usr) +print(f"This command is the query that used to cause a server crash! The result s/b []: {row1}") +print("*"*100) + +if '[]' not in str(row1): + util_test.EXIT_FAIL() + ## Read from the spock.exception_log; row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case1';",host,dbname,port2,pw,usr) -print(f"SELECT * FROM spock.exception_log returns: {row}") +print(f"SELECT * FROM spock.exception_log on n2 returns: {row}") print("*"*100) if '"value": 2, "attname": "bid", "atttype": "int4"' in str(row): - util_test.EXIT_PASS() else: util_test.EXIT_FAIL() From d97ba49a1a2cf5b60e1508aad10ef8008fe00126 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Wed, 25 Sep 2024 16:09:45 +0000 Subject: [PATCH 20/48] Removing unneeded setup step --- schedule_files/spock_4.0 | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index faa39ce..12ea421 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -3,9 +3,6 @@ t/setup_01_install.py t/cluster-init.py -## Setup scripts for lower level directory -#t/spock_2_node_create.py - ## Spock 4.0 Scripts t/spock_repair_function.py #t/spock_exception_table_case99.py @@ -14,9 +11,7 @@ t/spock_exception_table_case2.py t/spock_exception_table_case3.py t/spock_exception_table_case4.py - ## Remove components, Clean environment and free ports t/cleanup_01_node_remove.py - # Delete the nc directory and pgpass file -t/cleanup_03_remove_nc.py +#t/cleanup_03_remove_nc.py From 7781f43fc99721d07c7c0ee6c0f2c6064b5c46c3 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Thu, 3 Oct 2024 14:01:33 +0000 Subject: [PATCH 21/48] Correcting search for needle/haystack - previous search looked for table name and new search looks for SQL command --- t/spock_exception_table_case4.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/t/spock_exception_table_case4.py b/t/spock_exception_table_case4.py index 2ef44c0..9c8ae79 100644 --- a/t/spock_exception_table_case4.py +++ b/t/spock_exception_table_case4.py @@ -110,12 +110,12 @@ print("*"*100) ## Read from the spock.exception_log; -row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'queue'",host,dbname,port1,pw,usr) +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr) print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") print("*"*100) ## Read from the spock.exception_log; -row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'queue';",host,dbname,port2,pw,usr) +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr) print(f"SELECT * FROM spock.exception_log on n2 returns: {row}") print("*"*100) From 8f48c00854de8ec81ea96c89ef0f173602e07d28 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Tue, 15 Oct 2024 13:44:03 +0000 Subject: [PATCH 22/48] Adding updates to repset check test scripts from Lahari that remove extra ()s from call to sleep 
function --- t/8064_env_delete_replication_check.pl | 2 +- t/8069_env_insert_replication_check.pl | 2 +- t/8074_env_update_replication_check.pl | 2 +- t/8079_env_truncate_replication_check.pl | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/t/8064_env_delete_replication_check.pl b/t/8064_env_delete_replication_check.pl index 4824ba4..1a0308d 100644 --- a/t/8064_env_delete_replication_check.pl +++ b/t/8064_env_delete_replication_check.pl @@ -130,7 +130,7 @@ # Listing table contents of Port2 6433 # print("Adding call to sleep function") - my $cmd999 = qq(sleep($seconds)); + my $cmd999 = qq(sleep $seconds); my($success999, $error_message999, $full_buf999, $stdout_buf999, $stderr_buf999)= IPC::Cmd::run(command => $cmd999, verbose => 0); print("cmd999 = $cmd999\n"); diff --git a/t/8069_env_insert_replication_check.pl b/t/8069_env_insert_replication_check.pl index 9b73c13..ef03646 100644 --- a/t/8069_env_insert_replication_check.pl +++ b/t/8069_env_insert_replication_check.pl @@ -225,7 +225,7 @@ # Listing table contents of Port2 6433 # # print("Adding call to sleep function") - my $cmd999 = qq(sleep($seconds)); + my $cmd999 = qq(sleep $seconds); my($success999, $error_message999, $full_buf999, $stdout_buf999, $stderr_buf999)= IPC::Cmd::run(command => $cmd999, verbose => 0); print("TRUNCATE FUNCTION REPLICATION CHECK IN NODE n2\n"); diff --git a/t/8074_env_update_replication_check.pl b/t/8074_env_update_replication_check.pl index 03042bd..e0a2366 100644 --- a/t/8074_env_update_replication_check.pl +++ b/t/8074_env_update_replication_check.pl @@ -132,7 +132,7 @@ print("INSERT=TRUE REPLICATION CHECK IN NODE n2\n"); # print("Adding call to sleep function") - my $cmd999 = qq(sleep($seconds)); + my $cmd999 = qq(sleep $seconds); my($success999, $error_message999, $full_buf999, $stdout_buf999, $stderr_buf999)= IPC::Cmd::run(command => $cmd999, verbose => 0); print ("-"x45,"\n"); diff --git a/t/8079_env_truncate_replication_check.pl b/t/8079_env_truncate_replication_check.pl index b43dd87..bfbe321 100644 --- a/t/8079_env_truncate_replication_check.pl +++ b/t/8079_env_truncate_replication_check.pl @@ -173,7 +173,7 @@ print("INSERT=TRUE REPLICATION CHECK IN NODE n2\n"); # print("Adding call to sleep function") - my $cmd999 = qq(sleep($seconds)); + my $cmd999 = qq(sleep $seconds); my($success999, $error_message999, $full_buf999, $stdout_buf999, $stderr_buf999)= IPC::Cmd::run(command => $cmd999, verbose => 0); print ("-"x45,"\n"); From bbee0b90fc9a4f7d52fc584057d225fd86b4afab Mon Sep 17 00:00:00 2001 From: "A. Hayee Bhatti" Date: Tue, 15 Oct 2024 16:52:34 +0500 Subject: [PATCH 23/48] Adjust autoDDL tests to incorporate recent fixes Updated tests to account for recent fixes in autoDDL: - DDL executed via EXPLAIN ANALYZE - Correct repset assignment for child partitioned tables. - Set lc_monetary in config.env for consistent handling of money datatype. 
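The repset-assignment fix listed above can also be spot-checked outside the .sql/.out comparison. The snippet below is a minimal, hypothetical Python check in the style of the existing t/ scripts, not part of this patch: it assumes the usual util_test helpers and EDGE_* environment settings, and uses products_hash (the partitioned parent from 6144a) purely as an example table name. After a primary key is added to the parent, no partition should remain in default_insert_only.

## Hypothetical spot-check (not part of the patch): after ALTER TABLE ... ADD PRIMARY KEY on a
## partitioned parent, every child partition should report set_name = 'default' in spock.tables.
import os, util_test

util_test.set_env()
host   = os.getenv("EDGE_HOST", "localhost")
dbname = os.getenv("EDGE_DB", "lcdb")
port   = int(os.getenv("EDGE_START_PORT", 6432))
usr    = os.getenv("EDGE_USERNAME", "lcusr")
pw     = os.getenv("EDGE_PASSWORD", "password")

## 'products_hash' is only an example; substitute any partitioned parent table.
rows = util_test.read_psql(
    "SELECT relname, set_name FROM spock.tables WHERE relname LIKE 'products_hash%' ORDER BY relname;",
    host, dbname, port, pw, usr)
print(f"spock.tables reports: {rows}")

## The fix means no partition should still be assigned to default_insert_only.
if "default_insert_only" in str(rows):
    util_test.EXIT_FAIL()
else:
    util_test.EXIT_PASS()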
--- .../6111a_table_tx_ctas_selectinto_like.out | 13 +++------ .../6111a_table_tx_ctas_selectinto_like.sql | 6 +---- .../6111b_table_validate_and_drop_n2.out | 7 ++--- .../6122a_table_range_partitions_n1.out | 27 ++++++++----------- .../6122a_table_range_partitions_n1.sql | 6 +---- ...22b_table_range_partitions_validate_n2.out | 14 +++++----- t/auto_ddl/6144a_table_hash_partitions_n1.out | 16 +++++------ t/auto_ddl/6144a_table_hash_partitions_n1.sql | 5 +--- ...144b_table_hash_partitions_validate_n2.out | 26 ++++++++---------- ...144b_table_hash_partitions_validate_n2.sql | 7 ++--- ...6666b_all_objects_validate_and_drop_n2.out | 6 +++++ ...6666b_all_objects_validate_and_drop_n2.sql | 2 ++ t/lib/config.env | 5 +++- 13 files changed, 60 insertions(+), 80 deletions(-) diff --git a/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.out b/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.out index 8318ec4..c9e4e27 100644 --- a/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.out +++ b/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.out @@ -380,13 +380,8 @@ EXECUTE spocktab('table_ctas5'); -- should be in default_insert_only set \o /dev/null EXPLAIN ANALYZE CREATE TABLE table_ctas6 AS SELECT 1 AS a; -INFO: DDL statement replicated. +WARNING: DDL statement replicated, but could be unsafe. \o -/* -TO FIX: -At present, no repset is assigned for table created through EXPLAIN ANALYZE -https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=65421352 -*/ -- Validate table_ctas6 \d table_ctas6 Table "public.table_ctas6" @@ -395,9 +390,9 @@ https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL a | integer | | | EXECUTE spocktab('table_ctas6'); -- should be in default_insert_only set - nspname | relname | set_name ----------+-------------+---------- - public | table_ctas6 | + nspname | relname | set_name +---------+-------------+--------------------- + public | table_ctas6 | default_insert_only (1 row) ----------------------------------- diff --git a/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.sql b/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.sql index 878198e..3c95d56 100644 --- a/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.sql +++ b/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.sql @@ -175,11 +175,7 @@ EXECUTE spocktab('table_ctas5'); -- should be in default_insert_only set EXPLAIN ANALYZE CREATE TABLE table_ctas6 AS SELECT 1 AS a; \o -/* -TO FIX: -At present, no repset is assigned for table created through EXPLAIN ANALYZE -https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=65421352 -*/ + -- Validate table_ctas6 \d table_ctas6 EXECUTE spocktab('table_ctas6'); -- should be in default_insert_only set diff --git a/t/auto_ddl/6111b_table_validate_and_drop_n2.out b/t/auto_ddl/6111b_table_validate_and_drop_n2.out index a45afa3..686c1a0 100644 --- a/t/auto_ddl/6111b_table_validate_and_drop_n2.out +++ b/t/auto_ddl/6111b_table_validate_and_drop_n2.out @@ -249,9 +249,9 @@ SELECT * FROM table_ctas5 ORDER BY num; a | integer | | | EXECUTE spocktab('table_ctas6'); -- Replication set: default_insert_only - nspname | relname | set_name ----------+-------------+---------- - public | table_ctas6 | + nspname | relname | set_name +---------+-------------+--------------------- + public | table_ctas6 | default_insert_only (1 row) -- Expected data: 1 @@ 
-547,6 +547,7 @@ NOTICE: drop cascades to table table_ctas5 membership in replication set defaul INFO: DDL statement replicated. DROP TABLE DROP TABLE table_ctas6; +NOTICE: drop cascades to table table_ctas6 membership in replication set default_insert_only INFO: DDL statement replicated. DROP TABLE --cleanup for select into diff --git a/t/auto_ddl/6122a_table_range_partitions_n1.out b/t/auto_ddl/6122a_table_range_partitions_n1.out index 7526423..4868c45 100644 --- a/t/auto_ddl/6122a_table_range_partitions_n1.out +++ b/t/auto_ddl/6122a_table_range_partitions_n1.out @@ -126,17 +126,12 @@ Indexes: "revenue_range_pkey" PRIMARY KEY, btree (rev_id, rev_date) Number of partitions: 2 (Use \d+ to list them.) -/*TO FIX: -At present, adding a parimary key to parent table does not move the partitions to default repset. -To revisit and update outputs once this is addressed -https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -*/ EXECUTE spocktab('revenue_range'); -- Expect revenue_range and all child partitions to move to default set - nspname | relname | set_name ----------+--------------------+--------------------- + nspname | relname | set_name +---------+--------------------+---------- public | revenue_range | default - public | revenue_range_2021 | default_insert_only - public | revenue_range_2022 | default_insert_only + public | revenue_range_2021 | default + public | revenue_range_2022 | default (3 rows) -- Add another partition to the modified table @@ -158,11 +153,11 @@ Indexes: Access method: heap EXECUTE spocktab('revenue_range'); -- Expect revenue_range_2023 in default set - nspname | relname | set_name ----------+--------------------+--------------------- + nspname | relname | set_name +---------+--------------------+---------- public | revenue_range | default - public | revenue_range_2021 | default_insert_only - public | revenue_range_2022 | default_insert_only + public | revenue_range_2021 | default + public | revenue_range_2022 | default public | revenue_range_2023 | default (4 rows) @@ -312,12 +307,12 @@ INFO: DDL statement replicated. ALTER TABLE -- Validate structure and data EXECUTE spocktab('inventory'); -- Expect inventory_standalone to be listed - nspname | relname | set_name ----------+-------------------------+--------------------- + nspname | relname | set_name +---------+-------------------------+---------- public | inventory_range | default public | inventory_range_2021 | default public | inventory_range_default | default - public | inventory_standalone | default_insert_only + public | inventory_standalone | default (4 rows) SELECT * FROM inventory_standalone ORDER BY product_id; -- Expect 1 row diff --git a/t/auto_ddl/6122a_table_range_partitions_n1.sql b/t/auto_ddl/6122a_table_range_partitions_n1.sql index 77e8b07..b064fc6 100644 --- a/t/auto_ddl/6122a_table_range_partitions_n1.sql +++ b/t/auto_ddl/6122a_table_range_partitions_n1.sql @@ -61,11 +61,7 @@ EXECUTE spocktab('sales_range'); -- Expect sales_range_2023 in default set -- Add a primary key to a range partitioned table that initially didn't have one ALTER TABLE revenue_range ADD PRIMARY KEY (rev_id, rev_date); \d revenue_range -/*TO FIX: -At present, adding a parimary key to parent table does not move the partitions to default repset. 
-To revisit and update outputs once this is addressed -https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -*/ + EXECUTE spocktab('revenue_range'); -- Expect revenue_range and all child partitions to move to default set -- Add another partition to the modified table diff --git a/t/auto_ddl/6122b_table_range_partitions_validate_n2.out b/t/auto_ddl/6122b_table_range_partitions_validate_n2.out index 19ffc15..7d2d3d2 100644 --- a/t/auto_ddl/6122b_table_range_partitions_validate_n2.out +++ b/t/auto_ddl/6122b_table_range_partitions_validate_n2.out @@ -11,11 +11,11 @@ EXECUTE spocktab('sales_range'); -- Expect sales_range, sales_range_2022, sales_ (3 rows) EXECUTE spocktab('revenue_range'); -- Expect revenue_range, revenue_range_2023 in default and revenue_range_2021, revenue_range_2022 in default_insert_only set - nspname | relname | set_name ----------+--------------------+--------------------- + nspname | relname | set_name +---------+--------------------+---------- public | revenue_range | default - public | revenue_range_2021 | default_insert_only - public | revenue_range_2022 | default_insert_only + public | revenue_range_2021 | default + public | revenue_range_2022 | default public | revenue_range_2023 | default (4 rows) @@ -268,8 +268,8 @@ INFO: DDL statement replicated. DROP TABLE DROP TABLE revenue_range CASCADE; NOTICE: drop cascades to table revenue_range_2023 membership in replication set default -NOTICE: drop cascades to table revenue_range_2022 membership in replication set default_insert_only -NOTICE: drop cascades to table revenue_range_2021 membership in replication set default_insert_only +NOTICE: drop cascades to table revenue_range_2022 membership in replication set default +NOTICE: drop cascades to table revenue_range_2021 membership in replication set default NOTICE: drop cascades to table revenue_range membership in replication set default INFO: DDL statement replicated. DROP TABLE @@ -280,7 +280,7 @@ NOTICE: drop cascades to table orders_range membership in replication set defau INFO: DDL statement replicated. 
DROP TABLE DROP TABLE inventory_range CASCADE; -NOTICE: drop cascades to table inventory_standalone membership in replication set default_insert_only +NOTICE: drop cascades to table inventory_standalone membership in replication set default NOTICE: drop cascades to table inventory_range_default membership in replication set default NOTICE: drop cascades to table inventory_range_2021 membership in replication set default NOTICE: drop cascades to table inventory_range membership in replication set default diff --git a/t/auto_ddl/6144a_table_hash_partitions_n1.out b/t/auto_ddl/6144a_table_hash_partitions_n1.out index 84d310b..6f2cbab 100644 --- a/t/auto_ddl/6144a_table_hash_partitions_n1.out +++ b/t/auto_ddl/6144a_table_hash_partitions_n1.out @@ -258,18 +258,14 @@ Partition of: products_hash FOR VALUES WITH (modulus 4, remainder 3) Indexes: "products_hash_4_pkey" PRIMARY KEY, btree (product_id, product_date) -/*TO FIX: -commenting this test case due to https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -only the parent table moves to default repset, all partitions continue to stay in default_insert_only -*/ EXECUTE spocktab('products_hash'); -- Expect the replication set to change to default - nspname | relname | set_name ----------+-----------------+--------------------- + nspname | relname | set_name +---------+-----------------+---------- public | products_hash | default - public | products_hash_1 | default_insert_only - public | products_hash_2 | default_insert_only - public | products_hash_3 | default_insert_only - public | products_hash_4 | default_insert_only + public | products_hash_1 | default + public | products_hash_2 | default + public | products_hash_3 | default + public | products_hash_4 | default (5 rows) SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows diff --git a/t/auto_ddl/6144a_table_hash_partitions_n1.sql b/t/auto_ddl/6144a_table_hash_partitions_n1.sql index d089241..069c44b 100644 --- a/t/auto_ddl/6144a_table_hash_partitions_n1.sql +++ b/t/auto_ddl/6144a_table_hash_partitions_n1.sql @@ -80,9 +80,6 @@ ALTER TABLE products_hash ADD PRIMARY KEY (product_id, product_date); \d products_hash_2 \d products_hash_3 \d products_hash_4 -/*TO FIX: -commenting this test case due to https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -only the parent table moves to default repset, all partitions continue to stay in default_insert_only -*/ + EXECUTE spocktab('products_hash'); -- Expect the replication set to change to default SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows diff --git a/t/auto_ddl/6144b_table_hash_partitions_validate_n2.out b/t/auto_ddl/6144b_table_hash_partitions_validate_n2.out index f7873c2..c106381 100644 --- a/t/auto_ddl/6144b_table_hash_partitions_validate_n2.out +++ b/t/auto_ddl/6144b_table_hash_partitions_validate_n2.out @@ -143,18 +143,14 @@ Partition of: products_hash FOR VALUES WITH (modulus 4, remainder 3) Indexes: "products_hash_4_pkey" PRIMARY KEY, btree (product_id, product_date) -/*TO FIX: -commenting this test case due to https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -only the parent table moves to default repset, all 
partitions continue to stay in default_insert_only -*/ -EXECUTE spocktab('products_hash'); -- Expect the replication set to change to default - nspname | relname | set_name ----------+-----------------+--------------------- +EXECUTE spocktab('products_hash'); -- Expect the replication set to be default + nspname | relname | set_name +---------+-----------------+---------- public | products_hash | default - public | products_hash_1 | default_insert_only - public | products_hash_2 | default_insert_only - public | products_hash_3 | default_insert_only - public | products_hash_4 | default_insert_only + public | products_hash_1 | default + public | products_hash_2 | default + public | products_hash_3 | default + public | products_hash_4 | default (5 rows) SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows @@ -168,10 +164,10 @@ SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows --exercise ddl on n2 DROP TABLE products_hash CASCADE; -NOTICE: drop cascades to table products_hash_4 membership in replication set default_insert_only -NOTICE: drop cascades to table products_hash_3 membership in replication set default_insert_only -NOTICE: drop cascades to table products_hash_2 membership in replication set default_insert_only -NOTICE: drop cascades to table products_hash_1 membership in replication set default_insert_only +NOTICE: drop cascades to table products_hash_4 membership in replication set default +NOTICE: drop cascades to table products_hash_3 membership in replication set default +NOTICE: drop cascades to table products_hash_2 membership in replication set default +NOTICE: drop cascades to table products_hash_1 membership in replication set default NOTICE: drop cascades to table products_hash membership in replication set default INFO: DDL statement replicated. 
DROP TABLE diff --git a/t/auto_ddl/6144b_table_hash_partitions_validate_n2.sql b/t/auto_ddl/6144b_table_hash_partitions_validate_n2.sql index 48e69ce..9d6fdca 100644 --- a/t/auto_ddl/6144b_table_hash_partitions_validate_n2.sql +++ b/t/auto_ddl/6144b_table_hash_partitions_validate_n2.sql @@ -18,11 +18,8 @@ DROP TABLE sales_hash CASCADE; \d products_hash_2 \d products_hash_3 \d products_hash_4 -/*TO FIX: -commenting this test case due to https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -only the parent table moves to default repset, all partitions continue to stay in default_insert_only -*/ -EXECUTE spocktab('products_hash'); -- Expect the replication set to change to default + +EXECUTE spocktab('products_hash'); -- Expect the replication set to be default SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows --exercise ddl on n2 DROP TABLE products_hash CASCADE; diff --git a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out index bcd01eb..f330329 100644 --- a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out +++ b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out @@ -1,3 +1,9 @@ +SELECT pg_sleep(2);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + ---- Validate all objects on n2 and then drop them on n2 that should also drop objects on n1 -- Validate database, should not exist \l obj_database diff --git a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql index 124a9e5..4b3707e 100644 --- a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql +++ b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql @@ -1,3 +1,5 @@ +SELECT pg_sleep(2);--to ensure all objects are replicated + ---- Validate all objects on n2 and then drop them on n2 that should also drop objects on n1 -- Validate database, should not exist \l obj_database diff --git a/t/lib/config.env b/t/lib/config.env index c6a664e..c422ce8 100644 --- a/t/lib/config.env +++ b/t/lib/config.env @@ -1,6 +1,6 @@ # Use this file to set a group of values to environment variables; you can source this file to set all the values at once. export EDGE_INSTALL_SCRIPT=install.py -export REPO=https://pgedge-upstream.s3.amazonaws.com/REPO +export REPO=https://pgedge-devel.s3.amazonaws.com/REPO export EDGE_REPO=$REPO/$EDGE_INSTALL_SCRIPT export EDGE_HOST=127.0.0.1 @@ -38,3 +38,6 @@ export EDGE_CLI="pgedge" # Path to store autoddl related actual outputs export EDGE_ACTUAL_OUT_DIR="/tmp/auto_ddl/" + +# To ensure locale related outputs (such as monetary values) stay consistent +export LC_ALL="en_US.UTF-8" From 17dbf08d0a244cc87cafd14d83efcd66b5c18647 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Wed, 16 Oct 2024 15:21:11 +0000 Subject: [PATCH 24/48] Added t/install_PGs_and_exercise_service.py script; the script uses UM to find all of the available PG versions and install them. When the installation is complete, the next script in the UM schedule file installs all of the available components for all of the installed versions and lists the installed versions. This schedule does not run fast, and still needs verification steps added before we truly trust it to do a complete job, but it's easy to verify manually with the latest.log file. 
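The verification step this message says is still missing could eventually look something like the sketch below. It is a hypothetical helper, not part of this patch: it reuses the util_test.run_nc_cmd call and the components list that install_PGs_and_exercise_service.py already builds, and simply fails fast when 'service status' returns a non-zero code for any installed component. The exact needle to search for in the status output would still need to be confirmed against latest.log.

## Hypothetical verification step (not in this patch): confirm each installed Postgres
## component responds to 'service status' before trusting the schedule run.
import util_test

def verify_components(components, home_dir):
    failures = []
    for component in components:
        res = util_test.run_nc_cmd("Verifying service status",
                                   f"service status --component={component}", home_dir)
        print(f"status for {component}: rc={res.returncode}")
        if res.returncode != 0:
            failures.append(component)
    return failures

## Possible use at the end of install_PGs_and_exercise_service.py:
## bad = verify_components(components, home_dir)
## if bad:
##     util_test.exit_message(f"Service status failed for: {bad}", 1)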
--- schedule_files/um_schedule | 4 +- t/install_PGs_and_exercise_service.py | 60 +++++++++++++++++++++ t/util_test.py | 77 ++++++++++++++++++++++++++- 3 files changed, 138 insertions(+), 3 deletions(-) create mode 100644 t/install_PGs_and_exercise_service.py diff --git a/schedule_files/um_schedule b/schedule_files/um_schedule index 9eefaf1..d7de7e3 100644 --- a/schedule_files/um_schedule +++ b/schedule_files/um_schedule @@ -1,7 +1,7 @@ t/setup_01_install.py -t/setup_02_setup.py +#t/setup_02_setup.py -#t/um1_install_available_components.py +t/install_PGs_and_exercise_service.py t/um_install_available_components.py t/um_update_available_components.py t/um_remove_available_components.py diff --git a/t/install_PGs_and_exercise_service.py b/t/install_PGs_and_exercise_service.py new file mode 100644 index 0000000..a9cd5b6 --- /dev/null +++ b/t/install_PGs_and_exercise_service.py @@ -0,0 +1,60 @@ +## This script finds all of the available versions of PG and installs each version with the setup command. + +import sys, os, util_test, subprocess, json + +# Print Script +print(f"Starting - {os.path.basename(__file__)}") + +# Get Test Settings +util_test.set_env() +repo=os.getenv("EDGE_REPO") +pgv=os.getenv("EDGE_INST_VERSION") +num_nodes=int(os.getenv("EDGE_NODES",2)) +home_dir=os.getenv("EDGE_HOME_DIR") +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +cluster_name=os.getenv("EDGE_CLUSTER","demo") +port=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","lcusr") +pw=os.getenv("EDGE_PASSWORD","password") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","susan") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") + +## Create the variables we'll be using in this script: +components = [] +versions = "" +## Service options tested by this script are: +services = ["status","stop","start","restart","reload","enable","disable","config"] +#print("*"*100) + +## We'll call find_pg_versions to return a list of Postgres {versions} available through UM. 
+versions,components=util_test.find_pg_versions(home_dir) + +## Then loop through the versions and install each version with the setup command: +for version in versions: + ## Find a free port for the PG installation; call get_avail_port and pass in the port number: + free_port=util_test.get_avail_ports(port) + install_pg=(f"setup -U {usr} -d {dbname} -P {pw} --port={free_port} --pg_ver={version}") + print(f"The setup command executing now is: {install_pg}") + installed_res=util_test.run_nc_cmd("Installing Postgres versions available", install_pg, f"{home_dir}") + print(installed_res) + + ## Increase the port value by 1 before installing the next version of Postgres: + port = port + 1 + + ## Check to see if the installation was successful + if installed_res.returncode == 0: + print(f"Command succeeded for Postgres {version}:{installed_res.stdout}") + +for component in components: + for svc in services: + ## Exercise the installed Postgres services + print(f"component: {component}") + print(f"svc: {svc}") + command = (f"service {svc} --component={component}") + exercise_svc=util_test.run_nc_cmd("Exercising the service", command, f"{home_dir}") + print(f"The command to exercise the service contains: {exercise_svc}") + + diff --git a/t/util_test.py b/t/util_test.py index 7b71634..7664bc2 100644 --- a/t/util_test.py +++ b/t/util_test.py @@ -1,4 +1,4 @@ -import sys, os, psycopg, json, subprocess, shutil, re, csv +import sys, os, psycopg, json, subprocess, shutil, re, csv, socket from dotenv import load_dotenv from psycopg import sql @@ -472,3 +472,78 @@ def printres(res: subprocess.CompletedProcess[str]) -> None: print("stderr:") for line in error.splitlines(): print(f"\t{line}") + +################################################################### +## Find an available port +################################################################### +def get_avail_ports(p_def_port): + def_port = int(p_def_port) + + # iterate to first non-busy port + while is_socket_busy(def_port): + def_port = def_port + 1 + continue + + err_msg = "Port must be between 1000 and 9999, try again." + + while True: + s_port = str(def_port) + + if s_port.isdigit() == False: + print(err_msg) + continue + + i_port = int(s_port) + + if (i_port < 1000) or (i_port > 9999): + print(err_msg) + continue + + if is_socket_busy(i_port): + if not isJSON: + print("Port " + str(i_port) + " is in use.") + def_port = str(i_port + 1) + continue + + break + + return i_port + +## Required for get_available_port + +def is_socket_busy(p_port): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + result = s.connect_ex(("127.0.0.1",p_port)) + s.close() + print(result) + if result == 0: + return True + else: + return False + +###################################################################### +# Find the pg_versions available to um +###################################################################### + +def find_pg_versions(home_dir): + components = [] + ## We need to pass in the value of {home_dir}; it should return a list of components with the pg removed from the front. + ## Use um to find all of the available versions of Postgres. 
+ res=run_nc_cmd("Getting list of available versions of Postgres", "um list --json", f"{home_dir}") + print(f"{res}") + + ## Break the returned json string into a list: + res = json.loads(res.stdout) + ## Go through the json and find the available PG versions and append it to the components variable: + for i in res: + comp=(i.get("component")) + print(comp) + ## Append the component name to the components variable: + components.append(comp) + print(components) + ## Remove the first two letters from in front of the component name (pgXX) to make it just the version (XX): + versions = [item[2:] for item in components] + print(versions) + return versions, components + +###################################################################### From 49c480762ebcb4c42dfa67dc4000eea561447b17 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Wed, 23 Oct 2024 16:40:53 +0000 Subject: [PATCH 25/48] Added t/cluster-init-bad-version.py/updated cluster_schedule to do a negative test case (installing PG 14). --- t/cluster-init-bad-version.py | 75 +++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 t/cluster-init-bad-version.py diff --git a/t/cluster-init-bad-version.py b/t/cluster-init-bad-version.py new file mode 100644 index 0000000..ba65166 --- /dev/null +++ b/t/cluster-init-bad-version.py @@ -0,0 +1,75 @@ +import sys, os, util_test, subprocess, json + +# Print Script +print(f"Starting - {os.path.basename(__file__)}") + +# Get Test Settings +util_test.set_env() +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +home_dir=os.getenv("EDGE_HOME_DIR") +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +cluster_name=os.getenv("EDGE_CLUSTER","demo") +port=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","lcusr") +pw=os.getenv("EDGE_PASSWORD","password") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","susan") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +spockver=os.getenv("EDGE_SPOCK_VER","4.0.1") +dbname=os.getenv("EDGE_DB","lcdb") + +cwd=os.getcwd() +num_nodes=3 + +## Set Postgres version to an invalid version; we'll use 14 since it's deprecated, so this might not fail on all branches right away. +pgv="14" + +#print("*"*100) + +print(f"home_dir = {home_dir}\n") +command = (f"cluster json-template {cluster_name} {dbname} {num_nodes} {usr} {pw} {pgv} {port}") +res=util_test.run_nc_cmd("This command should create a json file that defines a cluster", command, f"{home_dir}") +print(f"res = {res}\n") + +new_ver = (f"{spockver}") +print(new_ver) +new_path_0 = (f"{cwd}/{cluster_dir}/n1") +new_path_1 = (f"{cwd}/{cluster_dir}/n2") +new_path_2 = (f"{cwd}/{cluster_dir}/n3") + + +with open(f"{cluster_dir}/{cluster_name}.json", 'r') as file: + data = json.load(file) + #print(data) + data["pgedge"]["spock"]["spock_version"] = new_ver + data["node_groups"][0]["path"] = new_path_0 + data["node_groups"][1]["path"] = new_path_1 + data["node_groups"][2]["path"] = new_path_2 + +newdata = json.dumps(data, indent=4) +with open(f"{cluster_dir}/{cluster_name}.json", 'w') as file: + file.write(newdata) + +print(newdata) + +command = (f"cluster init {cluster_name}") +init=util_test.run_nc_cmd("This command should initialize a cluster based on the json file", command, f"{home_dir}") +print(f"init = {init.stdout}\n") +print("*"*100) + +## Note: this is a negative test, so the search is looking for the phrase [FAILED] in the results. 
In this case, passing the test +## while failing the installation is the desired behavior! + +# Needle and Haystack +# Confirm the command worked by looking for: + +if "[FAILED]" in str(init.stdout) or init.returncode == 1: + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + + + From e0cf01351407a6e44a958f74476d18645b5552c9 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Wed, 23 Oct 2024 19:58:09 +0000 Subject: [PATCH 26/48] Updating files in the cluster schedule to fix ordering issues, add spock version to cluster-init.py --- schedule_files/cluster_schedule | 15 ++++++++++----- t/cluster-init.py | 3 +-- t/cluster-remove-node.py | 1 + t/lib/config.env | 9 ++++++--- 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/schedule_files/cluster_schedule b/schedule_files/cluster_schedule index 0d855f5..9b39380 100644 --- a/schedule_files/cluster_schedule +++ b/schedule_files/cluster_schedule @@ -1,20 +1,25 @@ ## Set up tests for a two node cluster t/setup_01_install.py -t/cluster-create-json.py -t/cluster-json-validate.py -t/cluster-json-invalid-file.py t/cluster-init.py -t/get_info.py t/cluster-remove-node.py t/cluster-add-node.py t/cluster-list-nodes.py -t/cluster-init-bad-json.py t/cluster-replication-check.py ## Remove components, Clean environment and free ports t/cleanup_01_node_remove.py t/cleanup_03_remove_nc.py +t/setup_01_install.py +t/cluster-create-json.py +t/cluster-json-validate.py +t/cluster-json-invalid-file.py +t/cluster-init-bad-version.py +t/cluster-init-bad-json.py + +## Remove components, Clean environment and free ports +t/cleanup_03_remove_nc.py + ## Multi-node cluster tests t/setup_01_install.py t/multi-db_cluster_setup.py diff --git a/t/cluster-init.py b/t/cluster-init.py index 5a6f5d4..ec8d47f 100644 --- a/t/cluster-init.py +++ b/t/cluster-init.py @@ -25,7 +25,6 @@ num_nodes=3 - #print("*"*100) print(f"home_dir = {home_dir}\n") @@ -34,7 +33,7 @@ print(f"res = {res}\n") new_ver = (f"{spockver}") -print(new_ver) +print(f"Spock new version is: {new_ver}") new_path_0 = (f"{cwd}/{cluster_dir}/n1") new_path_1 = (f"{cwd}/{cluster_dir}/n2") new_path_2 = (f"{cwd}/{cluster_dir}/n3") diff --git a/t/cluster-remove-node.py b/t/cluster-remove-node.py index c810ed6..7f6830d 100644 --- a/t/cluster-remove-node.py +++ b/t/cluster-remove-node.py @@ -52,6 +52,7 @@ print(f"Successful command: {command2}") print(f"The successful remove-node command returns = {res2}\n") print("*"*100) +print("This test case only removes the replication artifacts. 
The PG installation, data directory, and n3 subdir will remain") # Needle and Haystack # Confirm the command worked by looking for: diff --git a/t/lib/config.env b/t/lib/config.env index c422ce8..a795258 100644 --- a/t/lib/config.env +++ b/t/lib/config.env @@ -27,12 +27,15 @@ export EDGE_DB="lcdb" export EDGE_REPUSER=`whoami` # postgres version details -export EDGE_INST_VERSION=16 +export EDGE_INST_VERSION=17 export EDGE_COMPONENT="pg$EDGE_INST_VERSION" -# spock_ver empty if you want to use spocks default version +# Leave spock_ver empty if you want to use spocks default version # As of 1st August 2024, spock40 is the default pinned version -export EDGE_SPOCK_VER="" + +# As of 10/23/24: Note that if the spock version is empty, cluster add-node will FAIL: +# It will return an error: ERROR: function spock.set_cluster_readonly() does not exist +export EDGE_SPOCK_VER="4.0.5" export EDGE_CLI="pgedge" From 60fd8f08f01d77a2a3ffe75a9cfd1f44f7a447c5 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Fri, 25 Oct 2024 15:46:45 +0000 Subject: [PATCH 27/48] Cleaning up order in long-test schedule, broken test cases --- schedule_files/long-test | 294 +++++++++----------------- t/spock_create_sub_specify_repsets.py | 8 +- 2 files changed, 101 insertions(+), 201 deletions(-) diff --git a/schedule_files/long-test b/schedule_files/long-test index 189c268..d058bbf 100644 --- a/schedule_files/long-test +++ b/schedule_files/long-test @@ -1,45 +1,46 @@ -## Set up a two node cluster -t/setup_01_install.py -t/setup_02_setup.py -#t/get_info.py +## SERVICE MODULE: -## Test Service Module +t/setup_01_install.py +t/install_PGs_and_exercise_service.py -t/service_reload_component.pl -t/service_restart_component.pl -t/service_start_component.pl -t/service_stop_component.pl -t/service_status_without_flag.pl -t/service_enable_component.pl -t/service_disable_component.pl -t/service_enable_error.pl -#t/get_info.py +## UM MODULE -## Test UM Module t/um_install_available_components.py t/um_update_available_components.py t/um_remove_available_components.py -#t/get_info.py +t/cleanup_03_remove_nc.py -## At this point, we do not have a cluster; we have created nc, and installed pgedge on the lower level. There is a -## data directory remaining from the above removal process. 
+## CLUSTER MODULE -## Stand up rest of cluster -t/cluster-create-json.py -t/cluster-init-bad-json.py -t/cluster-json-invalid-file.py -t/cluster-json-validate.py +t/setup_01_install.py t/cluster-init.py -#t/get_info.py t/cluster-remove-node.py t/cluster-add-node.py t/cluster-list-nodes.py -t/cluster-init-bad-json.py t/cluster-replication-check.py -## Test the DB module -## We have a two node cluster in: /home/ec2-user/work/platform_test/nc/pgedge/cluster/demo/n1 and n2 +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py +t/setup_01_install.py +t/cluster-create-json.py +t/cluster-json-validate.py +t/cluster-json-invalid-file.py +t/cluster-init-bad-version.py +t/cluster-init-bad-json.py +## QubeRT specific tests +t/cleanup_03_remove_nc.py +t/setup_01_install.py +t/multi-db_cluster_setup.py +t/multi-db_cluster_exercise_ace.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py +## DB MODULE + +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py +t/spock_2_node_create.py t/db-guc-show.py t/db-guc-show-no-guc.py t/db-guc-show-wildcard.py @@ -48,69 +49,49 @@ t/db-guc-set.py t/db-guc-set-invalid-type.py t/db-guc-set-no-reload.py t/db-guc-set-invalid-value.py -#t/get_info.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py -## At this point, we still have a two node cluster: /home/ec2-user/work/platform_test/nc/pgedge/cluster/demo/n1 and n2 +## SPOCK MODULE -## Test sub_tests -# error tests +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py t/spock_node_create_no_node_name.py t/spock_node_create_no_repset_user.py t/spock_node_create_no_dbname.py t/spock_node_create_no_dns.py -#t/get_info.py - -## -# sub --synchronize_structure tests -## t/spock_sub_create_synch_struct_n1.py t/spock_sub_create_synch_struct_n2.py -#t/get_info.py - -# cleanup scripts t/spock_sub_create_synch_cleanup.py -t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8086_env_node_drop_n1.pl t/8087_env_node_drop_n2.pl -#t/get_info.py - -## -# sub --synchronize_data tests -## t/spock_sub_create_synch_data_n1.py t/spock_sub_create_synch_data_n2.py -#t/get_info.py - -# cleanup scripts t/spock_sub_create_synch_cleanup.py t/8083_env_sub_drop_n2.pl t/8086_env_node_drop_n1.pl t/8087_env_node_drop_n2.pl -#t/get_info.py - -## -# sub --synchronize_structure and --synchronize_data tests -## t/spock_sub_create_synch_all_n1.py t/spock_sub_create_synch_all_n2.py -#t/get_info.py - -# cleanup scripts -t/spock_sub_create_synch_cleanup.py -t/8083_env_sub_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -#t/get_info.py +t/spock_node_add_interface.py +t/spock_node_drop_interface.py +t/spock_node_add_interface_no_db.py +t/spock_node_drop_interface_no_interface.py +t/spock_node_drop_interface_no_db.py +t/spock_create_sub_specify_repsets.py +t/spock_7_negative_list.py +t/spock_8_negative_create.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py -## -#t/8000a_env_setup_pgedge_node1.pl -#t/8001a_env_setup_pgedge_node2.pl -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl -#t/get_info.py +## PGBENCH -## pgbench +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py +t/spock_2_node_create.py t/pgbench-install.py t/pgbench-remove_leaves_my_table.py t/pgbench-install_with_repset.py @@ -118,11 +99,6 @@ t/pgbench-run.py t/pgbench-validate.py t/pgbench-install_skip.py t/pgbench-remove.py -#t/get_info.py - -## -#pgBench Negative-Tests -# t/pgbench-install_no_dbname.py t/pgbench-install_invalid_dbname.py 
t/pgbench-install_invalid_dbname_valid_repsetname.py @@ -134,188 +110,118 @@ t/pgbench-run_invalid_rate.py t/pgbench-validate_no_dbname.py t/pgbench-validate_invalid_dbname.py t/pgbench-remove_no_dbname.py -#t/get_info.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py + +## NORTHWIND + +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py +t/spock_2_node_create.py +t/northwind-install.py +t/northwind-install_with_repset.py +t/northwind-run.py +t/northwind-validate.py +t/northwind-remove.py +t/northwind-install_no_dbname.py +t/northwind-install_invalid_dbname.py +t/northwind-run_dependency_on_northwind-install.py +t/northwind-install_valid_dbname_invalid_repsetname.py +t/northwind-run_without_dbname.py +t/northwind-run_without_offset.py +t/northwind-validate_no_dbname.py +t/northwind-validate_invalid_dbname.py +t/northwind-remove_no_dbname.py +t/northwind-remove_invalid_dbname.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py ##REPSET TESTS -# -# repset replicateDelete=False test cases -## + +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py +t/spock_2_node_create.py +## repset replicateDelete=False test cases t/8060_env_delete_false_n1.pl t/8061_env_sub_n1n2_delete_false.pl t/8062_env_delete_false_n2.pl t/8063_env_sub_n2n1_delete_false.pl t/8064_env_delete_replication_check.pl -#t/get_info.py - -## -# cleanup scripts -## t/8080_env_repset_drop_n1.pl t/8081_env_repset_drop_n2.pl t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8084_env_table_drop_n1.pl t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -#t/get_info.py - -## -# node creation -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl -#t/get_info.py - -## -# repset replicateTruncate=False test cases -## +## repset replicateInsert=False test cases +t/8065_env_insert_false_n1.pl +t/8066_env_sub_n1n2_insert_false.pl +t/8067_env_insert_false_n2.pl +t/8068_env_sub_n2n1_insert_false.pl +t/8069_env_insert_replication_check.pl +t/8080_env_repset_drop_n1.pl +t/8081_env_repset_drop_n2.pl +t/8082_env_sub_drop_n1.pl +t/8083_env_sub_drop_n2.pl +t/8084_env_table_drop_n1.pl +t/8085_env_table_drop_n2.pl +## repset replicateTruncate=False test cases t/8075_env_truncate_false_n1.pl t/8076_env_sub_n1n2_truncate_false.pl t/8077_env_truncate_false_n2.pl t/8078_env_sub_n2n1_truncate_false.pl t/8079_env_truncate_replication_check.pl -#t/get_info.py - -## -# cleanup scripts -## t/8080_env_repset_drop_n1.pl t/8081_env_repset_drop_n2.pl t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8084_env_table_drop_n1.pl t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -## -# node creation -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl -## -# repset replicateUpdate=False test cases -## +## repset replicateUpdate=False test cases t/8070_env_update_false_n1.pl t/8071_env_sub_n1n2_update_false.pl t/8072_env_update_false_n2.pl t/8073_env_sub_n2n1_update_false.pl t/8074_env_update_replication_check.pl -## -# cleanup scripts -## t/8080_env_repset_drop_n1.pl t/8081_env_repset_drop_n2.pl t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8084_env_table_drop_n1.pl t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -# node creation -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl -## -# spock node-add and node-drop test cases -## -t/spock_node_add_interface.py -t/spock_node_drop_interface.py -t/spock_node_add_interface_no_db.py 
-t/spock_node_drop_interface_no_interface.py -t/spock_node_drop_interface_no_db.py - -## -# spock sub-create -r 'repset_array' -## - t/spock_create_sub_specify_repsets.py t/spock_create_sub_specify_repsets_nonrepset_user.py t/spock_sub_remove_repset.py t/spock_sub_remove_repset_error.py - -## -# spock repset-create errors -## - t/spock_repset_create_error_1.py t/spock_repset_create_error_2.py t/spock_repset_create_error_3.py - -# cleanup scripts -## - -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl - -# Set up for the next round of tests - -## Test Spock Module -t/spock_1_setup.py -t/spock_2_node_create.py -t/spock_3_sub_create.py -t/spock_4_repset_add_table.py -t/spock_5_cofirm_replication.py - -## Test Ace Module (currently tested in ace-test) -# t/cluster_1_gen_json.py -# t/ace_1_setup.py -# t/ace_2_diff_table.py -# t/ace_3_diff_table_args.py -# t/ace_4_diff_additional.py -# t/ace_99_cleanup.py - -## Test Drop and Negative Spock Module -t/spock_6_drop.py -t/spock_7_negative_list.py -t/spock_8_negative_create.py - -## Remove components, Clean environment and free ports -t/cleanup_01_node_remove.py -t/cleanup_03_remove_nc.py - -## Run the multi-db cluster tests -t/setup_01_install.py -t/multi-db_cluster_setup.py -t/multi-db_cluster_exercise_ace.py - -## Remove components, Clean environment and free ports t/cleanup_01_node_remove.py t/cleanup_03_remove_nc.py -## Setup for Filtering Tests +## FILTERING t/setup_01_install.py t/setup_03_node_install.py t/setup_04_node_setup.py -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl -t/6000_setup_sub_create_n1n2_n1.pl -t/6001_setup_sub_create_n2n1_n2.pl - -#Filtering Scripts - +t/spock_2_node_create.py +t/spock_3_sub_create.py t/column_filtering.pl t/row_filtering.pl t/partition_filtering.pl - -## Remove components, Clean environment and free ports t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py -## Setup for Snowflake Tests - +## SNOWFLAKE t/setup_01_install.py t/setup_03_node_install.py t/setup_04_node_setup.py t/8051_env_create_node1.pl t/8052_env_create_node2.pl - -# Snowflake Scripts - t/snowflake.py t/snowflake_script.py t/snowflake_spock_cmds.py - -## Remove components, Clean environment and free ports t/cleanup_01_node_remove.py t/cleanup_03_remove_nc.py diff --git a/t/spock_create_sub_specify_repsets.py b/t/spock_create_sub_specify_repsets.py index fcb46ea..75bcf7f 100644 --- a/t/spock_create_sub_specify_repsets.py +++ b/t/spock_create_sub_specify_repsets.py @@ -19,15 +19,9 @@ dbname=os.getenv("EDGE_DB","lcdb") # # Create a subscription with an array of repsets; this is the 'happy path' testcase. -# First, we clean up the environment to remove the subscription. 
- -check_value = util_test.read_psql("select sub_name from spock.subscription;",host,dbname,port,pw,usr).strip("[]") -if "my_test_sub" in str(check_value): - drop_sub = f"spock sub-drop my_test_sub dbname={dbname}" - drop=util_test.run_cmd("Run spock sub-drop to prepare for test.", drop_sub, f"{cluster_dir}/n1") print("*"*100) -command = f"spock sub-create my_test_sub 'host={host} port={port} user={repuser} dbname={dbname}' {dbname} -r 'this_repset,that_repset,the_other_repset'" +command = f"spock sub-create my_test_sub 'host={host} user={repuser} dbname={dbname} port={port}' {dbname} -r 'this_repset,that_repset,the_other_repset'" res=util_test.run_cmd("Run spock sub-create -r.", command, f"{cluster_dir}/n1") print(f"Print our command here: {command}") print(f"Print res.stdout here: - {res.stdout}") From f28cc00b21c16e1c7b6f14041334f9eb43182160 Mon Sep 17 00:00:00 2001 From: "A. Hayee Bhatti" Date: Mon, 28 Oct 2024 19:39:07 +0500 Subject: [PATCH 28/48] [AutoDDL] Add setup and cleanup SQL scripts for standardized superuser/non-superuser roles Introduced two SQL scripts, 6001 (for setup) and 6901 (for cleanup), to create consistent superuser (named adminuser) and non-superuser (named appuser) roles. These roles will be used in autoddl test scripts to execute AutoDDL operations, primarily from a non-superuser. They also help ensure consistent output across systems with varying default superuser names derived from system user name. This commit also includes corresponding .out files to capture expected outputs. --- schedule_files/auto_ddl_schedule | 2 ++ .../6001_env_prereq_autoddl_setup_n1.out | 33 +++++++++++++++++++ .../6001_env_prereq_autoddl_setup_n1.sql | 29 ++++++++++++++++ t/auto_ddl/6901_env_cleanup_autoddl_n1.out | 16 +++++++++ t/auto_ddl/6901_env_cleanup_autoddl_n1.sql | 11 +++++++ 5 files changed, 91 insertions(+) create mode 100644 t/auto_ddl/6001_env_prereq_autoddl_setup_n1.out create mode 100644 t/auto_ddl/6001_env_prereq_autoddl_setup_n1.sql create mode 100644 t/auto_ddl/6901_env_cleanup_autoddl_n1.out create mode 100644 t/auto_ddl/6901_env_cleanup_autoddl_n1.sql diff --git a/schedule_files/auto_ddl_schedule b/schedule_files/auto_ddl_schedule index 5d46d52..4ecae50 100644 --- a/schedule_files/auto_ddl_schedule +++ b/schedule_files/auto_ddl_schedule @@ -21,6 +21,7 @@ t/6011_setup_autoddl_gucs_on_n2.pl ## # autoDDL scripts ## +t/auto_ddl/6001_env_prereq_autoddl_setup_n1.sql t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql t/auto_ddl/6100b_table_validate_and_drop_n2.sql t/auto_ddl/6100c_table_validate_n1.sql @@ -48,6 +49,7 @@ t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.sql t/auto_ddl/6666a_all_objects_create_n1.sql t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql t/auto_ddl/6666c_all_objects_validate_n1.sql +t/auto_ddl/6901_env_cleanup_autoddl_n1.sql ## # cleanup scripts ## diff --git a/t/auto_ddl/6001_env_prereq_autoddl_setup_n1.out b/t/auto_ddl/6001_env_prereq_autoddl_setup_n1.out new file mode 100644 index 0000000..b5c71b4 --- /dev/null +++ b/t/auto_ddl/6001_env_prereq_autoddl_setup_n1.out @@ -0,0 +1,33 @@ +-- This is a pre-req file that needs to executed prior to any of the autoDDL sql tests +-- This will create the necessary shared objects needed by the autoDDL tests +--creating a superuser +CREATE ROLE adminuser SUPERUSER LOGIN; +INFO: DDL statement replicated. 
+CREATE ROLE
+--creating a non-superuser that will have access to the public schema as well as user schemas
+-- the permission on the public schema will be granted here whereas the individual schema privileges
+-- will be assigned in the individual tests.
+CREATE ROLE appuser LOGIN;
+INFO: DDL statement replicated.
+CREATE ROLE
+GRANT ALL PRIVILEGES ON SCHEMA public TO appuser;
+INFO: DDL statement replicated.
+GRANT
+-- Creating a function with SECURITY DEFINER privileges so that a non-superuser
+-- can query the spock.tables catalog to check for tables' repset assignments
+CREATE OR REPLACE FUNCTION public.get_table_repset_info(partial_name TEXT)
+RETURNS TABLE (nspname TEXT, relname TEXT, set_name TEXT)
+LANGUAGE sql
+SECURITY DEFINER AS
+$$
+SELECT nspname, relname, set_name
+FROM spock.tables
+WHERE relname LIKE '%' || partial_name || '%'
+ORDER BY relid;
+$$;
+INFO: DDL statement replicated.
+CREATE FUNCTION
+-- Grant execution rights to the non-superuser
+GRANT EXECUTE ON FUNCTION public.get_table_repset_info(TEXT) TO appuser;
+INFO: DDL statement replicated.
+GRANT
diff --git a/t/auto_ddl/6001_env_prereq_autoddl_setup_n1.sql b/t/auto_ddl/6001_env_prereq_autoddl_setup_n1.sql
new file mode 100644
index 0000000..83dc93c
--- /dev/null
+++ b/t/auto_ddl/6001_env_prereq_autoddl_setup_n1.sql
@@ -0,0 +1,29 @@
+-- This is a pre-req file that needs to executed prior to any of the autoDDL sql tests
+-- This will create the necessary shared objects needed by the autoDDL tests
+
+--creating a superuser
+CREATE ROLE adminuser SUPERUSER LOGIN;
+
+--creating a non-superuser that will have access to the public schema as well as user schemas
+-- the permission on the public schema will be granted here whereas the individual schema privileges
+-- will be assigned in the individual tests.
+CREATE ROLE appuser LOGIN;
+
+GRANT ALL PRIVILEGES ON SCHEMA public TO appuser;
+
+-- Creating a function with SECURITY DEFINER privileges so that a non-superuser
+-- can query the spock.tables catalog to check for tables' repset assignments
+CREATE OR REPLACE FUNCTION public.get_table_repset_info(partial_name TEXT)
+RETURNS TABLE (nspname TEXT, relname TEXT, set_name TEXT)
+LANGUAGE sql
+SECURITY DEFINER AS
+$$
+SELECT nspname, relname, set_name
+FROM spock.tables
+WHERE relname LIKE '%' || partial_name || '%'
+ORDER BY relid;
+$$;
+
+-- Grant execution rights to the non-superuser
+GRANT EXECUTE ON FUNCTION public.get_table_repset_info(TEXT) TO appuser;
+
diff --git a/t/auto_ddl/6901_env_cleanup_autoddl_n1.out b/t/auto_ddl/6901_env_cleanup_autoddl_n1.out
new file mode 100644
index 0000000..ef68069
--- /dev/null
+++ b/t/auto_ddl/6901_env_cleanup_autoddl_n1.out
@@ -0,0 +1,16 @@
+-- This is a autoddl cleanup file cleaning up all objects created via 6001 setup script
+DROP OWNED BY adminuser;
+INFO: DDL statement replicated.
+DROP OWNED
+DROP OWNED BY appuser;
+INFO: DDL statement replicated.
+DROP OWNED
+DROP FUNCTION IF EXISTS public.get_table_repset_info(TEXT);
+INFO: DDL statement replicated.
+DROP FUNCTION
+DROP ROLE IF EXISTS appuser;
+INFO: DDL statement replicated.
+DROP ROLE
+DROP ROLE adminuser;
+INFO: DDL statement replicated.
+DROP ROLE diff --git a/t/auto_ddl/6901_env_cleanup_autoddl_n1.sql b/t/auto_ddl/6901_env_cleanup_autoddl_n1.sql new file mode 100644 index 0000000..5952a29 --- /dev/null +++ b/t/auto_ddl/6901_env_cleanup_autoddl_n1.sql @@ -0,0 +1,11 @@ +-- This is a autoddl cleanup file cleaning up all objects created via 6001 setup script + +DROP OWNED BY adminuser; + +DROP OWNED BY appuser; + +DROP FUNCTION IF EXISTS public.get_table_repset_info(TEXT); + +DROP ROLE IF EXISTS appuser; + +DROP ROLE adminuser; From 9076df01ba18f969d5402e110c8d1c5bcac99ac6 Mon Sep 17 00:00:00 2001 From: "A. Hayee Bhatti" Date: Mon, 28 Oct 2024 20:53:24 +0500 Subject: [PATCH 29/48] [AutoDDL] Update 6100/6111 a,b,c scripts to execute via nonsuperuser and adjust outputs Updated AutoDDL SQL scripts 6100 (a, b, c) and 6111 (a, b, c) to execute primarily under the non-superuser (appuser) role, switching to superuser where necessary. Adjusted the related SQL scripts and expected output files to reflect this change. --- .../6100a_table_datatypes_create_alter_n1.out | 192 ++++++++++-------- .../6100a_table_datatypes_create_alter_n1.sql | 70 ++++--- .../6100b_table_validate_and_drop_n2.out | 86 ++++---- .../6100b_table_validate_and_drop_n2.sql | 27 +-- t/auto_ddl/6100c_table_validate_n1.out | 41 ++-- t/auto_ddl/6100c_table_validate_n1.sql | 32 +-- .../6111a_table_tx_ctas_selectinto_like.out | 167 ++++++++------- .../6111a_table_tx_ctas_selectinto_like.sql | 63 +++--- .../6111b_table_validate_and_drop_n2.out | 153 +++++++------- .../6111b_table_validate_and_drop_n2.sql | 51 ++--- t/auto_ddl/6111c_table_validate_n1.out | 65 +++--- t/auto_ddl/6111c_table_validate_n1.sql | 56 ++--- 12 files changed, 554 insertions(+), 449 deletions(-) diff --git a/t/auto_ddl/6100a_table_datatypes_create_alter_n1.out b/t/auto_ddl/6100a_table_datatypes_create_alter_n1.out index 401e442..f4e343e 100644 --- a/t/auto_ddl/6100a_table_datatypes_create_alter_n1.out +++ b/t/auto_ddl/6100a_table_datatypes_create_alter_n1.out @@ -1,10 +1,23 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- 6100a_create_alter_table_n1.sql -- This script creates and alters tables on node n1 to test the autoDDL functionality. -- It includes a wide variety of data types and exercises several CREATE TABLE/ ALTER TABLE DDL constructs. -- Also regularly verifying spock.tables --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; -PREPARE +CREATE SCHEMA IF NOT EXISTS s610; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA s610 TO appuser; +INFO: DDL statement replicated. 
+GRANT +SET ROLE appuser; +SET +SET search_path TO s610, public; +SET -- Create a table for employee details with various data types CREATE TABLE employees ( emp_id INT PRIMARY KEY, @@ -30,7 +43,7 @@ INSERT INTO employees (emp_id, first_name, last_name, email, hire_date, birth_ti INSERT 0 2 -- Validate the structure, spock.tables catalog table and data \d employees - Table "public.employees" + Table "s610.employees" Column | Type | Collation | Nullable | Default -----------------+-----------------------------+-----------+----------+--------- emp_id | integer | | not null | @@ -51,10 +64,10 @@ Indexes: Check constraints: "chk_salary" CHECK (salary > 0::numeric) -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); nspname | relname | set_name ---------+-----------+---------- - public | employees | default + s610 | employees | default (1 row) -- Create a table for department details @@ -75,7 +88,7 @@ INSERT INTO departments (dept_id, dept_name, location, established, budget, acti INSERT 0 2 -- Validate the structure, spock.tables catalog table and data \d departments - Table "public.departments" + Table "s610.departments" Column | Type | Collation | Nullable | Default -------------+------------------------+-----------+----------+--------- dept_id | integer | | not null | @@ -87,10 +100,10 @@ INSERT 0 2 Indexes: "departments_pkey" PRIMARY KEY, btree (dept_id) -EXECUTE spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); nspname | relname | set_name ---------+-------------+---------- - public | departments | default + s610 | departments | default (1 row) -- Alter table employees to add new columns, modify existing columns, and add constraints @@ -111,7 +124,7 @@ INFO: DDL statement replicated. ALTER TABLE -- Validate the structure, spock.tables catalog table and data \d employees - Table "public.employees" + Table "s610.employees" Column | Type | Collation | Nullable | Default -----------------+-----------------------------+-----------+----------+--------- emp_id | integer | | not null | @@ -136,10 +149,10 @@ Check constraints: Foreign-key constraints: "fk_dept" FOREIGN KEY (dept_id) REFERENCES departments(dept_id) -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); nspname | relname | set_name ---------+-----------+---------- - public | employees | default + s610 | employees | default (1 row) -- Insert additional data with new columns @@ -166,7 +179,7 @@ INSERT INTO projects (project_id, project_name, start_date, end_date, budget, ac INSERT 0 2 -- Validate the structure, spock.tables catalog table and data \d projects - Table "public.projects" + Table "s610.projects" Column | Type | Collation | Nullable | Default --------------+------------------------+-----------+----------+--------- project_id | integer | | not null | @@ -181,10 +194,10 @@ Indexes: Check constraints: "projects_budget_check" CHECK (budget > 0::numeric) -EXECUTE spocktab('projects'); +SELECT * FROM get_table_repset_info('projects'); nspname | relname | set_name ---------+----------+---------- - public | projects | default + s610 | projects | default (1 row) -- Create a table for employee projects (many-to-many relationship) @@ -207,7 +220,7 @@ INSERT INTO employee_projects (emp_id, project_id, hours_worked, role) VALUES INSERT 0 3 -- Validate the structure, spock.tables catalog table and data \d employee_projects - Table "public.employee_projects" + Table "s610.employee_projects" Column | Type | Collation | Nullable | Default 
--------------+-----------------------+-----------+----------+--------- emp_id | integer | | not null | @@ -220,10 +233,10 @@ Foreign-key constraints: "employee_projects_emp_id_fkey" FOREIGN KEY (emp_id) REFERENCES employees(emp_id) "employee_projects_project_id_fkey" FOREIGN KEY (project_id) REFERENCES projects(project_id) -EXECUTE spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); nspname | relname | set_name ---------+-------------------+---------- - public | employee_projects | default + s610 | employee_projects | default (1 row) -- Create additional tables to cover more data types and constraints @@ -246,7 +259,7 @@ INSERT INTO products (product_id, product_name, price, stock_quantity, discontin INSERT 0 2 -- Validate the structure, spock.tables catalog table and data \d products - Table "public.products" + Table "s610.products" Column | Type | Collation | Nullable | Default ---------------------+-----------------------------+-----------+----------+--------- product_id | integer | | not null | @@ -260,10 +273,10 @@ INSERT 0 2 Indexes: "products_pkey" PRIMARY KEY, btree (product_id) -EXECUTE spocktab('products'); +SELECT * FROM get_table_repset_info('products'); nspname | relname | set_name ---------+----------+---------- - public | products | default + s610 | products | default (1 row) -- Alter table products to add and modify columns @@ -278,7 +291,7 @@ INFO: DDL statement replicated. ALTER TABLE -- Validate the structure, spock.tables catalog table and data \d products - Table "public.products" + Table "s610.products" Column | Type | Collation | Nullable | Default ---------------------+-----------------------------+-----------+----------+--------- product_id | integer | | not null | @@ -295,10 +308,10 @@ Indexes: Check constraints: "price_check" CHECK (price > 0::numeric) -EXECUTE spocktab('products'); +SELECT * FROM get_table_repset_info('products'); nspname | relname | set_name ---------+----------+---------- - public | products | default + s610 | products | default (1 row) -- Update product data @@ -319,7 +332,7 @@ INSERT INTO "CaseSensitiveTable" ("ID", "Name", "Value") VALUES INSERT 0 2 -- Validate the structure, spock.tables catalog table and data \d "CaseSensitiveTable" - Table "public.CaseSensitiveTable" + Table "s610.CaseSensitiveTable" Column | Type | Collation | Nullable | Default --------+-----------------------+-----------+----------+--------- ID | integer | | not null | @@ -328,10 +341,10 @@ INSERT 0 2 Indexes: "CaseSensitiveTable_pkey" PRIMARY KEY, btree ("ID") -EXECUTE spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); nspname | relname | set_name ---------+--------------------+---------- - public | CaseSensitiveTable | default + s610 | CaseSensitiveTable | default (1 row) -- Create table to test various ALTER TABLE operations @@ -356,7 +369,7 @@ INFO: DDL statement replicated. 
ALTER TABLE -- Validate the structure, spock.tables catalog table and data \d test_tab1 - Table "public.test_tab1" + Table "s610.test_tab1" Column | Type | Collation | Nullable | Default ----------+------------------------+-----------+----------+--------- id | uuid | | not null | @@ -364,10 +377,10 @@ ALTER TABLE Indexes: "test_tab1_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab1 | default + s610 | test_tab1 | default (1 row) -- Create table to test more data types and constraints @@ -395,7 +408,7 @@ INSERT INTO test_tab2 (id, timestamp_col, interval_col, inet_col, cidr_col, maca INSERT 0 1 -- Validate the structure, spock.tables catalog table and data \d test_tab2 - Table "public.test_tab2" + Table "s610.test_tab2" Column | Type | Collation | Nullable | Default ---------------+--------------------------+-----------+----------+--------- id | integer | | not null | @@ -415,10 +428,10 @@ INSERT 0 1 Indexes: "test_tab2_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab2 | default + s610 | test_tab2 | default (1 row) -- Create table to test composite and array types @@ -437,7 +450,7 @@ INSERT INTO test_tab3 (id, name, int_array, text_array) VALUES INSERT 0 2 -- Validate the structure, spock.tables catalog table and data \d test_tab3 - Table "public.test_tab3" + Table "s610.test_tab3" Column | Type | Collation | Nullable | Default ------------+------------------------+-----------+----------+--------- id | integer | | not null | @@ -447,10 +460,10 @@ INSERT 0 2 Indexes: "test_tab3_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab3 | default + s610 | test_tab3 | default (1 row) -- creating table without primary key to ensure the default repset is default_insert_only @@ -467,10 +480,10 @@ CREATE TABLE INSERT INTO test_tab4 (id, data) VALUES ('m2eebc99', 'Initial data'); INSERT 0 1 -- Execute prepared statement for the table, repset default_insert_only -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+--------------------- - public | test_tab4 | default_insert_only + s610 | test_tab4 | default_insert_only (1 row) -- Alter table to add a primary key on the id column @@ -479,7 +492,7 @@ INFO: DDL statement replicated. ALTER TABLE -- Display the table structure \d test_tab4 - Table "public.test_tab4" + Table "s610.test_tab4" Column | Type | Collation | Nullable | Default --------+------------------------+-----------+----------+--------- id | text | | not null | @@ -488,10 +501,10 @@ Indexes: "test_tab4_pkey" PRIMARY KEY, btree (id) -- Execute prepared statement for the table, repset default -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab4 | default + s610 | test_tab4 | default (1 row) -- Alter table to remove primary key @@ -510,17 +523,17 @@ INFO: DDL statement replicated. 
ALTER TABLE -- Display the table structure \d test_tab4 - Table "public.test_tab4" + Table "s610.test_tab4" Column | Type | Collation | Nullable | Default ----------+------------------------+-----------+----------+--------- id | text | | not null | old_data | character varying(100) | | | -- Execute prepared statement again for the table -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+--------------------- - public | test_tab4 | default_insert_only + s610 | test_tab4 | default_insert_only (1 row) -- Alter table to add a primary key on multiple columns @@ -529,7 +542,7 @@ INFO: DDL statement replicated. ALTER TABLE -- Display the table structure \d test_tab4 - Table "public.test_tab4" + Table "s610.test_tab4" Column | Type | Collation | Nullable | Default ----------+------------------------+-----------+----------+--------- id | text | | not null | @@ -538,10 +551,10 @@ Indexes: "test_tab4_pkey" PRIMARY KEY, btree (id, old_data) -- Execute prepared statement again for the table -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab4 | default + s610 | test_tab4 | default (1 row) -- Alter table to drop the primary key @@ -550,17 +563,17 @@ INFO: DDL statement replicated. ALTER TABLE -- Display the table structure \d test_tab4 - Table "public.test_tab4" + Table "s610.test_tab4" Column | Type | Collation | Nullable | Default ----------+------------------------+-----------+----------+--------- id | text | | not null | old_data | character varying(100) | | not null | -- Execute prepared statement again for the table -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+--------------------- - public | test_tab4 | default_insert_only + s610 | test_tab4 | default_insert_only (1 row) -- Negative test cases to validate constraints and error handling @@ -602,7 +615,7 @@ UPDATE test_tab5 SET character_col = 'upd_char', jsonb_col = '{"updated_key": "u UPDATE 1 -- Validate the structure of the table \d test_tab5 - Table "public.test_tab5" + Table "s610.test_tab5" Column | Type | Collation | Nullable | Default -------------------------+------------------------+-----------+----------+--------- bigint_col | bigint | | not null | @@ -620,15 +633,15 @@ UPDATE 1 Indexes: "test_tab5_pkey" PRIMARY KEY, btree (bigint_col) -EXECUTE spocktab('test_tab5'); -- default repset expected +SELECT * FROM get_table_repset_info('test_tab5'); -- default repset expected nspname | relname | set_name ---------+-----------+---------- - public | test_tab5 | default + s610 | test_tab5 | default (1 row) -- Final validation of all tables along with querying the spock.tables \d+ employees - Table "public.employees" + Table "s610.employees" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------------+-----------------------------+-----------+----------+---------+----------+-------------+--------------+------------- emp_id | integer | | not null | | plain | | | @@ -656,14 +669,14 @@ Referenced by: TABLE "employee_projects" CONSTRAINT "employee_projects_emp_id_fkey" FOREIGN KEY (emp_id) REFERENCES employees(emp_id) Access method: heap -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); nspname | relname | set_name ---------+-----------+---------- - public | employees | default + 
s610 | employees | default (1 row) \d+ departments - Table "public.departments" + Table "s610.departments" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- dept_id | integer | | not null | | plain | | | @@ -678,14 +691,14 @@ Referenced by: TABLE "employees" CONSTRAINT "fk_dept" FOREIGN KEY (dept_id) REFERENCES departments(dept_id) Access method: heap -execute spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); nspname | relname | set_name ---------+-------------+---------- - public | departments | default + s610 | departments | default (1 row) \d+ projects - Table "public.projects" + Table "s610.projects" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- project_id | integer | | not null | | plain | | | @@ -703,14 +716,15 @@ Referenced by: TABLE "employee_projects" CONSTRAINT "employee_projects_project_id_fkey" FOREIGN KEY (project_id) REFERENCES projects(project_id) Access method: heap -execute spocktab('projects'); - nspname | relname | set_name ----------+----------+---------- - public | projects | default -(1 row) +SELECT * FROM get_table_repset_info('projects'); + nspname | relname | set_name +---------+-------------------+---------- + s610 | projects | default + s610 | employee_projects | default +(2 rows) \d+ employee_projects - Table "public.employee_projects" + Table "s610.employee_projects" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- emp_id | integer | | not null | | plain | | | @@ -724,14 +738,14 @@ Foreign-key constraints: "employee_projects_project_id_fkey" FOREIGN KEY (project_id) REFERENCES projects(project_id) Access method: heap -execute spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); nspname | relname | set_name ---------+-------------------+---------- - public | employee_projects | default + s610 | employee_projects | default (1 row) \d+ products - Table "public.products" + Table "s610.products" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ---------------------+-----------------------------+-----------+----------+---------+----------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -749,14 +763,14 @@ Check constraints: "price_check" CHECK (price > 0::numeric) Access method: heap -execute spocktab('products'); +SELECT * FROM get_table_repset_info('products'); nspname | relname | set_name ---------+----------+---------- - public | products | default + s610 | products | default (1 row) \d+ "CaseSensitiveTable" - Table "public.CaseSensitiveTable" + Table "s610.CaseSensitiveTable" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- ID | integer | | not null | | plain | | | @@ -766,14 +780,14 @@ Indexes: "CaseSensitiveTable_pkey" PRIMARY KEY, btree ("ID") Access method: heap -execute spocktab('CaseSensitiveTable'); 
+SELECT * FROM get_table_repset_info('CaseSensitiveTable'); nspname | relname | set_name ---------+--------------------+---------- - public | CaseSensitiveTable | default + s610 | CaseSensitiveTable | default (1 row) \d+ test_tab1 - Table "public.test_tab1" + Table "s610.test_tab1" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | uuid | | not null | | plain | | | @@ -782,14 +796,14 @@ Indexes: "test_tab1_pkey" PRIMARY KEY, btree (id) Access method: heap -execute spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab1 | default + s610 | test_tab1 | default (1 row) \d+ test_tab2 - Table "public.test_tab2" + Table "s610.test_tab2" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ---------------+--------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -810,14 +824,14 @@ Indexes: "test_tab2_pkey" PRIMARY KEY, btree (id) Access method: heap -execute spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab2 | default + s610 | test_tab2 | default (1 row) \d+ test_tab3 - Table "public.test_tab3" + Table "s610.test_tab3" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -828,28 +842,28 @@ Indexes: "test_tab3_pkey" PRIMARY KEY, btree (id) Access method: heap -execute spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab3 | default + s610 | test_tab3 | default (1 row) \d+ test_tab4 - Table "public.test_tab4" + Table "s610.test_tab4" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | text | | not null | | extended | | | old_data | character varying(100) | | not null | | extended | | | Access method: heap -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+--------------------- - public | test_tab4 | default_insert_only + s610 | test_tab4 | default_insert_only (1 row) \d+ test_tab5 - Table "public.test_tab5" + Table "s610.test_tab5" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- bigint_col | bigint | | not null | | plain | | | @@ -868,10 +882,10 @@ Indexes: "test_tab5_pkey" PRIMARY KEY, btree (bigint_col) Access method: heap -EXECUTE spocktab('test_tab5'); +SELECT * FROM get_table_repset_info('test_tab5'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab5 | default + s610 | test_tab5 | default (1 row) -- Validating data in all tables diff --git 
a/t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql b/t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql index b9ca681..b705d96 100644 --- a/t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql +++ b/t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql @@ -1,10 +1,19 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + -- 6100a_create_alter_table_n1.sql -- This script creates and alters tables on node n1 to test the autoDDL functionality. -- It includes a wide variety of data types and exercises several CREATE TABLE/ ALTER TABLE DDL constructs. -- Also regularly verifying spock.tables --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; + +CREATE SCHEMA IF NOT EXISTS s610; + +GRANT ALL PRIVILEGES ON SCHEMA s610 TO appuser; + +SET ROLE appuser; + +SET search_path TO s610, public; + -- Create a table for employee details with various data types CREATE TABLE employees ( @@ -30,7 +39,8 @@ INSERT INTO employees (emp_id, first_name, last_name, email, hire_date, birth_ti -- Validate the structure, spock.tables catalog table and data \d employees -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); + -- Create a table for department details CREATE TABLE departments ( @@ -49,7 +59,7 @@ INSERT INTO departments (dept_id, dept_name, location, established, budget, acti -- Validate the structure, spock.tables catalog table and data \d departments -EXECUTE spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); -- Alter table employees to add new columns, modify existing columns, and add constraints ALTER TABLE employees ADD COLUMN middle_name VARCHAR(100); @@ -60,7 +70,7 @@ ALTER TABLE employees RENAME COLUMN street_address TO address; -- Validate the structure, spock.tables catalog table and data \d employees -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); -- Insert additional data with new columns INSERT INTO employees (emp_id, first_name, middle_name, last_name, email, hire_date, birth_time, salary, full_time, address, metadata, start_timestamp, emp_coordinates, dept_id) VALUES @@ -85,7 +95,7 @@ INSERT INTO projects (project_id, project_name, start_date, end_date, budget, ac -- Validate the structure, spock.tables catalog table and data \d projects -EXECUTE spocktab('projects'); +SELECT * FROM get_table_repset_info('projects'); -- Create a table for employee projects (many-to-many relationship) CREATE TABLE employee_projects ( @@ -106,7 +116,7 @@ INSERT INTO employee_projects (emp_id, project_id, hours_worked, role) VALUES -- Validate the structure, spock.tables catalog table and data \d employee_projects -EXECUTE spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); -- Create additional tables to cover more data types and constraints CREATE TABLE products ( @@ -127,7 +137,7 @@ INSERT INTO products (product_id, product_name, price, stock_quantity, discontin -- Validate the structure, spock.tables catalog table and data \d products -EXECUTE spocktab('products'); +SELECT * FROM get_table_repset_info('products'); -- Alter table products to add and modify columns ALTER TABLE products ADD COLUMN category VARCHAR(50); @@ -136,7 +146,7 @@ ALTER TABLE products ADD CONSTRAINT price_check CHECK (price > 0); -- Validate the structure, spock.tables catalog table and data \d products -EXECUTE spocktab('products'); +SELECT * 
FROM get_table_repset_info('products'); -- Update product data UPDATE products SET stock_quantity = 150 WHERE product_id = 1; @@ -155,7 +165,7 @@ INSERT INTO "CaseSensitiveTable" ("ID", "Name", "Value") VALUES -- Validate the structure, spock.tables catalog table and data \d "CaseSensitiveTable" -EXECUTE spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); -- Create table to test various ALTER TABLE operations CREATE TABLE test_tab1 ( @@ -173,7 +183,7 @@ ALTER TABLE test_tab1 RENAME COLUMN data TO old_data; -- Validate the structure, spock.tables catalog table and data \d test_tab1 -EXECUTE spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); -- Create table to test more data types and constraints CREATE TABLE test_tab2 ( @@ -199,7 +209,7 @@ INSERT INTO test_tab2 (id, timestamp_col, interval_col, inet_col, cidr_col, maca -- Validate the structure, spock.tables catalog table and data \d test_tab2 -EXECUTE spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); -- Create table to test composite and array types CREATE TABLE test_tab3 ( @@ -216,7 +226,7 @@ INSERT INTO test_tab3 (id, name, int_array, text_array) VALUES -- Validate the structure, spock.tables catalog table and data \d test_tab3 -EXECUTE spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); -- creating table without primary key to ensure the default repset is default_insert_only -- and then play around with adding primary key and dropping them to see the repset @@ -231,14 +241,14 @@ CREATE TABLE test_tab4 ( -- Insert initial data into test_tab4 INSERT INTO test_tab4 (id, data) VALUES ('m2eebc99', 'Initial data'); -- Execute prepared statement for the table, repset default_insert_only -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); -- Alter table to add a primary key on the id column ALTER TABLE test_tab4 ADD PRIMARY KEY (id); -- Display the table structure \d test_tab4 -- Execute prepared statement for the table, repset default -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); -- Alter table to remove primary key ALTER TABLE test_tab4 DROP CONSTRAINT test_tab4_pkey; @@ -251,7 +261,7 @@ ALTER TABLE test_tab4 RENAME COLUMN data TO old_data; -- Display the table structure \d test_tab4 -- Execute prepared statement again for the table -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); -- Alter table to add a primary key on multiple columns ALTER TABLE test_tab4 ADD PRIMARY KEY (id, old_data); @@ -259,7 +269,7 @@ ALTER TABLE test_tab4 ADD PRIMARY KEY (id, old_data); -- Display the table structure \d test_tab4 -- Execute prepared statement again for the table -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); -- Alter table to drop the primary key ALTER TABLE test_tab4 DROP CONSTRAINT test_tab4_pkey; @@ -267,7 +277,7 @@ ALTER TABLE test_tab4 DROP CONSTRAINT test_tab4_pkey; -- Display the table structure \d test_tab4 -- Execute prepared statement again for the table -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); -- Negative test cases to validate constraints and error handling -- Attempt to insert a record with a duplicate primary key (should fail) @@ -305,41 +315,41 @@ UPDATE test_tab5 SET character_col = 'upd_char', jsonb_col = '{"updated_key": "u -- Validate the structure of the table \d test_tab5 -EXECUTE spocktab('test_tab5'); -- default repset expected +SELECT * FROM 
get_table_repset_info('test_tab5'); -- default repset expected -- Final validation of all tables along with querying the spock.tables \d+ employees -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); \d+ departments -execute spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); \d+ projects -execute spocktab('projects'); +SELECT * FROM get_table_repset_info('projects'); \d+ employee_projects -execute spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); \d+ products -execute spocktab('products'); +SELECT * FROM get_table_repset_info('products'); \d+ "CaseSensitiveTable" -execute spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); \d+ test_tab1 -execute spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); \d+ test_tab2 -execute spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); \d+ test_tab3 -execute spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); \d+ test_tab4 -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); \d+ test_tab5 -EXECUTE spocktab('test_tab5'); +SELECT * FROM get_table_repset_info('test_tab5'); -- Validating data in all tables SELECT * FROM employees ORDER BY emp_id; diff --git a/t/auto_ddl/6100b_table_validate_and_drop_n2.out b/t/auto_ddl/6100b_table_validate_and_drop_n2.out index 4f1aa62..00d852b 100644 --- a/t/auto_ddl/6100b_table_validate_and_drop_n2.out +++ b/t/auto_ddl/6100b_table_validate_and_drop_n2.out @@ -1,12 +1,19 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- AutoDDL validation on n2 to ensure all the DDL/DML performed in the 6100a files on n1 -- was auto replicated to n2. -- In the end, the same objects are dropped. 
--- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; -PREPARE +SET ROLE appuser; +SET +SET search_path TO s610, public; +SET -- Final validation of all tables along with querying the spock.tables \d+ employees - Table "public.employees" + Table "s610.employees" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------------+-----------------------------+-----------+----------+---------+----------+-------------+--------------+------------- emp_id | integer | | not null | | plain | | | @@ -34,14 +41,14 @@ Referenced by: TABLE "employee_projects" CONSTRAINT "employee_projects_emp_id_fkey" FOREIGN KEY (emp_id) REFERENCES employees(emp_id) Access method: heap -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); nspname | relname | set_name ---------+-----------+---------- - public | employees | default + s610 | employees | default (1 row) \d+ departments - Table "public.departments" + Table "s610.departments" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- dept_id | integer | | not null | | plain | | | @@ -56,14 +63,14 @@ Referenced by: TABLE "employees" CONSTRAINT "fk_dept" FOREIGN KEY (dept_id) REFERENCES departments(dept_id) Access method: heap -execute spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); nspname | relname | set_name ---------+-------------+---------- - public | departments | default + s610 | departments | default (1 row) \d+ projects - Table "public.projects" + Table "s610.projects" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- project_id | integer | | not null | | plain | | | @@ -81,14 +88,15 @@ Referenced by: TABLE "employee_projects" CONSTRAINT "employee_projects_project_id_fkey" FOREIGN KEY (project_id) REFERENCES projects(project_id) Access method: heap -execute spocktab('projects'); - nspname | relname | set_name ----------+----------+---------- - public | projects | default -(1 row) +SELECT * FROM get_table_repset_info('projects'); + nspname | relname | set_name +---------+-------------------+---------- + s610 | projects | default + s610 | employee_projects | default +(2 rows) \d+ employee_projects - Table "public.employee_projects" + Table "s610.employee_projects" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- emp_id | integer | | not null | | plain | | | @@ -102,14 +110,14 @@ Foreign-key constraints: "employee_projects_project_id_fkey" FOREIGN KEY (project_id) REFERENCES projects(project_id) Access method: heap -execute spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); nspname | relname | set_name ---------+-------------------+---------- - public | employee_projects | default + s610 | employee_projects | default (1 row) \d+ products - Table "public.products" + Table "s610.products" Column | Type | Collation | Nullable | Default | Storage | 
Compression | Stats target | Description ---------------------+-----------------------------+-----------+----------+---------+----------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -127,14 +135,14 @@ Check constraints: "price_check" CHECK (price > 0::numeric) Access method: heap -execute spocktab('products'); +SELECT * FROM get_table_repset_info('products'); nspname | relname | set_name ---------+----------+---------- - public | products | default + s610 | products | default (1 row) \d+ "CaseSensitiveTable" - Table "public.CaseSensitiveTable" + Table "s610.CaseSensitiveTable" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- ID | integer | | not null | | plain | | | @@ -144,14 +152,14 @@ Indexes: "CaseSensitiveTable_pkey" PRIMARY KEY, btree ("ID") Access method: heap -execute spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); nspname | relname | set_name ---------+--------------------+---------- - public | CaseSensitiveTable | default + s610 | CaseSensitiveTable | default (1 row) \d+ test_tab1 - Table "public.test_tab1" + Table "s610.test_tab1" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | uuid | | not null | | plain | | | @@ -160,14 +168,14 @@ Indexes: "test_tab1_pkey" PRIMARY KEY, btree (id) Access method: heap -execute spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab1 | default + s610 | test_tab1 | default (1 row) \d+ test_tab2 - Table "public.test_tab2" + Table "s610.test_tab2" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ---------------+--------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -188,14 +196,14 @@ Indexes: "test_tab2_pkey" PRIMARY KEY, btree (id) Access method: heap -execute spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab2 | default + s610 | test_tab2 | default (1 row) \d+ test_tab3 - Table "public.test_tab3" + Table "s610.test_tab3" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -206,28 +214,28 @@ Indexes: "test_tab3_pkey" PRIMARY KEY, btree (id) Access method: heap -execute spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab3 | default + s610 | test_tab3 | default (1 row) \d+ test_tab4 - Table "public.test_tab4" + Table "s610.test_tab4" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | text | | not null | | extended | | | old_data | character varying(100) | | not null | | 
extended | | | Access method: heap -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+--------------------- - public | test_tab4 | default_insert_only + s610 | test_tab4 | default_insert_only (1 row) \d+ test_tab5 - Table "public.test_tab5" + Table "s610.test_tab5" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- bigint_col | bigint | | not null | | plain | | | @@ -246,10 +254,10 @@ Indexes: "test_tab5_pkey" PRIMARY KEY, btree (bigint_col) Access method: heap -EXECUTE spocktab('test_tab5'); +SELECT * FROM get_table_repset_info('test_tab5'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab5 | default + s610 | test_tab5 | default (1 row) -- Validating data in all tables diff --git a/t/auto_ddl/6100b_table_validate_and_drop_n2.sql b/t/auto_ddl/6100b_table_validate_and_drop_n2.sql index 31b07b3..313198e 100644 --- a/t/auto_ddl/6100b_table_validate_and_drop_n2.sql +++ b/t/auto_ddl/6100b_table_validate_and_drop_n2.sql @@ -1,44 +1,45 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated -- AutoDDL validation on n2 to ensure all the DDL/DML performed in the 6100a files on n1 -- was auto replicated to n2. -- In the end, the same objects are dropped. --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; +SET ROLE appuser; +SET search_path TO s610, public; -- Final validation of all tables along with querying the spock.tables \d+ employees -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); \d+ departments -execute spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); \d+ projects -execute spocktab('projects'); +SELECT * FROM get_table_repset_info('projects'); \d+ employee_projects -execute spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); \d+ products -execute spocktab('products'); +SELECT * FROM get_table_repset_info('products'); \d+ "CaseSensitiveTable" -execute spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); \d+ test_tab1 -execute spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); \d+ test_tab2 -execute spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); \d+ test_tab3 -execute spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); \d+ test_tab4 -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); \d+ test_tab5 -EXECUTE spocktab('test_tab5'); +SELECT * FROM get_table_repset_info('test_tab5'); -- Validating data in all tables SELECT * FROM employees ORDER BY emp_id; diff --git a/t/auto_ddl/6100c_table_validate_n1.out b/t/auto_ddl/6100c_table_validate_n1.out index d17ed35..6fa0c4a 100644 --- a/t/auto_ddl/6100c_table_validate_n1.out +++ b/t/auto_ddl/6100c_table_validate_n1.out @@ -1,85 +1,98 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- Final AutoDDL validation for the 6100 series on n1 to ensure all the DROP TABLE performed in the 6100b files on n2 -- was auto replicated to n1. 
-- None of the Tables should exist and spock.tables should not contain any entries for these tables --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; -PREPARE +SET ROLE appuser; +SET +SET search_path TO s610, public; +SET -- Final validation of all tables along with querying the spock.tables -- validating all tables dropped on n1 \d+ employees Did not find any relation named "employees". -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ departments Did not find any relation named "departments". -execute spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ projects Did not find any relation named "projects". -execute spocktab('projects'); +SELECT * FROM get_table_repset_info('projects'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ employee_projects Did not find any relation named "employee_projects". -execute spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ products Did not find any relation named "products". -execute spocktab('products'); +SELECT * FROM get_table_repset_info('products'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ "CaseSensitiveTable" Did not find any relation named ""CaseSensitiveTable"". -execute spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ test_tab1 Did not find any relation named "test_tab1". -execute spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ test_tab2 Did not find any relation named "test_tab2". -execute spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ test_tab3 Did not find any relation named "test_tab3". -execute spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ test_tab4 Did not find any relation named "test_tab4". -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ test_tab5 Did not find any relation named "test_tab5". -EXECUTE spocktab('test_tab5'); +SELECT * FROM get_table_repset_info('test_tab5'); nspname | relname | set_name ---------+---------+---------- (0 rows) +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s610 CASCADE; +INFO: DDL statement replicated. +DROP SCHEMA diff --git a/t/auto_ddl/6100c_table_validate_n1.sql b/t/auto_ddl/6100c_table_validate_n1.sql index bcce4cf..9be6652 100644 --- a/t/auto_ddl/6100c_table_validate_n1.sql +++ b/t/auto_ddl/6100c_table_validate_n1.sql @@ -1,42 +1,48 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated -- Final AutoDDL validation for the 6100 series on n1 to ensure all the DROP TABLE performed in the 6100b files on n2 -- was auto replicated to n1. 
-- None of the Tables should exist and spock.tables should not contain any entries for these tables --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; +SET ROLE appuser; + +SET search_path TO s610, public; -- Final validation of all tables along with querying the spock.tables -- validating all tables dropped on n1 \d+ employees -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); \d+ departments -execute spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); \d+ projects -execute spocktab('projects'); +SELECT * FROM get_table_repset_info('projects'); \d+ employee_projects -execute spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); \d+ products -execute spocktab('products'); +SELECT * FROM get_table_repset_info('products'); \d+ "CaseSensitiveTable" -execute spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); \d+ test_tab1 -execute spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); \d+ test_tab2 -execute spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); \d+ test_tab3 -execute spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); \d+ test_tab4 -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); \d+ test_tab5 -EXECUTE spocktab('test_tab5'); +SELECT * FROM get_table_repset_info('test_tab5'); + +RESET ROLE; +--dropping the schema +DROP SCHEMA s610 CASCADE; \ No newline at end of file diff --git a/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.out b/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.out index c9e4e27..187818e 100644 --- a/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.out +++ b/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.out @@ -1,11 +1,24 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- This script covers the following CREATE TABLE constructs for AutoDDL: -- CREATE TABLE in transactions -- CREATE TABLE AS -- SELECT .. INTO .. FROM EXISTING -- CREATE TABLE LIKE --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; -PREPARE +CREATE SCHEMA IF NOT EXISTS s611; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA s611 TO appuser; +INFO: DDL statement replicated. +GRANT +SET ROLE appuser; +SET +SET search_path TO s611, public; +SET ---------------------------- -- Table DDL in transactions ---------------------------- @@ -19,17 +32,17 @@ CREATE TABLE COMMIT; COMMIT \d sub_tx_table0 - Table "public.sub_tx_table0" + Table "s611.sub_tx_table0" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | not null | Indexes: "sub_tx_table0_pkey" PRIMARY KEY, btree (c) -EXECUTE spocktab('sub_tx_table0'); --default repset +SELECT * FROM get_table_repset_info('sub_tx_table0'); --default repset nspname | relname | set_name ---------+---------------+---------- - public | sub_tx_table0 | default + s611 | sub_tx_table0 | default (1 row) -- DDL within tx, Rollback @@ -44,7 +57,7 @@ ROLLBACK; ROLLBACK \d sub_tx_table0a Did not find any relation named "sub_tx_table0a". 
-EXECUTE spocktab('sub_tx_table0a'); +SELECT * FROM get_table_repset_info('sub_tx_table0a'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -67,7 +80,7 @@ COMMIT; COMMIT \d sub_tx_table1 Did not find any relation named "sub_tx_table1". -EXECUTE spocktab('sub_tx_table1'); +SELECT * FROM get_table_repset_info('sub_tx_table1'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -93,15 +106,15 @@ ROLLBACK COMMIT; COMMIT \d sub_tx_table2 - Table "public.sub_tx_table2" + Table "s611.sub_tx_table2" Column | Type | Collation | Nullable | Default --------+--------+-----------+----------+--------- c | bigint | | | -EXECUTE spocktab('sub_tx_table2'); +SELECT * FROM get_table_repset_info('sub_tx_table2'); nspname | relname | set_name ---------+---------------+--------------------- - public | sub_tx_table2 | default_insert_only + s611 | sub_tx_table2 | default_insert_only (1 row) BEGIN; @@ -115,7 +128,7 @@ INSERT 0 5 END; COMMIT \d sub_tx_table3 - Table "public.sub_tx_table3" + Table "s611.sub_tx_table3" Column | Type | Collation | Nullable | Default --------+----------+-----------+----------+--------- a | smallint | | not null | @@ -133,10 +146,10 @@ SELECT * FROM sub_tx_table3 order by a; 777 | 777.777 (5 rows) -EXECUTE spocktab('sub_tx_table3'); +SELECT * FROM get_table_repset_info('sub_tx_table3'); nspname | relname | set_name ---------+---------------+---------- - public | sub_tx_table3 | default + s611 | sub_tx_table3 | default (1 row) BEGIN; @@ -158,7 +171,7 @@ ROLLBACK --table sub_tx_table4 should not exist \d sub_tx_table4 Did not find any relation named "sub_tx_table4". -EXECUTE spocktab('sub_tx_table4'); +SELECT * FROM get_table_repset_info('sub_tx_table4'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -199,48 +212,50 @@ WARNING: there is no transaction in progress COMMIT -- Validate sub_tx_table5, sub_tx_table5a, and sub_tx_table5c should exist, sub_tx_table5b should not \d sub_tx_table5 - Table "public.sub_tx_table5" + Table "s611.sub_tx_table5" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | | -EXECUTE spocktab('sub_tx_table5'); -- should be in default_insert_only set - nspname | relname | set_name ----------+---------------+--------------------- - public | sub_tx_table5 | default_insert_only -(1 row) +SELECT * FROM get_table_repset_info('sub_tx_table5'); -- should be in default_insert_only set + nspname | relname | set_name +---------+----------------+--------------------- + s611 | sub_tx_table5 | default_insert_only + s611 | sub_tx_table5a | default + s611 | sub_tx_table5c | default_insert_only +(3 rows) \d sub_tx_table5a - Table "public.sub_tx_table5a" + Table "s611.sub_tx_table5a" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | not null | Indexes: "sub_tx_table5a_pkey" PRIMARY KEY, btree (c) -EXECUTE spocktab('sub_tx_table5a'); -- should be in default +SELECT * FROM get_table_repset_info('sub_tx_table5a'); -- should be in default nspname | relname | set_name ---------+----------------+---------- - public | sub_tx_table5a | default + s611 | sub_tx_table5a | default (1 row) \d sub_tx_table5b Did not find any relation named "sub_tx_table5b". 
-EXECUTE spocktab('sub_tx_table5b'); -- should not exist +SELECT * FROM get_table_repset_info('sub_tx_table5b'); -- should not exist nspname | relname | set_name ---------+---------+---------- (0 rows) \d sub_tx_table5c - Table "public.sub_tx_table5c" + Table "s611.sub_tx_table5c" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | | -EXECUTE spocktab('sub_tx_table5c'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('sub_tx_table5c'); -- should be in default_insert_only set nspname | relname | set_name ---------+----------------+--------------------- - public | sub_tx_table5c | default_insert_only + s611 | sub_tx_table5c | default_insert_only (1 row) ----------------------- @@ -273,17 +288,17 @@ WARNING: DDL statement replicated, but could be unsafe. CREATE TABLE AS -- Validate table_ctas1 \d table_ctas1 - Table "public.table_ctas1" + Table "s611.table_ctas1" Column | Type | Collation | Nullable | Default --------+-----------------------+-----------+----------+--------- id | integer | | | name | character varying(50) | | | age | integer | | | -EXECUTE spocktab('table_ctas1'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas1'); -- should be in default_insert_only set nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas1 | default_insert_only + s611 | table_ctas1 | default_insert_only (1 row) -- CREATE TABLE AS with specific columns and data @@ -298,7 +313,7 @@ INFO: DDL statement replicated. ALTER TABLE -- Validate table_ctas2 \d table_ctas2 - Table "public.table_ctas2" + Table "s611.table_ctas2" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- id | integer | | not null | @@ -306,10 +321,10 @@ ALTER TABLE Indexes: "table_ctas2_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('table_ctas2'); -- should be in default set +SELECT * FROM get_table_repset_info('table_ctas2'); -- should be in default set nspname | relname | set_name ---------+-------------+---------- - public | table_ctas2 | default + s611 | table_ctas2 | default (1 row) -- CREATE TABLE AS with VALUES clause and primary key @@ -322,7 +337,7 @@ INFO: DDL statement replicated. ALTER TABLE -- Validate table_ctas3 \d table_ctas3 - Table "public.table_ctas3" + Table "s611.table_ctas3" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- id | integer | | not null | @@ -330,10 +345,10 @@ ALTER TABLE Indexes: "table_ctas3_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('table_ctas3'); -- should be in default set +SELECT * FROM get_table_repset_info('table_ctas3'); -- should be in default set nspname | relname | set_name ---------+-------------+---------- - public | table_ctas3 | default + s611 | table_ctas3 | default (1 row) -- CREATE TABLE AS with query and using WITH NO DATA @@ -344,17 +359,17 @@ WARNING: DDL statement replicated, but could be unsafe. 
CREATE TABLE AS -- Validate table_ctas4 \d table_ctas4 - Table "public.table_ctas4" + Table "s611.table_ctas4" Column | Type | Collation | Nullable | Default ------------+-----------------------+-----------+----------+--------- id | integer | | | name | character varying(50) | | | double_age | integer | | | -EXECUTE spocktab('table_ctas4'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas4'); -- should be in default_insert_only set nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas4 | default_insert_only + s611 | table_ctas4 | default_insert_only (1 row) -- CREATE TABLE AS with expression @@ -364,15 +379,15 @@ WARNING: DDL statement replicated, but could be unsafe. SELECT 10 -- Validate table_ctas5 \d table_ctas5 - Table "public.table_ctas5" + Table "s611.table_ctas5" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- num | integer | | | -EXECUTE spocktab('table_ctas5'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas5'); -- should be in default_insert_only set nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas5 | default_insert_only + s611 | table_ctas5 | default_insert_only (1 row) -- CREATE TABLE AS with explain analyze, redirecting the output to /dev/null so that the varying query plan is not @@ -384,15 +399,15 @@ WARNING: DDL statement replicated, but could be unsafe. \o -- Validate table_ctas6 \d table_ctas6 - Table "public.table_ctas6" + Table "s611.table_ctas6" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- a | integer | | | -EXECUTE spocktab('table_ctas6'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas6'); -- should be in default_insert_only set nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas6 | default_insert_only + s611 | table_ctas6 | default_insert_only (1 row) ----------------------------------- @@ -421,7 +436,7 @@ WARNING: DDL statement replicated, but could be unsafe. SELECT 4 -- Validate table_si1 \d table_si1 - Table "public.table_si1" + Table "s611.table_si1" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- id | integer | | | @@ -430,10 +445,10 @@ SELECT 4 column3 | date | | | column4 | boolean | | | -EXECUTE spocktab('table_si1'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si1'); -- should be in default_insert_only set nspname | relname | set_name ---------+-----------+--------------------- - public | table_si1 | default_insert_only + s611 | table_si1 | default_insert_only (1 row) -- SELECT INTO with specific columns and conditions @@ -442,17 +457,17 @@ WARNING: DDL statement replicated, but could be unsafe. 
SELECT 2 -- Validate table_si2 \d table_si2 - Table "public.table_si2" + Table "s611.table_si2" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- id | integer | | | column1 | text | | | column2 | integer | | | -EXECUTE spocktab('table_si2'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si2'); -- should be in default_insert_only set nspname | relname | set_name ---------+-----------+--------------------- - public | table_si2 | default_insert_only + s611 | table_si2 | default_insert_only (1 row) -- Expected data: (3, 'value3', 30), (4, 'value4', 40) @@ -462,16 +477,16 @@ WARNING: DDL statement replicated, but could be unsafe. SELECT 2 -- Validate table_si3 \d table_si3 - Table "public.table_si3" + Table "s611.table_si3" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- column4 | boolean | | | count | bigint | | | -EXECUTE spocktab('table_si3'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si3'); -- should be in default_insert_only set nspname | relname | set_name ---------+-----------+--------------------- - public | table_si3 | default_insert_only + s611 | table_si3 | default_insert_only (1 row) -- Expected data: (TRUE, 2), (FALSE, 2) @@ -481,16 +496,16 @@ WARNING: DDL statement replicated, but could be unsafe. SELECT 2 -- Validate table_si4 \d table_si4 - Table "public.table_si4" + Table "s611.table_si4" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- id | integer | | | column1 | text | | | -EXECUTE spocktab('table_si4'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si4'); -- should be in default_insert_only set nspname | relname | set_name ---------+-----------+--------------------- - public | table_si4 | default_insert_only + s611 | table_si4 | default_insert_only (1 row) -- Expected data: (4, 'value4'), (3, 'value3') @@ -519,17 +534,17 @@ WARNING: DDL statement replicated, but could be unsafe. 
SELECT 2 -- Validate table_si5 \d table_si5 - Table "public.table_si5" + Table "s611.table_si5" Column | Type | Collation | Nullable | Default ------------+-----------------------+-----------+----------+--------- id | integer | | | column1 | text | | | extra_data | character varying(50) | | | -EXECUTE spocktab('table_si5'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si5'); -- should be in default_insert_only set nspname | relname | set_name ---------+-----------+--------------------- - public | table_si5 | default_insert_only + s611 | table_si5 | default_insert_only (1 row) -- Expected data: (1, 'value1', 'extra1'), (3, 'value3', 'extra3') @@ -567,16 +582,16 @@ CREATE TABLE -- Validate table_l1 -- Expected columns: col1 (without primary key), col2 (with default 'default_text') \d table_l1 - Table "public.table_l1" + Table "s611.table_l1" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+---------------------- col1 | integer | | not null | col2 | text | | | 'default_text'::text -EXECUTE spocktab('table_l1'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l1'); -- should be in default_insert_only set nspname | relname | set_name ---------+----------+--------------------- - public | table_l1 | default_insert_only + s611 | table_l1 | default_insert_only (1 row) -- Create table using LIKE excluding defaults @@ -586,16 +601,16 @@ CREATE TABLE -- Validate table_l2 -- Expected columns: col1 (without primary key), col2 (without default) \d table_l2 - Table "public.table_l2" + Table "s611.table_l2" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- col1 | integer | | not null | col2 | text | | | -EXECUTE spocktab('table_l2'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l2'); -- should be in default_insert_only set nspname | relname | set_name ---------+----------+--------------------- - public | table_l2 | default_insert_only + s611 | table_l2 | default_insert_only (1 row) -- Create table using LIKE including all properties @@ -605,7 +620,7 @@ CREATE TABLE -- Validate table_l3 -- Expected columns: col1, col2, col3 (with check constraint and unique constraint) \d table_l3 - Table "public.table_l3" + Table "s611.table_l3" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- col1 | integer | | | @@ -616,10 +631,10 @@ Indexes: Check constraints: "chk_col1" CHECK (col1 > 0) -EXECUTE spocktab('table_l3'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l3'); -- should be in default_insert_only set nspname | relname | set_name ---------+----------+--------------------- - public | table_l3 | default_insert_only + s611 | table_l3 | default_insert_only (1 row) -- Create table using LIKE excluding constraints @@ -629,17 +644,17 @@ CREATE TABLE -- Validate table_l4 -- Expected columns: col1, col2, col3 (without constraints) \d table_l4 - Table "public.table_l4" + Table "s611.table_l4" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- col1 | integer | | | col2 | text | | | col3 | date | | | -EXECUTE spocktab('table_l4'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l4'); -- should be in default_insert_only set nspname | relname | set_name ---------+----------+--------------------- - public | table_l4 | default_insert_only + s611 | table_l4 | 
default_insert_only (1 row) -- Create table using LIKE including indexes @@ -649,7 +664,7 @@ CREATE TABLE -- Validate table_l5 -- Expected columns: col1 (primary key), col2 (without default), indexes copied \d table_l5 - Table "public.table_l5" + Table "s611.table_l5" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- col1 | integer | | not null | @@ -657,10 +672,10 @@ CREATE TABLE Indexes: "table_l5_pkey" PRIMARY KEY, btree (col1) -EXECUTE spocktab('table_l5'); -- should be in default set +SELECT * FROM get_table_repset_info('table_l5'); -- should be in default set nspname | relname | set_name ---------+----------+---------- - public | table_l5 | default + s611 | table_l5 | default (1 row) -- Insert data into the LIKE created tables to validate defaults and constraints diff --git a/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.sql b/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.sql index 3c95d56..011d86d 100644 --- a/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.sql +++ b/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.sql @@ -1,3 +1,5 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + -- This script covers the following CREATE TABLE constructs for AutoDDL: -- CREATE TABLE in transactions -- CREATE TABLE AS @@ -5,8 +7,13 @@ -- CREATE TABLE LIKE --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; +CREATE SCHEMA IF NOT EXISTS s611; + +GRANT ALL PRIVILEGES ON SCHEMA s611 TO appuser; + +SET ROLE appuser; + +SET search_path TO s611, public; ---------------------------- -- Table DDL in transactions @@ -19,7 +26,7 @@ CREATE TABLE sub_tx_table0 (c int primary key); COMMIT; \d sub_tx_table0 -EXECUTE spocktab('sub_tx_table0'); --default repset +SELECT * FROM get_table_repset_info('sub_tx_table0'); --default repset -- DDL within tx, Rollback -- table will not get created on n1 and therefore nothing should replicate to n2 @@ -29,7 +36,7 @@ CREATE TABLE sub_tx_table0a (c int); ROLLBACK; \d sub_tx_table0a -EXECUTE spocktab('sub_tx_table0a'); +SELECT * FROM get_table_repset_info('sub_tx_table0a'); --DDL within transaction and savepoints and rollback/commit --table sub_tx_table1 will not be created so it should not get replicated @@ -41,7 +48,7 @@ CREATE TABLE sub_tx_table1 (c int); COMMIT; \d sub_tx_table1 -EXECUTE spocktab('sub_tx_table1'); +SELECT * FROM get_table_repset_info('sub_tx_table1'); --ALTERING TABLE within transaction, savepoints, rollback -- After commit, the table should have c column datatype to bigint @@ -54,7 +61,7 @@ BEGIN; COMMIT; \d sub_tx_table2 -EXECUTE spocktab('sub_tx_table2'); +SELECT * FROM get_table_repset_info('sub_tx_table2'); BEGIN; CREATE TABLE sub_tx_table3 (a smallint primary key, b real); @@ -64,7 +71,7 @@ END; \d sub_tx_table3 SELECT * FROM sub_tx_table3 order by a; -EXECUTE spocktab('sub_tx_table3'); +SELECT * FROM get_table_repset_info('sub_tx_table3'); BEGIN; CREATE TABLE sub_tx_table4 (a int4 primary key); @@ -74,7 +81,7 @@ SELECT count(*) from sub_tx_table3;--0 rows ABORT;--rollback --table sub_tx_table4 should not exist \d sub_tx_table4 -EXECUTE spocktab('sub_tx_table4'); +SELECT * FROM get_table_repset_info('sub_tx_table4'); SELECT count(*) from sub_tx_table3;--5 rows, which should also exist on n2 (validated in the 6111b file) -- Nested transactions with multiple savepoints and a mix of rollbacks and commits @@ -92,13 +99,13 @@ COMMIT; -- Validate 
sub_tx_table5, sub_tx_table5a, and sub_tx_table5c should exist, sub_tx_table5b should not \d sub_tx_table5 -EXECUTE spocktab('sub_tx_table5'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('sub_tx_table5'); -- should be in default_insert_only set \d sub_tx_table5a -EXECUTE spocktab('sub_tx_table5a'); -- should be in default +SELECT * FROM get_table_repset_info('sub_tx_table5a'); -- should be in default \d sub_tx_table5b -EXECUTE spocktab('sub_tx_table5b'); -- should not exist +SELECT * FROM get_table_repset_info('sub_tx_table5b'); -- should not exist \d sub_tx_table5c -EXECUTE spocktab('sub_tx_table5c'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('sub_tx_table5c'); -- should be in default_insert_only set @@ -129,7 +136,7 @@ SELECT id, name FROM table_base1; -- Validate table_ctas1 \d table_ctas1 -EXECUTE spocktab('table_ctas1'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas1'); -- should be in default_insert_only set -- CREATE TABLE AS with specific columns and data CREATE TABLE IF NOT EXISTS table_ctas2 AS @@ -141,7 +148,7 @@ ALTER TABLE table_ctas2 ADD PRIMARY KEY (id); -- Validate table_ctas2 \d table_ctas2 -EXECUTE spocktab('table_ctas2'); -- should be in default set +SELECT * FROM get_table_repset_info('table_ctas2'); -- should be in default set -- CREATE TABLE AS with VALUES clause and primary key CREATE TABLE table_ctas3 (id, value) AS @@ -150,7 +157,7 @@ ALTER TABLE table_ctas3 ADD PRIMARY KEY (id); -- Validate table_ctas3 \d table_ctas3 -EXECUTE spocktab('table_ctas3'); -- should be in default set +SELECT * FROM get_table_repset_info('table_ctas3'); -- should be in default set -- CREATE TABLE AS with query and using WITH NO DATA CREATE TABLE table_ctas4 AS @@ -159,7 +166,7 @@ WHERE age <= 30 WITH NO DATA; -- Validate table_ctas4 \d table_ctas4 -EXECUTE spocktab('table_ctas4'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas4'); -- should be in default_insert_only set -- CREATE TABLE AS with expression CREATE TABLE table_ctas5 AS @@ -167,7 +174,7 @@ SELECT generate_series(1, 10) AS num; -- Validate table_ctas5 \d table_ctas5 -EXECUTE spocktab('table_ctas5'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas5'); -- should be in default_insert_only set -- CREATE TABLE AS with explain analyze, redirecting the output to /dev/null so that the varying query plan is not -- captured in the expected output, to keep our output consistent across runs. @@ -178,7 +185,7 @@ SELECT 1 AS a; -- Validate table_ctas6 \d table_ctas6 -EXECUTE spocktab('table_ctas6'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas6'); -- should be in default_insert_only set ----------------------------------- -- Create table using SELECT .. INTO .. 
@@ -205,14 +212,14 @@ SELECT * INTO table_si1 FROM table_existing1; -- Validate table_si1 \d table_si1 -EXECUTE spocktab('table_si1'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si1'); -- should be in default_insert_only set -- SELECT INTO with specific columns and conditions SELECT id, column1, column2 INTO table_si2 FROM table_existing1 WHERE column2 > 20; -- Validate table_si2 \d table_si2 -EXECUTE spocktab('table_si2'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si2'); -- should be in default_insert_only set -- Expected data: (3, 'value3', 30), (4, 'value4', 40) -- SELECT INTO with GROUP BY and HAVING @@ -220,7 +227,7 @@ SELECT column4, COUNT(*) AS count INTO table_si3 FROM table_existing1 GROUP BY c -- Validate table_si3 \d table_si3 -EXECUTE spocktab('table_si3'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si3'); -- should be in default_insert_only set -- Expected data: (TRUE, 2), (FALSE, 2) -- SELECT INTO with ORDER BY and LIMIT @@ -228,7 +235,7 @@ SELECT id, column1 INTO table_si4 FROM table_existing1 ORDER BY column2 DESC LIM -- Validate table_si4 \d table_si4 -EXECUTE spocktab('table_si4'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si4'); -- should be in default_insert_only set -- Expected data: (4, 'value4'), (3, 'value3') -- Complex SELECT INTO with JOIN, GROUP BY, ORDER BY, and LIMIT @@ -254,7 +261,7 @@ LIMIT 3; -- Validate table_si5 \d table_si5 -EXECUTE spocktab('table_si5'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si5'); -- should be in default_insert_only set -- Expected data: (1, 'value1', 'extra1'), (3, 'value3', 'extra3') --------------------- @@ -289,7 +296,7 @@ CREATE TABLE table_l1 (LIKE table_base1a INCLUDING DEFAULTS INCLUDING CONSTRAINT -- Validate table_l1 -- Expected columns: col1 (without primary key), col2 (with default 'default_text') \d table_l1 -EXECUTE spocktab('table_l1'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l1'); -- should be in default_insert_only set -- Create table using LIKE excluding defaults @@ -298,7 +305,7 @@ CREATE TABLE table_l2 (LIKE table_base1a EXCLUDING DEFAULTS); -- Validate table_l2 -- Expected columns: col1 (without primary key), col2 (without default) \d table_l2 -EXECUTE spocktab('table_l2'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l2'); -- should be in default_insert_only set -- Create table using LIKE including all properties @@ -307,7 +314,7 @@ CREATE TABLE table_l3 (LIKE table_base2 INCLUDING ALL); -- Validate table_l3 -- Expected columns: col1, col2, col3 (with check constraint and unique constraint) \d table_l3 -EXECUTE spocktab('table_l3'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l3'); -- should be in default_insert_only set -- Create table using LIKE excluding constraints CREATE TABLE table_l4 (LIKE table_base2 EXCLUDING CONSTRAINTS); @@ -315,7 +322,7 @@ CREATE TABLE table_l4 (LIKE table_base2 EXCLUDING CONSTRAINTS); -- Validate table_l4 -- Expected columns: col1, col2, col3 (without constraints) \d table_l4 -EXECUTE spocktab('table_l4'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l4'); -- should be in default_insert_only set -- Create table using LIKE including indexes CREATE TABLE table_l5 (LIKE table_base1a INCLUDING INDEXES); @@ -323,7 +330,7 @@ 
CREATE TABLE table_l5 (LIKE table_base1a INCLUDING INDEXES); -- Validate table_l5 -- Expected columns: col1 (primary key), col2 (without default), indexes copied \d table_l5 -EXECUTE spocktab('table_l5'); -- should be in default set +SELECT * FROM get_table_repset_info('table_l5'); -- should be in default set -- Insert data into the LIKE created tables to validate defaults and constraints diff --git a/t/auto_ddl/6111b_table_validate_and_drop_n2.out b/t/auto_ddl/6111b_table_validate_and_drop_n2.out index 686c1a0..438c6f4 100644 --- a/t/auto_ddl/6111b_table_validate_and_drop_n2.out +++ b/t/auto_ddl/6111b_table_validate_and_drop_n2.out @@ -1,21 +1,28 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- 6111b - Validate and drop tables on n2 --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; -PREPARE +SET ROLE appuser; +SET +SET search_path TO s611, public; +SET -- Validate sub_tx_table0 -- Expected: table exists with column c of type int and primary key \d sub_tx_table0 - Table "public.sub_tx_table0" + Table "s611.sub_tx_table0" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | not null | Indexes: "sub_tx_table0_pkey" PRIMARY KEY, btree (c) -EXECUTE spocktab('sub_tx_table0'); -- Replication set: default +SELECT * FROM get_table_repset_info('sub_tx_table0'); -- Replication set: default nspname | relname | set_name ---------+---------------+---------- - public | sub_tx_table0 | default + s611 | sub_tx_table0 | default (1 row) -- Validate sub_tx_table0a @@ -29,21 +36,21 @@ Did not find any relation named "sub_tx_table1". -- Validate sub_tx_table2 -- Expected: table exists with column c of type bigint \d sub_tx_table2 - Table "public.sub_tx_table2" + Table "s611.sub_tx_table2" Column | Type | Collation | Nullable | Default --------+--------+-----------+----------+--------- c | bigint | | | -EXECUTE spocktab('sub_tx_table2'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('sub_tx_table2'); -- Replication set: default_insert_only nspname | relname | set_name ---------+---------------+--------------------- - public | sub_tx_table2 | default_insert_only + s611 | sub_tx_table2 | default_insert_only (1 row) -- Validate sub_tx_table3 -- Expected: table exists with columns a (smallint, primary key) and b (real) \d sub_tx_table3 - Table "public.sub_tx_table3" + Table "s611.sub_tx_table3" Column | Type | Collation | Nullable | Default --------+----------+-----------+----------+--------- a | smallint | | not null | @@ -51,10 +58,10 @@ EXECUTE spocktab('sub_tx_table2'); -- Replication set: default_insert_only Indexes: "sub_tx_table3_pkey" PRIMARY KEY, btree (a) -EXECUTE spocktab('sub_tx_table3'); -- Replication set: default +SELECT * FROM get_table_repset_info('sub_tx_table3'); -- Replication set: default nspname | relname | set_name ---------+---------------+---------- - public | sub_tx_table3 | default + s611 | sub_tx_table3 | default (1 row) -- Expected data: (0, 0.09561), (42, 324.78), (56, 7.8), (100, 99.097), (777, 777.777) @@ -74,59 +81,61 @@ SELECT * FROM sub_tx_table3 ORDER BY a; Did not find any relation named "sub_tx_table4". 
-- Validate sub_tx_table5, sub_tx_table5a, and sub_tx_table5c, sub_tx_table5b should not exist \d sub_tx_table5 - Table "public.sub_tx_table5" + Table "s611.sub_tx_table5" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | | -EXECUTE spocktab('sub_tx_table5'); -- Replication set: default_insert_only - nspname | relname | set_name ----------+---------------+--------------------- - public | sub_tx_table5 | default_insert_only -(1 row) +SELECT * FROM get_table_repset_info('sub_tx_table5'); -- Replication set: default_insert_only + nspname | relname | set_name +---------+----------------+--------------------- + s611 | sub_tx_table5 | default_insert_only + s611 | sub_tx_table5a | default + s611 | sub_tx_table5c | default_insert_only +(3 rows) \d sub_tx_table5a - Table "public.sub_tx_table5a" + Table "s611.sub_tx_table5a" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | not null | Indexes: "sub_tx_table5a_pkey" PRIMARY KEY, btree (c) -EXECUTE spocktab('sub_tx_table5a'); -- Replication set: default +SELECT * FROM get_table_repset_info('sub_tx_table5a'); -- Replication set: default nspname | relname | set_name ---------+----------------+---------- - public | sub_tx_table5a | default + s611 | sub_tx_table5a | default (1 row) \d sub_tx_table5b Did not find any relation named "sub_tx_table5b". \d sub_tx_table5c - Table "public.sub_tx_table5c" + Table "s611.sub_tx_table5c" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | | -EXECUTE spocktab('sub_tx_table5c'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('sub_tx_table5c'); -- Replication set: default_insert_only nspname | relname | set_name ---------+----------------+--------------------- - public | sub_tx_table5c | default_insert_only + s611 | sub_tx_table5c | default_insert_only (1 row) -- Validate table_ctas1 -- Expected: table exists with columns id (int), name (varchar), age (int) \d table_ctas1 - Table "public.table_ctas1" + Table "s611.table_ctas1" Column | Type | Collation | Nullable | Default --------+-----------------------+-----------+----------+--------- id | integer | | | name | character varying(50) | | | age | integer | | | -EXECUTE spocktab('table_ctas1'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas1'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas1 | default_insert_only + s611 | table_ctas1 | default_insert_only (1 row) -- Expected data: (1, 'Alice', 30), (2, 'Bob', 25), (3, 'Carol', 35) @@ -141,7 +150,7 @@ SELECT * FROM table_ctas1 ORDER BY id; -- Validate table_ctas2 -- Expected: table exists with columns id (int), age (int), primary key on id \d table_ctas2 - Table "public.table_ctas2" + Table "s611.table_ctas2" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- id | integer | | not null | @@ -149,10 +158,10 @@ SELECT * FROM table_ctas1 ORDER BY id; Indexes: "table_ctas2_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('table_ctas2'); -- Replication set: default +SELECT * FROM get_table_repset_info('table_ctas2'); -- Replication set: default nspname | relname | set_name ---------+-------------+---------- - public | table_ctas2 | default + s611 | table_ctas2 | default (1 row) -- Expected data: (3, 35) @@ -165,7 +174,7 @@ SELECT * 
FROM table_ctas2 ORDER BY id; -- Validate table_ctas3 -- Expected: table exists with columns id (int), value (int), primary key on id \d table_ctas3 - Table "public.table_ctas3" + Table "s611.table_ctas3" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- id | integer | | not null | @@ -173,10 +182,10 @@ SELECT * FROM table_ctas2 ORDER BY id; Indexes: "table_ctas3_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('table_ctas3'); -- Replication set: default +SELECT * FROM get_table_repset_info('table_ctas3'); -- Replication set: default nspname | relname | set_name ---------+-------------+---------- - public | table_ctas3 | default + s611 | table_ctas3 | default (1 row) -- Expected data: (1, 10), (2, 20), (3, 30) @@ -191,17 +200,17 @@ SELECT * FROM table_ctas3 ORDER BY id; -- Validate table_ctas4 -- Expected: table exists with columns id (int), name (varchar), double_age (int), no data \d table_ctas4 - Table "public.table_ctas4" + Table "s611.table_ctas4" Column | Type | Collation | Nullable | Default ------------+-----------------------+-----------+----------+--------- id | integer | | | name | character varying(50) | | | double_age | integer | | | -EXECUTE spocktab('table_ctas4'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas4'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas4 | default_insert_only + s611 | table_ctas4 | default_insert_only (1 row) -- Expected data: empty (no data) @@ -213,15 +222,15 @@ SELECT * FROM table_ctas4 ORDER BY id; -- Validate table_ctas5 -- Expected: table exists with column num (int) \d table_ctas5 - Table "public.table_ctas5" + Table "s611.table_ctas5" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- num | integer | | | -EXECUTE spocktab('table_ctas5'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas5'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas5 | default_insert_only + s611 | table_ctas5 | default_insert_only (1 row) -- Expected data: 1 through 10 @@ -243,15 +252,15 @@ SELECT * FROM table_ctas5 ORDER BY num; -- Validate table_ctas6 -- Expected: table exists with column a (int) \d table_ctas6 - Table "public.table_ctas6" + Table "s611.table_ctas6" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- a | integer | | | -EXECUTE spocktab('table_ctas6'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas6'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas6 | default_insert_only + s611 | table_ctas6 | default_insert_only (1 row) -- Expected data: 1 @@ -264,7 +273,7 @@ SELECT * FROM table_ctas6 ORDER BY a; -- Validate table_si1 -- Expected: table exists with columns id (int), column1 (text), column2 (int), column3 (date), column4 (boolean) \d table_si1 - Table "public.table_si1" + Table "s611.table_si1" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- id | integer | | | @@ -273,10 +282,10 @@ SELECT * FROM table_ctas6 ORDER BY a; column3 | date | | | column4 | boolean | | | -EXECUTE spocktab('table_si1'); -- Replication set: default_insert_only +SELECT * FROM 
get_table_repset_info('table_si1'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-----------+--------------------- - public | table_si1 | default_insert_only + s611 | table_si1 | default_insert_only (1 row) -- Expected data: (1, 'value1', 10, '2023-01-01', TRUE), (2, 'value2', 20, '2023-01-02', FALSE), (3, 'value3', 30, '2023-01-03', TRUE), (4, 'value4', 40, '2023-01-04', FALSE) @@ -292,17 +301,17 @@ SELECT * FROM table_si1 ORDER BY id; -- Validate table_si2 -- Expected: table exists with columns id (int), column1 (text), column2 (int) \d table_si2 - Table "public.table_si2" + Table "s611.table_si2" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- id | integer | | | column1 | text | | | column2 | integer | | | -EXECUTE spocktab('table_si2'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si2'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-----------+--------------------- - public | table_si2 | default_insert_only + s611 | table_si2 | default_insert_only (1 row) -- Expected data: (3, 'value3', 30), (4, 'value4', 40) @@ -316,16 +325,16 @@ SELECT * FROM table_si2 ORDER BY id; -- Validate table_si3 -- Expected: table exists with columns column4 (boolean), count (int) \d table_si3 - Table "public.table_si3" + Table "s611.table_si3" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- column4 | boolean | | | count | bigint | | | -EXECUTE spocktab('table_si3'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si3'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-----------+--------------------- - public | table_si3 | default_insert_only + s611 | table_si3 | default_insert_only (1 row) -- Expected data: (TRUE, 2), (FALSE, 2) @@ -339,16 +348,16 @@ SELECT * FROM table_si3 ORDER BY column4; -- Validate table_si4 -- Expected: table exists with columns id (int), column1 (text) \d table_si4 - Table "public.table_si4" + Table "s611.table_si4" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- id | integer | | | column1 | text | | | -EXECUTE spocktab('table_si4'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si4'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-----------+--------------------- - public | table_si4 | default_insert_only + s611 | table_si4 | default_insert_only (1 row) -- Expected data: (4, 'value4'), (3, 'value3') @@ -362,17 +371,17 @@ SELECT * FROM table_si4 ORDER BY id; -- Validate table_si5 -- Expected: table exists with columns id (int), column1 (text), extra_data (varchar) \d table_si5 - Table "public.table_si5" + Table "s611.table_si5" Column | Type | Collation | Nullable | Default ------------+-----------------------+-----------+----------+--------- id | integer | | | column1 | text | | | extra_data | character varying(50) | | | -EXECUTE spocktab('table_si5'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si5'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-----------+--------------------- - public | table_si5 | default_insert_only + s611 | table_si5 | default_insert_only (1 row) -- Expected data: (1, 'value1', 'extra1'), (3, 'value3', 'extra3') @@ -386,16 +395,16 @@ SELECT * FROM table_si5 ORDER BY id; -- Validate table_l1 -- 
Expected: table exists with columns col1 (int), col2 (text, default 'default_text') \d table_l1 - Table "public.table_l1" + Table "s611.table_l1" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+---------------------- col1 | integer | | not null | col2 | text | | | 'default_text'::text -EXECUTE spocktab('table_l1'); -- Replication set: default_insert_repset +SELECT * FROM get_table_repset_info('table_l1'); -- Replication set: default_insert_repset nspname | relname | set_name ---------+----------+--------------------- - public | table_l1 | default_insert_only + s611 | table_l1 | default_insert_only (1 row) -- Expected data: (3, 'default_text') @@ -408,16 +417,16 @@ SELECT * FROM table_l1 ORDER BY col1; -- Validate table_l2 -- Expected: table exists with columns col1 (int, primary key), col2 (text) \d table_l2 - Table "public.table_l2" + Table "s611.table_l2" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- col1 | integer | | not null | col2 | text | | | -EXECUTE spocktab('table_l2'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_l2'); -- Replication set: default_insert_only nspname | relname | set_name ---------+----------+--------------------- - public | table_l2 | default_insert_only + s611 | table_l2 | default_insert_only (1 row) -- Expected data: (4, 'text4') @@ -430,7 +439,7 @@ SELECT * FROM table_l2 ORDER BY col1; -- Validate table_l3 -- Expected: table exists with columns col1 (int), col2 (text), col3 (date), check constraint, unique constraint \d table_l3 - Table "public.table_l3" + Table "s611.table_l3" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- col1 | integer | | | @@ -441,10 +450,10 @@ Indexes: Check constraints: "chk_col1" CHECK (col1 > 0) -EXECUTE spocktab('table_l3'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_l3'); -- Replication set: default_insert_only nspname | relname | set_name ---------+----------+--------------------- - public | table_l3 | default_insert_only + s611 | table_l3 | default_insert_only (1 row) -- Expected data: (3, 'unique_text3', '2023-01-03') @@ -457,17 +466,17 @@ SELECT * FROM table_l3 ORDER BY col1; -- Validate table_l4 -- Expected: table exists with columns col1 (int), col2 (text), col3 (date), no constraints \d table_l4 - Table "public.table_l4" + Table "s611.table_l4" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- col1 | integer | | | col2 | text | | | col3 | date | | | -EXECUTE spocktab('table_l4'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_l4'); -- Replication set: default_insert_only nspname | relname | set_name ---------+----------+--------------------- - public | table_l4 | default_insert_only + s611 | table_l4 | default_insert_only (1 row) -- Expected data: (4, 'text4', '2023-01-04') @@ -480,7 +489,7 @@ SELECT * FROM table_l4 ORDER BY col1; -- Validate table_l5 -- Expected: table exists with columns col1 (int, primary key), col2 (text) \d table_l5 - Table "public.table_l5" + Table "s611.table_l5" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- col1 | integer | | not null | @@ -488,10 +497,10 @@ SELECT * FROM table_l4 ORDER BY col1; Indexes: "table_l5_pkey" PRIMARY KEY, btree (col1) -EXECUTE spocktab('table_l5'); -- Replication set: default +SELECT * FROM 
get_table_repset_info('table_l5'); -- Replication set: default nspname | relname | set_name ---------+----------+---------- - public | table_l5 | default + s611 | table_l5 | default (1 row) -- Expected data: (5, ) diff --git a/t/auto_ddl/6111b_table_validate_and_drop_n2.sql b/t/auto_ddl/6111b_table_validate_and_drop_n2.sql index 70b97f6..ae9ebb6 100644 --- a/t/auto_ddl/6111b_table_validate_and_drop_n2.sql +++ b/t/auto_ddl/6111b_table_validate_and_drop_n2.sql @@ -1,12 +1,15 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + -- 6111b - Validate and drop tables on n2 --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; +SET ROLE appuser; + +SET search_path TO s611, public; -- Validate sub_tx_table0 -- Expected: table exists with column c of type int and primary key \d sub_tx_table0 -EXECUTE spocktab('sub_tx_table0'); -- Replication set: default +SELECT * FROM get_table_repset_info('sub_tx_table0'); -- Replication set: default -- Validate sub_tx_table0a -- Expected: table does not exist @@ -19,12 +22,12 @@ EXECUTE spocktab('sub_tx_table0'); -- Replication set: default -- Validate sub_tx_table2 -- Expected: table exists with column c of type bigint \d sub_tx_table2 -EXECUTE spocktab('sub_tx_table2'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('sub_tx_table2'); -- Replication set: default_insert_only -- Validate sub_tx_table3 -- Expected: table exists with columns a (smallint, primary key) and b (real) \d sub_tx_table3 -EXECUTE spocktab('sub_tx_table3'); -- Replication set: default +SELECT * FROM get_table_repset_info('sub_tx_table3'); -- Replication set: default -- Expected data: (0, 0.09561), (42, 324.78), (56, 7.8), (100, 99.097), (777, 777.777) SELECT * FROM sub_tx_table3 ORDER BY a; @@ -34,126 +37,126 @@ SELECT * FROM sub_tx_table3 ORDER BY a; -- Validate sub_tx_table5, sub_tx_table5a, and sub_tx_table5c, sub_tx_table5b should not exist \d sub_tx_table5 -EXECUTE spocktab('sub_tx_table5'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('sub_tx_table5'); -- Replication set: default_insert_only \d sub_tx_table5a -EXECUTE spocktab('sub_tx_table5a'); -- Replication set: default +SELECT * FROM get_table_repset_info('sub_tx_table5a'); -- Replication set: default \d sub_tx_table5b \d sub_tx_table5c -EXECUTE spocktab('sub_tx_table5c'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('sub_tx_table5c'); -- Replication set: default_insert_only -- Validate table_ctas1 -- Expected: table exists with columns id (int), name (varchar), age (int) \d table_ctas1 -EXECUTE spocktab('table_ctas1'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas1'); -- Replication set: default_insert_only -- Expected data: (1, 'Alice', 30), (2, 'Bob', 25), (3, 'Carol', 35) SELECT * FROM table_ctas1 ORDER BY id; -- Validate table_ctas2 -- Expected: table exists with columns id (int), age (int), primary key on id \d table_ctas2 -EXECUTE spocktab('table_ctas2'); -- Replication set: default +SELECT * FROM get_table_repset_info('table_ctas2'); -- Replication set: default -- Expected data: (3, 35) SELECT * FROM table_ctas2 ORDER BY id; -- Validate table_ctas3 -- Expected: table exists with columns id (int), value (int), primary key on id \d table_ctas3 -EXECUTE spocktab('table_ctas3'); -- Replication set: default +SELECT * FROM 
get_table_repset_info('table_ctas3'); -- Replication set: default -- Expected data: (1, 10), (2, 20), (3, 30) SELECT * FROM table_ctas3 ORDER BY id; -- Validate table_ctas4 -- Expected: table exists with columns id (int), name (varchar), double_age (int), no data \d table_ctas4 -EXECUTE spocktab('table_ctas4'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas4'); -- Replication set: default_insert_only -- Expected data: empty (no data) SELECT * FROM table_ctas4 ORDER BY id; -- Validate table_ctas5 -- Expected: table exists with column num (int) \d table_ctas5 -EXECUTE spocktab('table_ctas5'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas5'); -- Replication set: default_insert_only -- Expected data: 1 through 10 SELECT * FROM table_ctas5 ORDER BY num; -- Validate table_ctas6 -- Expected: table exists with column a (int) \d table_ctas6 -EXECUTE spocktab('table_ctas6'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas6'); -- Replication set: default_insert_only -- Expected data: 1 SELECT * FROM table_ctas6 ORDER BY a; -- Validate table_si1 -- Expected: table exists with columns id (int), column1 (text), column2 (int), column3 (date), column4 (boolean) \d table_si1 -EXECUTE spocktab('table_si1'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si1'); -- Replication set: default_insert_only -- Expected data: (1, 'value1', 10, '2023-01-01', TRUE), (2, 'value2', 20, '2023-01-02', FALSE), (3, 'value3', 30, '2023-01-03', TRUE), (4, 'value4', 40, '2023-01-04', FALSE) SELECT * FROM table_si1 ORDER BY id; -- Validate table_si2 -- Expected: table exists with columns id (int), column1 (text), column2 (int) \d table_si2 -EXECUTE spocktab('table_si2'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si2'); -- Replication set: default_insert_only -- Expected data: (3, 'value3', 30), (4, 'value4', 40) SELECT * FROM table_si2 ORDER BY id; -- Validate table_si3 -- Expected: table exists with columns column4 (boolean), count (int) \d table_si3 -EXECUTE spocktab('table_si3'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si3'); -- Replication set: default_insert_only -- Expected data: (TRUE, 2), (FALSE, 2) SELECT * FROM table_si3 ORDER BY column4; -- Validate table_si4 -- Expected: table exists with columns id (int), column1 (text) \d table_si4 -EXECUTE spocktab('table_si4'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si4'); -- Replication set: default_insert_only -- Expected data: (4, 'value4'), (3, 'value3') SELECT * FROM table_si4 ORDER BY id; -- Validate table_si5 -- Expected: table exists with columns id (int), column1 (text), extra_data (varchar) \d table_si5 -EXECUTE spocktab('table_si5'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si5'); -- Replication set: default_insert_only -- Expected data: (1, 'value1', 'extra1'), (3, 'value3', 'extra3') SELECT * FROM table_si5 ORDER BY id; -- Validate table_l1 -- Expected: table exists with columns col1 (int), col2 (text, default 'default_text') \d table_l1 -EXECUTE spocktab('table_l1'); -- Replication set: default_insert_repset +SELECT * FROM get_table_repset_info('table_l1'); -- Replication set: default_insert_repset -- Expected data: (3, 'default_text') SELECT * FROM table_l1 ORDER BY col1; -- Validate table_l2 -- Expected: table exists with columns 
col1 (int, primary key), col2 (text) \d table_l2 -EXECUTE spocktab('table_l2'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_l2'); -- Replication set: default_insert_only -- Expected data: (4, 'text4') SELECT * FROM table_l2 ORDER BY col1; -- Validate table_l3 -- Expected: table exists with columns col1 (int), col2 (text), col3 (date), check constraint, unique constraint \d table_l3 -EXECUTE spocktab('table_l3'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_l3'); -- Replication set: default_insert_only -- Expected data: (3, 'unique_text3', '2023-01-03') SELECT * FROM table_l3 ORDER BY col1; -- Validate table_l4 -- Expected: table exists with columns col1 (int), col2 (text), col3 (date), no constraints \d table_l4 -EXECUTE spocktab('table_l4'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_l4'); -- Replication set: default_insert_only -- Expected data: (4, 'text4', '2023-01-04') SELECT * FROM table_l4 ORDER BY col1; -- Validate table_l5 -- Expected: table exists with columns col1 (int, primary key), col2 (text) \d table_l5 -EXECUTE spocktab('table_l5'); -- Replication set: default +SELECT * FROM get_table_repset_info('table_l5'); -- Replication set: default -- Expected data: (5, ) SELECT * FROM table_l5 ORDER BY col1; diff --git a/t/auto_ddl/6111c_table_validate_n1.out b/t/auto_ddl/6111c_table_validate_n1.out index 2e1781a..b569a34 100644 --- a/t/auto_ddl/6111c_table_validate_n1.out +++ b/t/auto_ddl/6111c_table_validate_n1.out @@ -1,12 +1,19 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- 6111c - Validate tables on n1 --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; -PREPARE +SET ROLE appuser; +SET +SET search_path TO s611, public; +SET -- Validate sub_tx_table0 -- Expected: table does not exist \d sub_tx_table0 Did not find any relation named "sub_tx_table0". -EXECUTE spocktab('sub_tx_table0'); +SELECT * FROM get_table_repset_info('sub_tx_table0'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -15,7 +22,7 @@ EXECUTE spocktab('sub_tx_table0'); -- Expected: table does not exist \d sub_tx_table2 Did not find any relation named "sub_tx_table2". -EXECUTE spocktab('sub_tx_table2'); +SELECT * FROM get_table_repset_info('sub_tx_table2'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -24,7 +31,7 @@ EXECUTE spocktab('sub_tx_table2'); -- Expected: table does not exist \d sub_tx_table3 Did not find any relation named "sub_tx_table3". -EXECUTE spocktab('sub_tx_table3'); +SELECT * FROM get_table_repset_info('sub_tx_table3'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -33,28 +40,28 @@ EXECUTE spocktab('sub_tx_table3'); -- Expected: tables do not exist \d sub_tx_table5 Did not find any relation named "sub_tx_table5". -EXECUTE spocktab('sub_tx_table5'); +SELECT * FROM get_table_repset_info('sub_tx_table5'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d sub_tx_table5a Did not find any relation named "sub_tx_table5a". -EXECUTE spocktab('sub_tx_table5a'); +SELECT * FROM get_table_repset_info('sub_tx_table5a'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d sub_tx_table5b Did not find any relation named "sub_tx_table5b". 
-EXECUTE spocktab('sub_tx_table5b'); -- should not exist +SELECT * FROM get_table_repset_info('sub_tx_table5b'); -- should not exist nspname | relname | set_name ---------+---------+---------- (0 rows) \d sub_tx_table5c Did not find any relation named "sub_tx_table5c". -EXECUTE spocktab('sub_tx_table5c'); +SELECT * FROM get_table_repset_info('sub_tx_table5c'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -63,7 +70,7 @@ EXECUTE spocktab('sub_tx_table5c'); -- Expected: table does not exist \d table_ctas1 Did not find any relation named "table_ctas1". -EXECUTE spocktab('table_ctas1'); +SELECT * FROM get_table_repset_info('table_ctas1'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -72,7 +79,7 @@ EXECUTE spocktab('table_ctas1'); -- Expected: table does not exist \d table_ctas2 Did not find any relation named "table_ctas2". -EXECUTE spocktab('table_ctas2'); +SELECT * FROM get_table_repset_info('table_ctas2'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -81,7 +88,7 @@ EXECUTE spocktab('table_ctas2'); -- Expected: table does not exist \d table_ctas3 Did not find any relation named "table_ctas3". -EXECUTE spocktab('table_ctas3'); +SELECT * FROM get_table_repset_info('table_ctas3'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -90,7 +97,7 @@ EXECUTE spocktab('table_ctas3'); -- Expected: table does not exist \d table_ctas4 Did not find any relation named "table_ctas4". -EXECUTE spocktab('table_ctas4'); +SELECT * FROM get_table_repset_info('table_ctas4'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -99,7 +106,7 @@ EXECUTE spocktab('table_ctas4'); -- Expected: table does not exist \d table_ctas5 Did not find any relation named "table_ctas5". -EXECUTE spocktab('table_ctas5'); +SELECT * FROM get_table_repset_info('table_ctas5'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -108,7 +115,7 @@ EXECUTE spocktab('table_ctas5'); -- Expected: table does not exist \d table_ctas6 Did not find any relation named "table_ctas6". -EXECUTE spocktab('table_ctas6'); +SELECT * FROM get_table_repset_info('table_ctas6'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -117,7 +124,7 @@ EXECUTE spocktab('table_ctas6'); -- Expected: table does not exist \d table_si1 Did not find any relation named "table_si1". -EXECUTE spocktab('table_si1'); +SELECT * FROM get_table_repset_info('table_si1'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -126,7 +133,7 @@ EXECUTE spocktab('table_si1'); -- Expected: table does not exist \d table_si2 Did not find any relation named "table_si2". -EXECUTE spocktab('table_si2'); +SELECT * FROM get_table_repset_info('table_si2'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -135,7 +142,7 @@ EXECUTE spocktab('table_si2'); -- Expected: table does not exist \d table_si3 Did not find any relation named "table_si3". -EXECUTE spocktab('table_si3'); +SELECT * FROM get_table_repset_info('table_si3'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -144,7 +151,7 @@ EXECUTE spocktab('table_si3'); -- Expected: table does not exist \d table_si4 Did not find any relation named "table_si4". 
-EXECUTE spocktab('table_si4'); +SELECT * FROM get_table_repset_info('table_si4'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -153,7 +160,7 @@ EXECUTE spocktab('table_si4'); -- Expected: table does not exist \d table_si5 Did not find any relation named "table_si5". -EXECUTE spocktab('table_si5'); +SELECT * FROM get_table_repset_info('table_si5'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -162,7 +169,7 @@ EXECUTE spocktab('table_si5'); -- Expected: table does not exist \d table_l1 Did not find any relation named "table_l1". -EXECUTE spocktab('table_l1'); +SELECT * FROM get_table_repset_info('table_l1'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -171,7 +178,7 @@ EXECUTE spocktab('table_l1'); -- Expected: table does not exist \d table_l2 Did not find any relation named "table_l2". -EXECUTE spocktab('table_l2'); +SELECT * FROM get_table_repset_info('table_l2'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -180,7 +187,7 @@ EXECUTE spocktab('table_l2'); -- Expected: table does not exist \d table_l3 Did not find any relation named "table_l3". -EXECUTE spocktab('table_l3'); +SELECT * FROM get_table_repset_info('table_l3'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -189,7 +196,7 @@ EXECUTE spocktab('table_l3'); -- Expected: table does not exist \d table_l4 Did not find any relation named "table_l4". -EXECUTE spocktab('table_l4'); +SELECT * FROM get_table_repset_info('table_l4'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -198,8 +205,14 @@ EXECUTE spocktab('table_l4'); -- Expected: table does not exist \d table_l5 Did not find any relation named "table_l5". -EXECUTE spocktab('table_l5'); +SELECT * FROM get_table_repset_info('table_l5'); nspname | relname | set_name ---------+---------+---------- (0 rows) +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s611 CASCADE; +INFO: DDL statement replicated. 
+DROP SCHEMA diff --git a/t/auto_ddl/6111c_table_validate_n1.sql b/t/auto_ddl/6111c_table_validate_n1.sql index 350f5dc..8f2c751 100644 --- a/t/auto_ddl/6111c_table_validate_n1.sql +++ b/t/auto_ddl/6111c_table_validate_n1.sql @@ -1,110 +1,116 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + -- 6111c - Validate tables on n1 --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; +SET ROLE appuser; +SET search_path TO s611, public; -- Validate sub_tx_table0 -- Expected: table does not exist \d sub_tx_table0 -EXECUTE spocktab('sub_tx_table0'); +SELECT * FROM get_table_repset_info('sub_tx_table0'); -- Validate sub_tx_table2 -- Expected: table does not exist \d sub_tx_table2 -EXECUTE spocktab('sub_tx_table2'); +SELECT * FROM get_table_repset_info('sub_tx_table2'); -- Validate sub_tx_table3 -- Expected: table does not exist \d sub_tx_table3 -EXECUTE spocktab('sub_tx_table3'); +SELECT * FROM get_table_repset_info('sub_tx_table3'); -- Validate sub_tx_table5, sub_tx_table5a, sub_tx_table5c, sub_tx_table5b should not exist -- Expected: tables do not exist \d sub_tx_table5 -EXECUTE spocktab('sub_tx_table5'); +SELECT * FROM get_table_repset_info('sub_tx_table5'); \d sub_tx_table5a -EXECUTE spocktab('sub_tx_table5a'); +SELECT * FROM get_table_repset_info('sub_tx_table5a'); \d sub_tx_table5b -EXECUTE spocktab('sub_tx_table5b'); -- should not exist +SELECT * FROM get_table_repset_info('sub_tx_table5b'); -- should not exist \d sub_tx_table5c -EXECUTE spocktab('sub_tx_table5c'); +SELECT * FROM get_table_repset_info('sub_tx_table5c'); -- Validate table_ctas1 -- Expected: table does not exist \d table_ctas1 -EXECUTE spocktab('table_ctas1'); +SELECT * FROM get_table_repset_info('table_ctas1'); -- Validate table_ctas2 -- Expected: table does not exist \d table_ctas2 -EXECUTE spocktab('table_ctas2'); +SELECT * FROM get_table_repset_info('table_ctas2'); -- Validate table_ctas3 -- Expected: table does not exist \d table_ctas3 -EXECUTE spocktab('table_ctas3'); +SELECT * FROM get_table_repset_info('table_ctas3'); -- Validate table_ctas4 -- Expected: table does not exist \d table_ctas4 -EXECUTE spocktab('table_ctas4'); +SELECT * FROM get_table_repset_info('table_ctas4'); -- Validate table_ctas5 -- Expected: table does not exist \d table_ctas5 -EXECUTE spocktab('table_ctas5'); +SELECT * FROM get_table_repset_info('table_ctas5'); -- Validate table_ctas6 -- Expected: table does not exist \d table_ctas6 -EXECUTE spocktab('table_ctas6'); +SELECT * FROM get_table_repset_info('table_ctas6'); -- Validate table_si1 -- Expected: table does not exist \d table_si1 -EXECUTE spocktab('table_si1'); +SELECT * FROM get_table_repset_info('table_si1'); -- Validate table_si2 -- Expected: table does not exist \d table_si2 -EXECUTE spocktab('table_si2'); +SELECT * FROM get_table_repset_info('table_si2'); -- Validate table_si3 -- Expected: table does not exist \d table_si3 -EXECUTE spocktab('table_si3'); +SELECT * FROM get_table_repset_info('table_si3'); -- Validate table_si4 -- Expected: table does not exist \d table_si4 -EXECUTE spocktab('table_si4'); +SELECT * FROM get_table_repset_info('table_si4'); -- Validate table_si5 -- Expected: table does not exist \d table_si5 -EXECUTE spocktab('table_si5'); +SELECT * FROM get_table_repset_info('table_si5'); -- Validate table_l1 -- Expected: table does not exist \d table_l1 -EXECUTE spocktab('table_l1'); +SELECT * FROM 
get_table_repset_info('table_l1');
-- Validate table_l2
-- Expected: table does not exist
\d table_l2
-EXECUTE spocktab('table_l2');
+SELECT * FROM get_table_repset_info('table_l2');
-- Validate table_l3
-- Expected: table does not exist
\d table_l3
-EXECUTE spocktab('table_l3');
+SELECT * FROM get_table_repset_info('table_l3');
-- Validate table_l4
-- Expected: table does not exist
\d table_l4
-EXECUTE spocktab('table_l4');
+SELECT * FROM get_table_repset_info('table_l4');
-- Validate table_l5
-- Expected: table does not exist
\d table_l5
-EXECUTE spocktab('table_l5');
+SELECT * FROM get_table_repset_info('table_l5');
+
+RESET ROLE;
+--dropping the schema
+DROP SCHEMA s611 CASCADE;
\ No newline at end of file

From 8945045c181a200c3d37768e3146b59a96a2fcc9 Mon Sep 17 00:00:00 2001
From: "A. Hayee Bhatti"
Date: Mon, 28 Oct 2024 20:55:13 +0500
Subject: [PATCH 30/48] [AutoDDL] Update 6122/6133 scripts to execute via nonsuperuser and adjust outputs

Updated AutoDDL SQL scripts 6122(a, b, c) and 6133(a, b, c) to execute primarily under the non-superuser (appuser) role, switching to superuser where necessary. Adjusted the related SQL scripts and expected output files to reflect this change.
---
 .../6122a_table_range_partitions_n1.out | 142 ++++++++++--------
 .../6122a_table_range_partitions_n1.sql | 30 ++--
 ...22b_table_range_partitions_validate_n2.out | 67 +++++----
 ...22b_table_range_partitions_validate_n2.sql | 13 +-
 ...6122c_table_range_parition_validate_n1.out | 50 +++++-
 ...6122c_table_range_parition_validate_n1.sql | 21 ++-
 t/auto_ddl/6133a_table_list_partitions_n1.out | 139 ++++++++++++++---
 t/auto_ddl/6133a_table_list_partitions_n1.sql | 24 +--
 ...133b_table_list_partitions_validate_n2.out | 91 +++++++++--
 ...133b_table_list_partitions_validate_n2.sql | 16 +-
 .../6133c_table_list_parition_validate_n1.out | 39 +++--
 .../6133c_table_list_parition_validate_n1.sql | 17 ++-
 12 files changed, 456 insertions(+), 193 deletions(-)

diff --git a/t/auto_ddl/6122a_table_range_partitions_n1.out b/t/auto_ddl/6122a_table_range_partitions_n1.out
index 4868c45..38f17c8 100644
--- a/t/auto_ddl/6122a_table_range_partitions_n1.out
+++ b/t/auto_ddl/6122a_table_range_partitions_n1.out
@@ -1,6 +1,20 @@
--- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name
-PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid;
-PREPARE
+SELECT pg_sleep(1);--to ensure all objects are replicated
+ pg_sleep
+----------
+
+(1 row)
+
+--creating the necessary pre-reqs and then switching to the appuser role
+CREATE SCHEMA IF NOT EXISTS s612;
+INFO: DDL statement replicated.
+CREATE SCHEMA
+GRANT ALL PRIVILEGES ON SCHEMA s612 TO appuser;
+INFO: DDL statement replicated.
+GRANT +SET ROLE appuser; +SET +SET search_path TO s612, public; +SET -------------------------------- -- Range Partitioned Tables -------------------------------- @@ -29,12 +43,12 @@ INSERT INTO sales_range (sale_id, sale_date, amount) VALUES (3, '2022-02-10', 250.00); INSERT 0 3 -- Validate structure and data -EXECUTE spocktab('sales_range'); -- Expect both parent and child tables in default set +SELECT * FROM get_table_repset_info('sales_range'); -- Expect both parent and child tables in default set nspname | relname | set_name ---------+------------------+---------- - public | sales_range | default - public | sales_range_2021 | default - public | sales_range_2022 | default + s612 | sales_range | default + s612 | sales_range_2021 | default + s612 | sales_range_2022 | default (3 rows) SELECT * FROM sales_range ORDER BY sale_id; -- Expect 3 rows sorted by sale_id @@ -68,12 +82,12 @@ INSERT INTO revenue_range (rev_id, rev_date, revenue) VALUES (102, '2022-05-18', 400.00); INSERT 0 2 -- Validate structure and data -EXECUTE spocktab('revenue_range'); -- Expect both parent and child tables in default_insert_only set +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect both parent and child tables in default_insert_only set nspname | relname | set_name ---------+--------------------+--------------------- - public | revenue_range | default_insert_only - public | revenue_range_2021 | default_insert_only - public | revenue_range_2022 | default_insert_only + s612 | revenue_range | default_insert_only + s612 | revenue_range_2021 | default_insert_only + s612 | revenue_range_2022 | default_insert_only (3 rows) SELECT * FROM revenue_range ORDER BY rev_id; -- Expect 2 rows sorted by rev_id @@ -89,7 +103,7 @@ CREATE TABLE sales_range_2023 PARTITION OF sales_range INFO: DDL statement replicated. CREATE TABLE \d+ sales_range_2023 - Table "public.sales_range_2023" + Table "s612.sales_range_2023" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -101,13 +115,13 @@ Indexes: "sales_range_2023_pkey" PRIMARY KEY, btree (sale_id, sale_date) Access method: heap -EXECUTE spocktab('sales_range'); -- Expect sales_range_2023 in default set +SELECT * FROM get_table_repset_info('sales_range'); -- Expect sales_range_2023 in default set nspname | relname | set_name ---------+------------------+---------- - public | sales_range | default - public | sales_range_2021 | default - public | sales_range_2022 | default - public | sales_range_2023 | default + s612 | sales_range | default + s612 | sales_range_2021 | default + s612 | sales_range_2022 | default + s612 | sales_range_2023 | default (4 rows) -- Add a primary key to a range partitioned table that initially didn't have one @@ -115,7 +129,7 @@ ALTER TABLE revenue_range ADD PRIMARY KEY (rev_id, rev_date); INFO: DDL statement replicated. ALTER TABLE \d revenue_range - Partitioned table "public.revenue_range" + Partitioned table "s612.revenue_range" Column | Type | Collation | Nullable | Default ----------+---------+-----------+----------+--------- rev_id | integer | | not null | @@ -126,12 +140,12 @@ Indexes: "revenue_range_pkey" PRIMARY KEY, btree (rev_id, rev_date) Number of partitions: 2 (Use \d+ to list them.) 
-EXECUTE spocktab('revenue_range'); -- Expect revenue_range and all child partitions to move to default set +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect revenue_range and all child partitions to move to default set nspname | relname | set_name ---------+--------------------+---------- - public | revenue_range | default - public | revenue_range_2021 | default - public | revenue_range_2022 | default + s612 | revenue_range | default + s612 | revenue_range_2021 | default + s612 | revenue_range_2022 | default (3 rows) -- Add another partition to the modified table @@ -140,7 +154,7 @@ CREATE TABLE revenue_range_2023 PARTITION OF revenue_range INFO: DDL statement replicated. CREATE TABLE \d+ revenue_range_2023 - Table "public.revenue_range_2023" + Table "s612.revenue_range_2023" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -152,13 +166,13 @@ Indexes: "revenue_range_2023_pkey" PRIMARY KEY, btree (rev_id, rev_date) Access method: heap -EXECUTE spocktab('revenue_range'); -- Expect revenue_range_2023 in default set +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect revenue_range_2023 in default set nspname | relname | set_name ---------+--------------------+---------- - public | revenue_range | default - public | revenue_range_2021 | default - public | revenue_range_2022 | default - public | revenue_range_2023 | default + s612 | revenue_range | default + s612 | revenue_range_2021 | default + s612 | revenue_range_2022 | default + s612 | revenue_range_2023 | default (4 rows) -- Insert data into the newly added partitions @@ -193,12 +207,12 @@ INSERT INTO orders_range (order_id, order_date, customer_id, total) VALUES (1002, '2022-01-10', 2, 1000.00); INSERT 0 2 -- Validate structure and data -EXECUTE spocktab('orders_range'); -- Expect both parent and child tables in default set +SELECT * FROM get_table_repset_info('orders_range'); -- Expect both parent and child tables in default set nspname | relname | set_name ---------+-------------------+---------- - public | orders_range | default - public | orders_range_2021 | default - public | orders_range_2022 | default + s612 | orders_range | default + s612 | orders_range_2021 | default + s612 | orders_range_2022 | default (3 rows) SELECT * FROM orders_range ORDER BY order_id; -- Expect 2 rows @@ -212,25 +226,25 @@ SELECT * FROM orders_range ORDER BY order_id; -- Expect 2 rows ALTER TABLE sales_range DETACH PARTITION sales_range_2023; INFO: DDL statement replicated. ALTER TABLE -EXECUTE spocktab('sales_range'); --should still have the repset assigned +SELECT * FROM get_table_repset_info('sales_range'); --should still have the repset assigned nspname | relname | set_name ---------+------------------+---------- - public | sales_range | default - public | sales_range_2021 | default - public | sales_range_2022 | default - public | sales_range_2023 | default + s612 | sales_range | default + s612 | sales_range_2021 | default + s612 | sales_range_2022 | default + s612 | sales_range_2023 | default (4 rows) DROP TABLE sales_range_2023; NOTICE: drop cascades to table sales_range_2023 membership in replication set default INFO: DDL statement replicated. 
DROP TABLE -EXECUTE spocktab('sales_range'); -- validate sales_range_2023 to be removed +SELECT * FROM get_table_repset_info('sales_range'); -- validate sales_range_2023 to be removed nspname | relname | set_name ---------+------------------+---------- - public | sales_range | default - public | sales_range_2021 | default - public | sales_range_2022 | default + s612 | sales_range | default + s612 | sales_range_2021 | default + s612 | sales_range_2022 | default (3 rows) -- Create a range partitioned table with default partition @@ -257,12 +271,12 @@ INSERT INTO inventory_range (product_id, product_date, quantity) VALUES (2, '2022-02-10', 100); -- Should go to default partition INSERT 0 2 -- Validate structure and data -EXECUTE spocktab('inventory_range'); -- Expect both parent and child tables in default set +SELECT * FROM get_table_repset_info('inventory_range'); -- Expect both parent and child tables in default set nspname | relname | set_name ---------+-------------------------+---------- - public | inventory_range | default - public | inventory_range_2021 | default - public | inventory_range_default | default + s612 | inventory_range | default + s612 | inventory_range_2021 | default + s612 | inventory_range_default | default (3 rows) SELECT * FROM inventory_range ORDER BY product_id; -- Expect 2 rows @@ -306,13 +320,13 @@ ALTER TABLE inventory_range ATTACH PARTITION inventory_standalone FOR VALUES FRO INFO: DDL statement replicated. ALTER TABLE -- Validate structure and data -EXECUTE spocktab('inventory'); -- Expect inventory_standalone to be listed +SELECT * FROM get_table_repset_info('inventory'); -- Expect inventory_standalone to be listed nspname | relname | set_name ---------+-------------------------+---------- - public | inventory_range | default - public | inventory_range_2021 | default - public | inventory_range_default | default - public | inventory_standalone | default + s612 | inventory_range | default + s612 | inventory_range_2021 | default + s612 | inventory_range_default | default + s612 | inventory_standalone | default (4 rows) SELECT * FROM inventory_standalone ORDER BY product_id; -- Expect 1 row @@ -360,7 +374,7 @@ SELECT * FROM inventory_standalone ORDER BY product_id; -- Expect 1 row -- Validate final structure \d+ sales_range - Partitioned table "public.sales_range" + Partitioned table "s612.sales_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -373,7 +387,7 @@ Partitions: sales_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01'), sales_range_2022 FOR VALUES FROM ('2022-01-01') TO ('2023-01-01') \d+ sales_range_2021 - Table "public.sales_range_2021" + Table "s612.sales_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -386,7 +400,7 @@ Indexes: Access method: heap \d+ sales_range_2022 - Table "public.sales_range_2022" + Table "s612.sales_range_2022" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -401,7 +415,7 @@ Access 
method: heap \d+ sales_range_2023 Did not find any relation named "sales_range_2023". \d+ revenue_range - Partitioned table "public.revenue_range" + Partitioned table "s612.revenue_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -415,7 +429,7 @@ Partitions: revenue_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01'), revenue_range_2023 FOR VALUES FROM ('2023-01-01') TO ('2024-01-01') \d+ revenue_range_2021 - Table "public.revenue_range_2021" + Table "s612.revenue_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -428,7 +442,7 @@ Indexes: Access method: heap \d+ revenue_range_2022 - Table "public.revenue_range_2022" + Table "s612.revenue_range_2022" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -441,7 +455,7 @@ Indexes: Access method: heap \d orders_range - Partitioned table "public.orders_range" + Partitioned table "s612.orders_range" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- order_id | integer | | not null | @@ -454,7 +468,7 @@ Indexes: Number of partitions: 2 (Use \d+ to list them.) \d+ orders_range - Partitioned table "public.orders_range" + Partitioned table "s612.orders_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- order_id | integer | | not null | | plain | | | @@ -468,7 +482,7 @@ Partitions: orders_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01'), orders_range_2022 FOR VALUES FROM ('2022-01-01') TO ('2023-01-01') \d+ orders_range_2021 - Table "public.orders_range_2021" + Table "s612.orders_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- order_id | integer | | not null | | plain | | | @@ -482,7 +496,7 @@ Indexes: Access method: heap \d+ orders_range_2022 - Table "public.orders_range_2022" + Table "s612.orders_range_2022" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- order_id | integer | | not null | | plain | | | @@ -496,7 +510,7 @@ Indexes: Access method: heap \d+ inventory_range - Partitioned table "public.inventory_range" + Partitioned table "s612.inventory_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -511,7 +525,7 @@ Partitions: inventory_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01' inventory_range_default DEFAULT \d+ 
inventory_range_2021 - Table "public.inventory_range_2021" + Table "s612.inventory_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -527,7 +541,7 @@ Check constraints: Access method: heap \d+ inventory_range_default - Table "public.inventory_range_default" + Table "s612.inventory_range_default" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -541,7 +555,7 @@ Indexes: Access method: heap \d+ inventory_standalone - Table "public.inventory_standalone" + Table "s612.inventory_standalone" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | diff --git a/t/auto_ddl/6122a_table_range_partitions_n1.sql b/t/auto_ddl/6122a_table_range_partitions_n1.sql index b064fc6..b43e0ec 100644 --- a/t/auto_ddl/6122a_table_range_partitions_n1.sql +++ b/t/auto_ddl/6122a_table_range_partitions_n1.sql @@ -1,6 +1,12 @@ --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s612; +GRANT ALL PRIVILEGES ON SCHEMA s612 TO appuser; + +SET ROLE appuser; + +SET search_path TO s612, public; -------------------------------- -- Range Partitioned Tables -------------------------------- @@ -26,7 +32,7 @@ INSERT INTO sales_range (sale_id, sale_date, amount) VALUES (3, '2022-02-10', 250.00); -- Validate structure and data -EXECUTE spocktab('sales_range'); -- Expect both parent and child tables in default set +SELECT * FROM get_table_repset_info('sales_range'); -- Expect both parent and child tables in default set SELECT * FROM sales_range ORDER BY sale_id; -- Expect 3 rows sorted by sale_id -- Create another range partitioned table without primary key @@ -49,26 +55,26 @@ INSERT INTO revenue_range (rev_id, rev_date, revenue) VALUES -- Validate structure and data -EXECUTE spocktab('revenue_range'); -- Expect both parent and child tables in default_insert_only set +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect both parent and child tables in default_insert_only set SELECT * FROM revenue_range ORDER BY rev_id; -- Expect 2 rows sorted by rev_id -- Alter table to add a new partition CREATE TABLE sales_range_2023 PARTITION OF sales_range FOR VALUES FROM ('2023-01-01') TO ('2024-01-01'); \d+ sales_range_2023 -EXECUTE spocktab('sales_range'); -- Expect sales_range_2023 in default set +SELECT * FROM get_table_repset_info('sales_range'); -- Expect sales_range_2023 in default set -- Add a primary key to a range partitioned table that initially didn't have one ALTER TABLE revenue_range ADD PRIMARY KEY (rev_id, rev_date); \d revenue_range -EXECUTE spocktab('revenue_range'); -- Expect revenue_range and all child partitions to move to 
default set +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect revenue_range and all child partitions to move to default set -- Add another partition to the modified table CREATE TABLE revenue_range_2023 PARTITION OF revenue_range FOR VALUES FROM ('2023-01-01') TO ('2024-01-01'); \d+ revenue_range_2023 -EXECUTE spocktab('revenue_range'); -- Expect revenue_range_2023 in default set +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect revenue_range_2023 in default set -- Insert data into the newly added partitions INSERT INTO sales_range (sale_id, sale_date, amount) VALUES @@ -97,14 +103,14 @@ INSERT INTO orders_range (order_id, order_date, customer_id, total) VALUES (1002, '2022-01-10', 2, 1000.00); -- Validate structure and data -EXECUTE spocktab('orders_range'); -- Expect both parent and child tables in default set +SELECT * FROM get_table_repset_info('orders_range'); -- Expect both parent and child tables in default set SELECT * FROM orders_range ORDER BY order_id; -- Expect 2 rows -- Drop a partition ALTER TABLE sales_range DETACH PARTITION sales_range_2023; -EXECUTE spocktab('sales_range'); --should still have the repset assigned +SELECT * FROM get_table_repset_info('sales_range'); --should still have the repset assigned DROP TABLE sales_range_2023; -EXECUTE spocktab('sales_range'); -- validate sales_range_2023 to be removed +SELECT * FROM get_table_repset_info('sales_range'); -- validate sales_range_2023 to be removed -- Create a range partitioned table with default partition CREATE TABLE inventory_range ( @@ -126,7 +132,7 @@ INSERT INTO inventory_range (product_id, product_date, quantity) VALUES (2, '2022-02-10', 100); -- Should go to default partition -- Validate structure and data -EXECUTE spocktab('inventory_range'); -- Expect both parent and child tables in default set +SELECT * FROM get_table_repset_info('inventory_range'); -- Expect both parent and child tables in default set SELECT * FROM inventory_range ORDER BY product_id; -- Expect 2 rows -- Alter the inventory_range table to add a new column and change data type @@ -158,7 +164,7 @@ INSERT INTO inventory_standalone (product_id, product_date, quantity, price) VAL ALTER TABLE inventory_range ATTACH PARTITION inventory_standalone FOR VALUES FROM ('2023-01-01') TO ('2024-01-01'); -- Validate structure and data -EXECUTE spocktab('inventory'); -- Expect inventory_standalone to be listed +SELECT * FROM get_table_repset_info('inventory'); -- Expect inventory_standalone to be listed SELECT * FROM inventory_standalone ORDER BY product_id; -- Expect 1 row -- Validate final data diff --git a/t/auto_ddl/6122b_table_range_partitions_validate_n2.out b/t/auto_ddl/6122b_table_range_partitions_validate_n2.out index 7d2d3d2..75e2cb9 100644 --- a/t/auto_ddl/6122b_table_range_partitions_validate_n2.out +++ b/t/auto_ddl/6122b_table_range_partitions_validate_n2.out @@ -1,30 +1,37 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + --This file will run on n2 and validate all the replicated tables data, structure and replication sets they're in --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE -EXECUTE spocktab('sales_range'); -- Expect sales_range, sales_range_2022, sales_range_2021 in default set +SET ROLE appuser; +SET +SET search_path TO s612, public; +SET +SELECT 
* FROM get_table_repset_info('sales_range'); -- Expect sales_range, sales_range_2022, sales_range_2021 in default set nspname | relname | set_name ---------+------------------+---------- - public | sales_range | default - public | sales_range_2021 | default - public | sales_range_2022 | default + s612 | sales_range | default + s612 | sales_range_2021 | default + s612 | sales_range_2022 | default (3 rows) -EXECUTE spocktab('revenue_range'); -- Expect revenue_range, revenue_range_2023 in default and revenue_range_2021, revenue_range_2022 in default_insert_only set +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect revenue_range, revenue_range_2023 in default and revenue_range_2021, revenue_range_2022 in default_insert_only set nspname | relname | set_name ---------+--------------------+---------- - public | revenue_range | default - public | revenue_range_2021 | default - public | revenue_range_2022 | default - public | revenue_range_2023 | default + s612 | revenue_range | default + s612 | revenue_range_2021 | default + s612 | revenue_range_2022 | default + s612 | revenue_range_2023 | default (4 rows) -EXECUTE spocktab('orders_range'); -- Expect orders_range, orders_range_2021, orders_range_2022 in default set +SELECT * FROM get_table_repset_info('orders_range'); -- Expect orders_range, orders_range_2021, orders_range_2022 in default set nspname | relname | set_name ---------+-------------------+---------- - public | orders_range | default - public | orders_range_2021 | default - public | orders_range_2022 | default + s612 | orders_range | default + s612 | orders_range_2021 | default + s612 | orders_range_2022 | default (3 rows) -- Validate final data @@ -66,7 +73,7 @@ SELECT * FROM inventory_standalone ORDER BY product_id; -- Expect 1 row -- Validate final structure \d+ sales_range - Partitioned table "public.sales_range" + Partitioned table "s612.sales_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -79,7 +86,7 @@ Partitions: sales_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01'), sales_range_2022 FOR VALUES FROM ('2022-01-01') TO ('2023-01-01') \d+ sales_range_2021 - Table "public.sales_range_2021" + Table "s612.sales_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -92,7 +99,7 @@ Indexes: Access method: heap \d+ sales_range_2022 - Table "public.sales_range_2022" + Table "s612.sales_range_2022" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -107,7 +114,7 @@ Access method: heap \d+ sales_range_2023 Did not find any relation named "sales_range_2023". 
\d+ revenue_range - Partitioned table "public.revenue_range" + Partitioned table "s612.revenue_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -121,7 +128,7 @@ Partitions: revenue_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01'), revenue_range_2023 FOR VALUES FROM ('2023-01-01') TO ('2024-01-01') \d+ revenue_range_2021 - Table "public.revenue_range_2021" + Table "s612.revenue_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -134,7 +141,7 @@ Indexes: Access method: heap \d+ revenue_range_2022 - Table "public.revenue_range_2022" + Table "s612.revenue_range_2022" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -147,7 +154,7 @@ Indexes: Access method: heap \d orders_range - Partitioned table "public.orders_range" + Partitioned table "s612.orders_range" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- order_id | integer | | not null | @@ -160,7 +167,7 @@ Indexes: Number of partitions: 2 (Use \d+ to list them.) \d+ orders_range - Partitioned table "public.orders_range" + Partitioned table "s612.orders_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- order_id | integer | | not null | | plain | | | @@ -174,7 +181,7 @@ Partitions: orders_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01'), orders_range_2022 FOR VALUES FROM ('2022-01-01') TO ('2023-01-01') \d+ orders_range_2021 - Table "public.orders_range_2021" + Table "s612.orders_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- order_id | integer | | not null | | plain | | | @@ -188,7 +195,7 @@ Indexes: Access method: heap \d+ orders_range_2022 - Table "public.orders_range_2022" + Table "s612.orders_range_2022" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- order_id | integer | | not null | | plain | | | @@ -202,7 +209,7 @@ Indexes: Access method: heap \d+ inventory_range - Partitioned table "public.inventory_range" + Partitioned table "s612.inventory_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -217,7 +224,7 @@ Partitions: inventory_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01' inventory_range_default DEFAULT \d+ inventory_range_2021 - Table "public.inventory_range_2021" + Table "s612.inventory_range_2021" Column | 
Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -233,7 +240,7 @@ Check constraints: Access method: heap \d+ inventory_range_default - Table "public.inventory_range_default" + Table "s612.inventory_range_default" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -247,7 +254,7 @@ Indexes: Access method: heap \d+ inventory_standalone - Table "public.inventory_standalone" + Table "s612.inventory_standalone" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | diff --git a/t/auto_ddl/6122b_table_range_partitions_validate_n2.sql b/t/auto_ddl/6122b_table_range_partitions_validate_n2.sql index 1170527..2b464e7 100644 --- a/t/auto_ddl/6122b_table_range_partitions_validate_n2.sql +++ b/t/auto_ddl/6122b_table_range_partitions_validate_n2.sql @@ -1,10 +1,13 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + --This file will run on n2 and validate all the replicated tables data, structure and replication sets they're in --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SET ROLE appuser; + +SET search_path TO s612, public; -EXECUTE spocktab('sales_range'); -- Expect sales_range, sales_range_2022, sales_range_2021 in default set -EXECUTE spocktab('revenue_range'); -- Expect revenue_range, revenue_range_2023 in default and revenue_range_2021, revenue_range_2022 in default_insert_only set -EXECUTE spocktab('orders_range'); -- Expect orders_range, orders_range_2021, orders_range_2022 in default set +SELECT * FROM get_table_repset_info('sales_range'); -- Expect sales_range, sales_range_2022, sales_range_2021 in default set +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect revenue_range, revenue_range_2023 in default and revenue_range_2021, revenue_range_2022 in default_insert_only set +SELECT * FROM get_table_repset_info('orders_range'); -- Expect orders_range, orders_range_2021, orders_range_2022 in default set -- Validate final data SELECT * FROM sales_range ORDER BY sale_id; -- Expect all rows diff --git a/t/auto_ddl/6122c_table_range_parition_validate_n1.out b/t/auto_ddl/6122c_table_range_parition_validate_n1.out index de06f82..0df4929 100644 --- a/t/auto_ddl/6122c_table_range_parition_validate_n1.out +++ b/t/auto_ddl/6122c_table_range_parition_validate_n1.out @@ -1,11 +1,15 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6122b ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; - relid | nspname | relname | set_name --------+---------+---------+---------- -(0 rows) - +SET ROLE appuser; +SET +SET search_path TO s612, public; +SET 
-- none of these tables should exist. \d+ sales_range Did not find any relation named "sales_range". @@ -15,20 +19,38 @@ Did not find any relation named "sales_range_2021". Did not find any relation named "sales_range_2022". \d+ sales_range_2023 Did not find any relation named "sales_range_2023". +--spock.tables should be empty +SELECT * FROM get_table_repset_info('sales'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + \d+ revenue_range Did not find any relation named "revenue_range". \d+ revenue_range_2021 Did not find any relation named "revenue_range_2021". \d+ revenue_range_2022 Did not find any relation named "revenue_range_2022". -\d orders_range -Did not find any relation named "orders_range". +\d revenue_range +Did not find any relation named "revenue_range". +--spock.tables should be empty +SELECT * FROM get_table_repset_info('revenue'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + \d+ orders_range Did not find any relation named "orders_range". \d+ orders_range_2021 Did not find any relation named "orders_range_2021". \d+ orders_range_2022 Did not find any relation named "orders_range_2022". +--spock.tables should be empty +SELECT * FROM get_table_repset_info('orders'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + \d+ inventory_range Did not find any relation named "inventory_range". \d+ inventory_range_2021 @@ -37,3 +59,15 @@ Did not find any relation named "inventory_range_2021". Did not find any relation named "inventory_range_default". \d+ inventory_standalone Did not find any relation named "inventory_standalone". +--spock.tables should be empty +SELECT * FROM get_table_repset_info('inventory'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s612 CASCADE; +INFO: DDL statement replicated. +DROP SCHEMA diff --git a/t/auto_ddl/6122c_table_range_parition_validate_n1.sql b/t/auto_ddl/6122c_table_range_parition_validate_n1.sql index b74f52c..a799387 100644 --- a/t/auto_ddl/6122c_table_range_parition_validate_n1.sql +++ b/t/auto_ddl/6122c_table_range_parition_validate_n1.sql @@ -1,24 +1,39 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6122b +SET ROLE appuser; + +SET search_path TO s612, public; ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; -- none of these tables should exist. 
\d+ sales_range \d+ sales_range_2021 \d+ sales_range_2022 \d+ sales_range_2023 +--spock.tables should be empty +SELECT * FROM get_table_repset_info('sales'); \d+ revenue_range \d+ revenue_range_2021 \d+ revenue_range_2022 -\d orders_range +\d revenue_range +--spock.tables should be empty +SELECT * FROM get_table_repset_info('revenue'); \d+ orders_range \d+ orders_range_2021 \d+ orders_range_2022 +--spock.tables should be empty +SELECT * FROM get_table_repset_info('orders'); \d+ inventory_range \d+ inventory_range_2021 \d+ inventory_range_default \d+ inventory_standalone +--spock.tables should be empty +SELECT * FROM get_table_repset_info('inventory'); + +RESET ROLE; +--dropping the schema +DROP SCHEMA s612 CASCADE; \ No newline at end of file diff --git a/t/auto_ddl/6133a_table_list_partitions_n1.out b/t/auto_ddl/6133a_table_list_partitions_n1.out index c4eb705..2c5f96f 100644 --- a/t/auto_ddl/6133a_table_list_partitions_n1.out +++ b/t/auto_ddl/6133a_table_list_partitions_n1.out @@ -1,6 +1,20 @@ --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s613; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA s613 TO appuser; +INFO: DDL statement replicated. +GRANT +SET ROLE appuser; +SET +SET search_path TO s613, public; +SET ----------------------------- -- List Partitioning ----------------------------- @@ -28,12 +42,12 @@ INSERT INTO sales_list (sale_id, sale_region, sale_amount) VALUES (2, 'West', 200.0), (3, 'East', 150.0); INSERT 0 3 -EXECUTE spocktab('sales_list'); -- Expect both parent and child tables in default repset +SELECT * FROM get_table_repset_info('sales_list'); -- Expect both parent and child tables in default repset nspname | relname | set_name ---------+-----------------+---------- - public | sales_list | default - public | sales_list_east | default - public | sales_list_west | default + s613 | sales_list | default + s613 | sales_list_east | default + s613 | sales_list_west | default (3 rows) SELECT * FROM sales_list ORDER BY sale_id; -- Expect 3 rows @@ -55,7 +69,7 @@ INSERT INTO sales_list (sale_id, sale_region, sale_amount) VALUES INSERT 0 1 -- Validate structure and data after adding new partition \d+ sales_list_east - Table "public.sales_list_east" + Table "s613.sales_list_east" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -68,7 +82,7 @@ Indexes: Access method: heap \d+ sales_list_west - Table "public.sales_list_west" + Table "s613.sales_list_west" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -81,7 +95,7 @@ Indexes: Access method: heap \d+ sales_list_north - Table "public.sales_list_north" + Table "s613.sales_list_north" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | 
Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -94,7 +108,7 @@ Indexes: Access method: heap \d+ sales_list - Partitioned table "public.sales_list" + Partitioned table "s613.sales_list" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -107,13 +121,13 @@ Partitions: sales_list_east FOR VALUES IN ('East'), sales_list_north FOR VALUES IN ('North'), sales_list_west FOR VALUES IN ('West') -EXECUTE spocktab('sales_list'); -- Expect the new partition to be listed +SELECT * FROM get_table_repset_info('sales_list'); -- Expect the new partition to be listed nspname | relname | set_name ---------+------------------+---------- - public | sales_list | default - public | sales_list_east | default - public | sales_list_west | default - public | sales_list_north | default + s613 | sales_list | default + s613 | sales_list_east | default + s613 | sales_list_west | default + s613 | sales_list_north | default (4 rows) SELECT * FROM sales_list ORDER BY sale_id; -- Expect 4 rows @@ -125,39 +139,114 @@ SELECT * FROM sales_list ORDER BY sale_id; -- Expect 4 rows 4 | North | 250.0 (4 rows) -/*TO FIX: -commenting this test case due to https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -- Create a list partitioned table without primary key CREATE TABLE products_list ( product_id INT, product_category TEXT, product_name TEXT ) PARTITION BY LIST (product_category); - +INFO: DDL statement replicated. +CREATE TABLE -- Add partitions to the products_list table CREATE TABLE products_list_electronics PARTITION OF products_list FOR VALUES IN ('Electronics'); +INFO: DDL statement replicated. +CREATE TABLE CREATE TABLE products_list_clothing PARTITION OF products_list FOR VALUES IN ('Clothing'); - +INFO: DDL statement replicated. 
+CREATE TABLE -- Insert data into the products_list table INSERT INTO products_list (product_id, product_category, product_name) VALUES (1, 'Electronics', 'Laptop'), (2, 'Clothing', 'Shirt'), (3, 'Electronics', 'Smartphone'); - +INSERT 0 3 -- Validate structure and data \d+ products_list -EXECUTE spocktab('products_list'); -- Expect both parent and child tables in default_insert_only set + Partitioned table "s613.products_list" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | | | plain | | | + product_category | text | | | | extended | | | + product_name | text | | | | extended | | | +Partition key: LIST (product_category) +Partitions: products_list_clothing FOR VALUES IN ('Clothing'), + products_list_electronics FOR VALUES IN ('Electronics') + +SELECT * FROM get_table_repset_info('products_list'); -- Expect both parent and child tables in default_insert_only set + nspname | relname | set_name +---------+---------------------------+--------------------- + s613 | products_list | default_insert_only + s613 | products_list_electronics | default_insert_only + s613 | products_list_clothing | default_insert_only +(3 rows) + SELECT * FROM products_list ORDER BY product_id; -- Expect 3 rows + product_id | product_category | product_name +------------+------------------+-------------- + 1 | Electronics | Laptop + 2 | Clothing | Shirt + 3 | Electronics | Smartphone +(3 rows) -- Alter the products_list table to add a primary key ALTER TABLE products_list ADD PRIMARY KEY (product_id, product_category); - +INFO: DDL statement replicated. +ALTER TABLE -- Validate structure and data after adding primary key \d+ products_list + Partitioned table "s613.products_list" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | not null | | plain | | | + product_category | text | | not null | | extended | | | + product_name | text | | | | extended | | | +Partition key: LIST (product_category) +Indexes: + "products_list_pkey" PRIMARY KEY, btree (product_id, product_category) +Partitions: products_list_clothing FOR VALUES IN ('Clothing'), + products_list_electronics FOR VALUES IN ('Electronics') + \d+ products_list_clothing + Table "s613.products_list_clothing" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | not null | | plain | | | + product_category | text | | not null | | extended | | | + product_name | text | | | | extended | | | +Partition of: products_list FOR VALUES IN ('Clothing') +Partition constraint: ((product_category IS NOT NULL) AND (product_category = 'Clothing'::text)) +Indexes: + "products_list_clothing_pkey" PRIMARY KEY, btree (product_id, product_category) +Access method: heap + \d+ products_list_electronics -EXECUTE spocktab('products_list'); -- Expect the replication set to change to default + Table "s613.products_list_electronics" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description 
+------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | not null | | plain | | | + product_category | text | | not null | | extended | | | + product_name | text | | | | extended | | | +Partition of: products_list FOR VALUES IN ('Electronics') +Partition constraint: ((product_category IS NOT NULL) AND (product_category = 'Electronics'::text)) +Indexes: + "products_list_electronics_pkey" PRIMARY KEY, btree (product_id, product_category) +Access method: heap + +SELECT * FROM get_table_repset_info('products_list'); -- Expect the replication set to change to default + nspname | relname | set_name +---------+---------------------------+---------- + s613 | products_list | default + s613 | products_list_electronics | default + s613 | products_list_clothing | default +(3 rows) + SELECT * FROM products_list ORDER BY product_id; -- Expect 3 rows -*/ + product_id | product_category | product_name +------------+------------------+-------------- + 1 | Electronics | Laptop + 2 | Clothing | Shirt + 3 | Electronics | Smartphone +(3 rows) + diff --git a/t/auto_ddl/6133a_table_list_partitions_n1.sql b/t/auto_ddl/6133a_table_list_partitions_n1.sql index 5f39319..152e90c 100644 --- a/t/auto_ddl/6133a_table_list_partitions_n1.sql +++ b/t/auto_ddl/6133a_table_list_partitions_n1.sql @@ -1,5 +1,13 @@ --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s613; + +GRANT ALL PRIVILEGES ON SCHEMA s613 TO appuser; + +SET ROLE appuser; + +SET search_path TO s613, public; ----------------------------- -- List Partitioning @@ -25,7 +33,7 @@ INSERT INTO sales_list (sale_id, sale_region, sale_amount) VALUES (2, 'West', 200.0), (3, 'East', 150.0); -EXECUTE spocktab('sales_list'); -- Expect both parent and child tables in default repset +SELECT * FROM get_table_repset_info('sales_list'); -- Expect both parent and child tables in default repset SELECT * FROM sales_list ORDER BY sale_id; -- Expect 3 rows -- Alter the sales_list table to add a new partition @@ -41,10 +49,8 @@ INSERT INTO sales_list (sale_id, sale_region, sale_amount) VALUES \d+ sales_list_west \d+ sales_list_north \d+ sales_list -EXECUTE spocktab('sales_list'); -- Expect the new partition to be listed +SELECT * FROM get_table_repset_info('sales_list'); -- Expect the new partition to be listed SELECT * FROM sales_list ORDER BY sale_id; -- Expect 4 rows -/*TO FIX: -commenting this test case due to https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -- Create a list partitioned table without primary key CREATE TABLE products_list ( product_id INT, @@ -66,7 +72,7 @@ INSERT INTO products_list (product_id, product_category, product_name) VALUES -- Validate structure and data \d+ products_list -EXECUTE spocktab('products_list'); -- Expect both parent and child tables in default_insert_only set +SELECT * FROM get_table_repset_info('products_list'); -- Expect both parent and child tables in default_insert_only set SELECT * FROM products_list ORDER BY product_id; -- Expect 3 rows -- Alter the 
products_list table to add a primary key @@ -76,6 +82,6 @@ ALTER TABLE products_list ADD PRIMARY KEY (product_id, product_category); \d+ products_list \d+ products_list_clothing \d+ products_list_electronics -EXECUTE spocktab('products_list'); -- Expect the replication set to change to default +SELECT * FROM get_table_repset_info('products_list'); -- Expect the replication set to change to default SELECT * FROM products_list ORDER BY product_id; -- Expect 3 rows -*/ + diff --git a/t/auto_ddl/6133b_table_list_partitions_validate_n2.out b/t/auto_ddl/6133b_table_list_partitions_validate_n2.out index 59c9700..3e70b40 100644 --- a/t/auto_ddl/6133b_table_list_partitions_validate_n2.out +++ b/t/auto_ddl/6133b_table_list_partitions_validate_n2.out @@ -1,9 +1,16 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + --This file will run on n2 and validate all the replicated tables data, structure and replication sets they're in --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SET ROLE appuser; +SET +SET search_path TO s613, public; +SET \d+ sales_list_east - Table "public.sales_list_east" + Table "s613.sales_list_east" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -16,7 +23,7 @@ Indexes: Access method: heap \d+ sales_list_west - Table "public.sales_list_west" + Table "s613.sales_list_west" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -29,7 +36,7 @@ Indexes: Access method: heap \d+ sales_list_north - Table "public.sales_list_north" + Table "s613.sales_list_north" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -42,7 +49,7 @@ Indexes: Access method: heap \d+ sales_list - Partitioned table "public.sales_list" + Partitioned table "s613.sales_list" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -55,13 +62,13 @@ Partitions: sales_list_east FOR VALUES IN ('East'), sales_list_north FOR VALUES IN ('North'), sales_list_west FOR VALUES IN ('West') -EXECUTE spocktab('sales_list'); -- Expect the new partition to be listed +SELECT * FROM get_table_repset_info('sales_list'); -- Expect the new partition to be listed nspname | relname | set_name ---------+------------------+---------- - public | sales_list | default - public | sales_list_east | default - public | sales_list_west | default - public | sales_list_north | default + s613 | sales_list | default + s613 | sales_list_east | default + s613 | sales_list_west | default + s613 | sales_list_north | default (4 rows) SELECT * FROM sales_list ORDER BY sale_id; -- Expect 
4 rows @@ -81,13 +88,65 @@ NOTICE: drop cascades to table sales_list_east membership in replication set de NOTICE: drop cascades to table sales_list membership in replication set default INFO: DDL statement replicated. DROP TABLE -/* -https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 \d+ products_list + Partitioned table "s613.products_list" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | not null | | plain | | | + product_category | text | | not null | | extended | | | + product_name | text | | | | extended | | | +Partition key: LIST (product_category) +Indexes: + "products_list_pkey" PRIMARY KEY, btree (product_id, product_category) +Partitions: products_list_clothing FOR VALUES IN ('Clothing'), + products_list_electronics FOR VALUES IN ('Electronics') + \d+ products_list_clothing + Table "s613.products_list_clothing" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | not null | | plain | | | + product_category | text | | not null | | extended | | | + product_name | text | | | | extended | | | +Partition of: products_list FOR VALUES IN ('Clothing') +Partition constraint: ((product_category IS NOT NULL) AND (product_category = 'Clothing'::text)) +Indexes: + "products_list_clothing_pkey" PRIMARY KEY, btree (product_id, product_category) +Access method: heap + \d+ products_list_electronics -EXECUTE spocktab('products_list'); -- Expect all to be in default repset + Table "s613.products_list_electronics" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | not null | | plain | | | + product_category | text | | not null | | extended | | | + product_name | text | | | | extended | | | +Partition of: products_list FOR VALUES IN ('Electronics') +Partition constraint: ((product_category IS NOT NULL) AND (product_category = 'Electronics'::text)) +Indexes: + "products_list_electronics_pkey" PRIMARY KEY, btree (product_id, product_category) +Access method: heap + +SELECT * FROM get_table_repset_info('products_list'); -- Expect all to be in default repset + nspname | relname | set_name +---------+---------------------------+---------- + s613 | products_list | default + s613 | products_list_electronics | default + s613 | products_list_clothing | default +(3 rows) + SELECT * FROM products_list ORDER BY product_id; -- Expect 3 rows + product_id | product_category | product_name +------------+------------------+-------------- + 1 | Electronics | Laptop + 2 | Clothing | Shirt + 3 | Electronics | Smartphone +(3 rows) + --exercise ddl on n2 DROP TABLE products_list CASCADE; -*/ +NOTICE: drop cascades to table products_list_clothing membership in replication set default +NOTICE: drop cascades to table products_list_electronics membership in replication set default +NOTICE: drop cascades to table products_list membership in replication set default +INFO: DDL statement replicated. 
+DROP TABLE diff --git a/t/auto_ddl/6133b_table_list_partitions_validate_n2.sql b/t/auto_ddl/6133b_table_list_partitions_validate_n2.sql index 16ca3df..208890c 100644 --- a/t/auto_ddl/6133b_table_list_partitions_validate_n2.sql +++ b/t/auto_ddl/6133b_table_list_partitions_validate_n2.sql @@ -1,22 +1,24 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + --This file will run on n2 and validate all the replicated tables data, structure and replication sets they're in --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SET ROLE appuser; + +SET search_path TO s613, public; \d+ sales_list_east \d+ sales_list_west \d+ sales_list_north \d+ sales_list -EXECUTE spocktab('sales_list'); -- Expect the new partition to be listed +SELECT * FROM get_table_repset_info('sales_list'); -- Expect the new partition to be listed SELECT * FROM sales_list ORDER BY sale_id; -- Expect 4 rows --exercise ddl on n2 DROP TABLE sales_list CASCADE; -/* -https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 + \d+ products_list \d+ products_list_clothing \d+ products_list_electronics -EXECUTE spocktab('products_list'); -- Expect all to be in default repset +SELECT * FROM get_table_repset_info('products_list'); -- Expect all to be in default repset SELECT * FROM products_list ORDER BY product_id; -- Expect 3 rows --exercise ddl on n2 DROP TABLE products_list CASCADE; -*/ + diff --git a/t/auto_ddl/6133c_table_list_parition_validate_n1.out b/t/auto_ddl/6133c_table_list_parition_validate_n1.out index ff66694..a9a85c6 100644 --- a/t/auto_ddl/6133c_table_list_parition_validate_n1.out +++ b/t/auto_ddl/6133c_table_list_parition_validate_n1.out @@ -1,12 +1,16 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6133b ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; - relid | nspname | relname | set_name --------+---------+---------+---------- -(0 rows) - --- none of these tables should exist. +SET ROLE appuser; +SET +SET search_path TO s613, public; +SET +--none of these should exist \d sales_list_east Did not find any relation named "sales_list_east". \d sales_list_west @@ -15,8 +19,25 @@ Did not find any relation named "sales_list_west". Did not find any relation named "sales_list_north". \d sales_list Did not find any relation named "sales_list". -/* +SELECT * FROM get_table_repset_info('sales'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + \d+ products_list +Did not find any relation named "products_list". \d+ products_list_clothing +Did not find any relation named "products_list_clothing". \d+ products_list_electronics -*/ +Did not find any relation named "products_list_electronics". +SELECT * FROM get_table_repset_info('products'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s613 CASCADE; +INFO: DDL statement replicated. 
+DROP SCHEMA diff --git a/t/auto_ddl/6133c_table_list_parition_validate_n1.sql b/t/auto_ddl/6133c_table_list_parition_validate_n1.sql index 4157dd7..33c1ab8 100644 --- a/t/auto_ddl/6133c_table_list_parition_validate_n1.sql +++ b/t/auto_ddl/6133c_table_list_parition_validate_n1.sql @@ -1,16 +1,23 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6133b +SET ROLE appuser; + +SET search_path TO s613, public; +--none of these should exist ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; --- none of these tables should exist. \d sales_list_east \d sales_list_west \d sales_list_north \d sales_list +SELECT * FROM get_table_repset_info('sales'); -/* \d+ products_list \d+ products_list_clothing \d+ products_list_electronics -*/ \ No newline at end of file +SELECT * FROM get_table_repset_info('products'); + +RESET ROLE; +--dropping the schema +DROP SCHEMA s613 CASCADE; \ No newline at end of file From 9ed73f8bca5d9ad90b929e016dfcd3f39bee5ff6 Mon Sep 17 00:00:00 2001 From: "A. Hayee Bhatti" Date: Mon, 28 Oct 2024 20:56:27 +0500 Subject: [PATCH 31/48] [AutoDDL] Update 6144/6155/6166 scripts to execute via nonsuperuser and adjust outputs Updated AutoDDL SQL scripts 6144, 6155, 6166 (a, b, c) to execute primarily under the non-superuser (appuser) role, switching to superuser where necessary. Adjusted the related SQL scripts and expected output files to reflect this change. --- t/auto_ddl/6144a_table_hash_partitions_n1.out | 90 +++++++------ t/auto_ddl/6144a_table_hash_partitions_n1.sql | 20 ++- ...144b_table_hash_partitions_validate_n2.out | 57 ++++---- ...144b_table_hash_partitions_validate_n2.sql | 11 +- .../6144c_table_hash_parition_validate_n1.out | 32 ++++- .../6144c_table_hash_parition_validate_n1.sql | 15 ++- t/auto_ddl/6155a_index_n1.out | 122 ++++++++++-------- t/auto_ddl/6155a_index_n1.sql | 20 ++- t/auto_ddl/6155b_index_validate_n2.out | 83 ++++++------ t/auto_ddl/6155b_index_validate_n2.sql | 15 ++- t/auto_ddl/6155c_index_validate_drop_n1.out | 21 ++- t/auto_ddl/6155c_index_validate_drop_n1.sql | 11 +- .../6166a_views_materialized_views_n1.out | 91 +++++++------ .../6166a_views_materialized_views_n1.sql | 50 ++++--- .../6166b_view_mat_views_validate_n2.out | 46 +++---- .../6166b_view_mat_views_validate_n2.sql | 26 ++-- .../6166c_views_mat_view_validate_n1.out | 26 ++-- .../6166c_views_mat_view_validate_n1.sql | 14 +- 18 files changed, 445 insertions(+), 305 deletions(-) diff --git a/t/auto_ddl/6144a_table_hash_partitions_n1.out b/t/auto_ddl/6144a_table_hash_partitions_n1.out index 6f2cbab..2d84b0d 100644 --- a/t/auto_ddl/6144a_table_hash_partitions_n1.out +++ b/t/auto_ddl/6144a_table_hash_partitions_n1.out @@ -1,6 +1,20 @@ --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s614; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA s614 TO appuser; +INFO: DDL statement replicated. 
+GRANT +SET ROLE appuser; +SET +SET search_path TO s614, public; +SET ----------------------------- -- Hash Partitioning ----------------------------- @@ -37,14 +51,14 @@ INSERT INTO sales_hash (sale_id, sale_date, sale_amount) VALUES (3, '2023-01-03', 150.0), (4, '2023-01-04', 250.0); INSERT 0 4 -EXECUTE spocktab('sales_hash'); -- Expect both parent and child tables in default repset +SELECT * FROM get_table_repset_info('sales_hash'); -- Expect both parent and child tables in default repset nspname | relname | set_name ---------+--------------+---------- - public | sales_hash | default - public | sales_hash_1 | default - public | sales_hash_2 | default - public | sales_hash_3 | default - public | sales_hash_4 | default + s614 | sales_hash | default + s614 | sales_hash_1 | default + s614 | sales_hash_2 | default + s614 | sales_hash_3 | default + s614 | sales_hash_4 | default (5 rows) SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows @@ -58,7 +72,7 @@ SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows -- Validate structure and data after adding new partition \d sales_hash_1 - Table "public.sales_hash_1" + Table "s614.sales_hash_1" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -69,7 +83,7 @@ Indexes: "sales_hash_1_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash_2 - Table "public.sales_hash_2" + Table "s614.sales_hash_2" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -80,7 +94,7 @@ Indexes: "sales_hash_2_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash_3 - Table "public.sales_hash_3" + Table "s614.sales_hash_3" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -91,7 +105,7 @@ Indexes: "sales_hash_3_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash_4 - Table "public.sales_hash_4" + Table "s614.sales_hash_4" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -102,7 +116,7 @@ Indexes: "sales_hash_4_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash - Partitioned table "public.sales_hash" + Partitioned table "s614.sales_hash" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -113,14 +127,14 @@ Indexes: "sales_hash_pkey" PRIMARY KEY, btree (sale_id, sale_date) Number of partitions: 4 (Use \d+ to list them.) 
-EXECUTE spocktab('sales_hash'); -- Expect all partitions to be listed +SELECT * FROM get_table_repset_info('sales_hash'); -- Expect all partitions to be listed nspname | relname | set_name ---------+--------------+---------- - public | sales_hash | default - public | sales_hash_1 | default - public | sales_hash_2 | default - public | sales_hash_3 | default - public | sales_hash_4 | default + s614 | sales_hash | default + s614 | sales_hash_1 | default + s614 | sales_hash_2 | default + s614 | sales_hash_3 | default + s614 | sales_hash_4 | default (5 rows) SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows @@ -166,7 +180,7 @@ INSERT INTO products_hash (product_id, product_date, product_name) VALUES INSERT 0 4 -- Validate structure and data \d+ products_hash - Partitioned table "public.products_hash" + Partitioned table "s614.products_hash" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- product_id | integer | | | | plain | | | @@ -178,14 +192,14 @@ Partitions: products_hash_1 FOR VALUES WITH (modulus 4, remainder 0), products_hash_3 FOR VALUES WITH (modulus 4, remainder 2), products_hash_4 FOR VALUES WITH (modulus 4, remainder 3) -EXECUTE spocktab('products_hash'); -- Expect both parent and child tables in default_insert_only set +SELECT * FROM get_table_repset_info('products_hash'); -- Expect both parent and child tables in default_insert_only set nspname | relname | set_name ---------+-----------------+--------------------- - public | products_hash | default_insert_only - public | products_hash_1 | default_insert_only - public | products_hash_2 | default_insert_only - public | products_hash_3 | default_insert_only - public | products_hash_4 | default_insert_only + s614 | products_hash | default_insert_only + s614 | products_hash_1 | default_insert_only + s614 | products_hash_2 | default_insert_only + s614 | products_hash_3 | default_insert_only + s614 | products_hash_4 | default_insert_only (5 rows) SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows @@ -203,7 +217,7 @@ INFO: DDL statement replicated. ALTER TABLE -- Validate structure and data after adding primary key \d products_hash - Partitioned table "public.products_hash" + Partitioned table "s614.products_hash" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -215,7 +229,7 @@ Indexes: Number of partitions: 4 (Use \d+ to list them.) 
\d products_hash_1 - Table "public.products_hash_1" + Table "s614.products_hash_1" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -226,7 +240,7 @@ Indexes: "products_hash_1_pkey" PRIMARY KEY, btree (product_id, product_date) \d products_hash_2 - Table "public.products_hash_2" + Table "s614.products_hash_2" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -237,7 +251,7 @@ Indexes: "products_hash_2_pkey" PRIMARY KEY, btree (product_id, product_date) \d products_hash_3 - Table "public.products_hash_3" + Table "s614.products_hash_3" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -248,7 +262,7 @@ Indexes: "products_hash_3_pkey" PRIMARY KEY, btree (product_id, product_date) \d products_hash_4 - Table "public.products_hash_4" + Table "s614.products_hash_4" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -258,14 +272,14 @@ Partition of: products_hash FOR VALUES WITH (modulus 4, remainder 3) Indexes: "products_hash_4_pkey" PRIMARY KEY, btree (product_id, product_date) -EXECUTE spocktab('products_hash'); -- Expect the replication set to change to default +SELECT * FROM get_table_repset_info('products_hash'); -- Expect the replication set to change to default nspname | relname | set_name ---------+-----------------+---------- - public | products_hash | default - public | products_hash_1 | default - public | products_hash_2 | default - public | products_hash_3 | default - public | products_hash_4 | default + s614 | products_hash | default + s614 | products_hash_1 | default + s614 | products_hash_2 | default + s614 | products_hash_3 | default + s614 | products_hash_4 | default (5 rows) SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows diff --git a/t/auto_ddl/6144a_table_hash_partitions_n1.sql b/t/auto_ddl/6144a_table_hash_partitions_n1.sql index 069c44b..9293960 100644 --- a/t/auto_ddl/6144a_table_hash_partitions_n1.sql +++ b/t/auto_ddl/6144a_table_hash_partitions_n1.sql @@ -1,5 +1,13 @@ --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s614; + +GRANT ALL PRIVILEGES ON SCHEMA s614 TO appuser; + +SET ROLE appuser; + +SET search_path TO s614, public; ----------------------------- -- Hash Partitioning @@ -30,7 +38,7 @@ INSERT INTO sales_hash (sale_id, sale_date, sale_amount) VALUES (3, '2023-01-03', 150.0), (4, '2023-01-04', 250.0); -EXECUTE spocktab('sales_hash'); -- Expect both parent and child tables in default repset +SELECT * FROM get_table_repset_info('sales_hash'); -- Expect both parent and child tables in default repset SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows -- Validate structure and data after adding new partition @@ -39,7 +47,7 @@ SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows \d sales_hash_3 \d sales_hash_4 \d sales_hash -EXECUTE spocktab('sales_hash'); -- Expect all partitions to be listed +SELECT * FROM 
get_table_repset_info('sales_hash'); -- Expect all partitions to be listed SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows -- Create a hash partitioned table without primary key @@ -68,7 +76,7 @@ INSERT INTO products_hash (product_id, product_date, product_name) VALUES -- Validate structure and data \d+ products_hash -EXECUTE spocktab('products_hash'); -- Expect both parent and child tables in default_insert_only set +SELECT * FROM get_table_repset_info('products_hash'); -- Expect both parent and child tables in default_insert_only set SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows -- Alter the products_hash table to add a primary key @@ -81,5 +89,5 @@ ALTER TABLE products_hash ADD PRIMARY KEY (product_id, product_date); \d products_hash_3 \d products_hash_4 -EXECUTE spocktab('products_hash'); -- Expect the replication set to change to default +SELECT * FROM get_table_repset_info('products_hash'); -- Expect the replication set to change to default SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows diff --git a/t/auto_ddl/6144b_table_hash_partitions_validate_n2.out b/t/auto_ddl/6144b_table_hash_partitions_validate_n2.out index c106381..dc9c677 100644 --- a/t/auto_ddl/6144b_table_hash_partitions_validate_n2.out +++ b/t/auto_ddl/6144b_table_hash_partitions_validate_n2.out @@ -1,10 +1,17 @@ --This file will run on n2 and validate all the replicated tables data, structure and replication sets they're in --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +SET ROLE appuser; +SET +SET search_path TO s614, public; +SET -- Validate structure and data after adding new partition \d sales_hash_1 - Table "public.sales_hash_1" + Table "s614.sales_hash_1" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -15,7 +22,7 @@ Indexes: "sales_hash_1_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash_2 - Table "public.sales_hash_2" + Table "s614.sales_hash_2" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -26,7 +33,7 @@ Indexes: "sales_hash_2_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash_3 - Table "public.sales_hash_3" + Table "s614.sales_hash_3" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -37,7 +44,7 @@ Indexes: "sales_hash_3_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash_4 - Table "public.sales_hash_4" + Table "s614.sales_hash_4" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -48,7 +55,7 @@ Indexes: "sales_hash_4_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash - Partitioned table "public.sales_hash" + Partitioned table "s614.sales_hash" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -59,14 +66,14 @@ Indexes: "sales_hash_pkey" PRIMARY KEY, btree (sale_id, sale_date) Number of partitions: 4 (Use \d+ to list them.) 
-EXECUTE spocktab('sales_hash'); -- Expect all partitions to be listed +SELECT * FROM get_table_repset_info('sales_hash'); -- Expect all partitions to be listed nspname | relname | set_name ---------+--------------+---------- - public | sales_hash | default - public | sales_hash_1 | default - public | sales_hash_2 | default - public | sales_hash_3 | default - public | sales_hash_4 | default + s614 | sales_hash | default + s614 | sales_hash_1 | default + s614 | sales_hash_2 | default + s614 | sales_hash_3 | default + s614 | sales_hash_4 | default (5 rows) SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows @@ -88,7 +95,7 @@ NOTICE: drop cascades to table sales_hash membership in replication set default INFO: DDL statement replicated. DROP TABLE \d products_hash - Partitioned table "public.products_hash" + Partitioned table "s614.products_hash" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -100,7 +107,7 @@ Indexes: Number of partitions: 4 (Use \d+ to list them.) \d products_hash_1 - Table "public.products_hash_1" + Table "s614.products_hash_1" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -111,7 +118,7 @@ Indexes: "products_hash_1_pkey" PRIMARY KEY, btree (product_id, product_date) \d products_hash_2 - Table "public.products_hash_2" + Table "s614.products_hash_2" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -122,7 +129,7 @@ Indexes: "products_hash_2_pkey" PRIMARY KEY, btree (product_id, product_date) \d products_hash_3 - Table "public.products_hash_3" + Table "s614.products_hash_3" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -133,7 +140,7 @@ Indexes: "products_hash_3_pkey" PRIMARY KEY, btree (product_id, product_date) \d products_hash_4 - Table "public.products_hash_4" + Table "s614.products_hash_4" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -143,14 +150,14 @@ Partition of: products_hash FOR VALUES WITH (modulus 4, remainder 3) Indexes: "products_hash_4_pkey" PRIMARY KEY, btree (product_id, product_date) -EXECUTE spocktab('products_hash'); -- Expect the replication set to be default +SELECT * FROM get_table_repset_info('products_hash'); -- Expect the replication set to be default nspname | relname | set_name ---------+-----------------+---------- - public | products_hash | default - public | products_hash_1 | default - public | products_hash_2 | default - public | products_hash_3 | default - public | products_hash_4 | default + s614 | products_hash | default + s614 | products_hash_1 | default + s614 | products_hash_2 | default + s614 | products_hash_3 | default + s614 | products_hash_4 | default (5 rows) SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows diff --git a/t/auto_ddl/6144b_table_hash_partitions_validate_n2.sql b/t/auto_ddl/6144b_table_hash_partitions_validate_n2.sql index 9d6fdca..0cdb67c 100644 --- a/t/auto_ddl/6144b_table_hash_partitions_validate_n2.sql +++ b/t/auto_ddl/6144b_table_hash_partitions_validate_n2.sql @@ -1,6 +1,9 @@ --This file will run on n2 and validate all the replicated tables data, structure and replication sets they're in --- Prepared statement for 
spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +SET ROLE appuser; + +SET search_path TO s614, public; -- Validate structure and data after adding new partition \d sales_hash_1 @@ -8,7 +11,7 @@ PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE re \d sales_hash_3 \d sales_hash_4 \d sales_hash -EXECUTE spocktab('sales_hash'); -- Expect all partitions to be listed +SELECT * FROM get_table_repset_info('sales_hash'); -- Expect all partitions to be listed SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows --exercise ddl on n2 DROP TABLE sales_hash CASCADE; @@ -19,7 +22,7 @@ DROP TABLE sales_hash CASCADE; \d products_hash_3 \d products_hash_4 -EXECUTE spocktab('products_hash'); -- Expect the replication set to be default +SELECT * FROM get_table_repset_info('products_hash'); -- Expect the replication set to be default SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows --exercise ddl on n2 DROP TABLE products_hash CASCADE; diff --git a/t/auto_ddl/6144c_table_hash_parition_validate_n1.out b/t/auto_ddl/6144c_table_hash_parition_validate_n1.out index 97e2a6c..7b3bfb4 100644 --- a/t/auto_ddl/6144c_table_hash_parition_validate_n1.out +++ b/t/auto_ddl/6144c_table_hash_parition_validate_n1.out @@ -1,11 +1,15 @@ -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6144b ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; - relid | nspname | relname | set_name --------+---------+---------+---------- -(0 rows) +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) +SET ROLE appuser; +SET +SET search_path TO s614, public; +SET -- none of these tables should exist. \d sales_hash_1 Did not find any relation named "sales_hash_1". @@ -17,8 +21,11 @@ Did not find any relation named "sales_hash_3". Did not find any relation named "sales_hash_4". \d sales_hash Did not find any relation named "sales_hash". -/* -*/ +SELECT * FROM get_table_repset_info('sales_hash'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + \d products_hash Did not find any relation named "products_hash". \d products_hash_1 @@ -29,3 +36,14 @@ Did not find any relation named "products_hash_2". Did not find any relation named "products_hash_3". \d products_hash_4 Did not find any relation named "products_hash_4". +SELECT * FROM get_table_repset_info('products_hash'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s614 CASCADE; +INFO: DDL statement replicated. 
+DROP SCHEMA diff --git a/t/auto_ddl/6144c_table_hash_parition_validate_n1.sql b/t/auto_ddl/6144c_table_hash_parition_validate_n1.sql index b0a6a2f..8e6908c 100644 --- a/t/auto_ddl/6144c_table_hash_parition_validate_n1.sql +++ b/t/auto_ddl/6144c_table_hash_parition_validate_n1.sql @@ -1,19 +1,26 @@ -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6144b ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +SET ROLE appuser; + +SET search_path TO s614, public; -- none of these tables should exist. \d sales_hash_1 \d sales_hash_2 \d sales_hash_3 \d sales_hash_4 \d sales_hash +SELECT * FROM get_table_repset_info('sales_hash'); -/* -*/ \d products_hash \d products_hash_1 \d products_hash_2 \d products_hash_3 \d products_hash_4 +SELECT * FROM get_table_repset_info('products_hash'); + +RESET ROLE; +--dropping the schema +DROP SCHEMA s614 CASCADE; \ No newline at end of file diff --git a/t/auto_ddl/6155a_index_n1.out b/t/auto_ddl/6155a_index_n1.out index e829596..957c4c3 100644 --- a/t/auto_ddl/6155a_index_n1.out +++ b/t/auto_ddl/6155a_index_n1.out @@ -1,6 +1,20 @@ --- Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s615; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA s615 TO appuser; +INFO: DDL statement replicated. +GRANT +SET ROLE appuser; +SET +SET search_path TO s615, public; +SET ----------------------------- -- INDEX tests ----------------------------- @@ -41,20 +55,20 @@ INFO: DDL statement replicated. 
CREATE INDEX -- Validate indexes \di *product_catalog_* - List of relations - Schema | Name | Type | Owner | Table ---------+----------------------------+-------+-------+----------------- - public | brin_product_catalog_idx | index | rocky | product_catalog - public | btree_product_catalog_idx | index | rocky | product_catalog - public | gin_product_catalog_idx | index | rocky | product_catalog - public | gist_product_catalog_idx | index | rocky | product_catalog - public | hash_product_catalog_idx | index | rocky | product_catalog - public | product_catalog_pkey | index | rocky | product_catalog - public | spgist_product_catalog_idx | index | rocky | product_catalog + List of relations + Schema | Name | Type | Owner | Table +--------+----------------------------+-------+---------+----------------- + s615 | brin_product_catalog_idx | index | appuser | product_catalog + s615 | btree_product_catalog_idx | index | appuser | product_catalog + s615 | gin_product_catalog_idx | index | appuser | product_catalog + s615 | gist_product_catalog_idx | index | appuser | product_catalog + s615 | hash_product_catalog_idx | index | appuser | product_catalog + s615 | product_catalog_pkey | index | appuser | product_catalog + s615 | spgist_product_catalog_idx | index | appuser | product_catalog (7 rows) \d product_catalog - Table "public.product_catalog" + Table "s615.product_catalog" Column | Type | Collation | Nullable | Default --------------+------------------------+-----------+----------+--------- product_id | integer | | not null | @@ -94,15 +108,15 @@ INFO: DDL statement replicated. CREATE INDEX -- Validate indexes \di *_emp_* - List of relations - Schema | Name | Type | Owner | Table ---------+----------------------+-------+-------+-------------------- - public | unique_emp_email_idx | index | rocky | employee_directory - public | unique_emp_id_idx | index | rocky | employee_directory + List of relations + Schema | Name | Type | Owner | Table +--------+----------------------+-------+---------+-------------------- + s615 | unique_emp_email_idx | index | appuser | employee_directory + s615 | unique_emp_id_idx | index | appuser | employee_directory (2 rows) \d employee_directory - Table "public.employee_directory" + Table "s615.employee_directory" Column | Type | Collation | Nullable | Default -----------+------------------------+-----------+----------+--------- emp_id | integer | | not null | @@ -135,11 +149,11 @@ INFO: DDL statement replicated. CREATE INDEX -- Validate indexes \di *sales_* - List of relations - Schema | Name | Type | Owner | Table ---------+-----------------------+-------+-------+------------ - public | func_sales_amount_idx | index | rocky | sales_data - public | sales_data_pkey | index | rocky | sales_data + List of relations + Schema | Name | Type | Owner | Table +--------+-----------------------+-------+---------+------------ + s615 | func_sales_amount_idx | index | appuser | sales_data + s615 | sales_data_pkey | index | appuser | sales_data (2 rows) -- Altering Indexes @@ -149,12 +163,12 @@ INFO: DDL statement replicated. 
CREATE INDEX -- Validate indexes \di *sales* - List of relations - Schema | Name | Type | Owner | Table ---------+------------------------+-------+-------+------------ - public | alter_sales_region_idx | index | rocky | sales_data - public | func_sales_amount_idx | index | rocky | sales_data - public | sales_data_pkey | index | rocky | sales_data + List of relations + Schema | Name | Type | Owner | Table +--------+------------------------+-------+---------+------------ + s615 | alter_sales_region_idx | index | appuser | sales_data + s615 | func_sales_amount_idx | index | appuser | sales_data + s615 | sales_data_pkey | index | appuser | sales_data (3 rows) -- Alter the index to rename it @@ -171,18 +185,18 @@ INFO: DDL statement replicated. CREATE INDEX -- Validate index \di *sales* - List of relations - Schema | Name | Type | Owner | Table ---------+--------------------------+-------+-------+------------ - public | conditional_sales_idx | index | rocky | sales_data - public | func_sales_amount_idx | index | rocky | sales_data - public | partial_sales_idx | index | rocky | sales_data - public | renamed_sales_region_idx | index | rocky | sales_data - public | sales_data_pkey | index | rocky | sales_data + List of relations + Schema | Name | Type | Owner | Table +--------+--------------------------+-------+---------+------------ + s615 | conditional_sales_idx | index | appuser | sales_data + s615 | func_sales_amount_idx | index | appuser | sales_data + s615 | partial_sales_idx | index | appuser | sales_data + s615 | renamed_sales_region_idx | index | appuser | sales_data + s615 | sales_data_pkey | index | appuser | sales_data (5 rows) \d sales_data - Table "public.sales_data" + Table "s615.sales_data" Column | Type | Collation | Nullable | Default -------------+-----------------------+-----------+----------+--------- sale_id | integer | | not null | @@ -218,15 +232,15 @@ WARNING: This DDL statement will not be replicated. 
CREATE INDEX -- Validate concurrently created indexes \di *concurrent* - List of relations - Schema | Name | Type | Owner | Table ---------+----------------------------------+-------+-------+-------------------- - public | concurrent_idx_tbl_name_idx | index | rocky | concurrent_idx_tbl - public | concurrent_unique_idx_tbl_id_idx | index | rocky | concurrent_idx_tbl + List of relations + Schema | Name | Type | Owner | Table +--------+----------------------------------+-------+---------+-------------------- + s615 | concurrent_idx_tbl_name_idx | index | appuser | concurrent_idx_tbl + s615 | concurrent_unique_idx_tbl_id_idx | index | appuser | concurrent_idx_tbl (2 rows) \d concurrent_idx_tbl - Table "public.concurrent_idx_tbl" + Table "s615.concurrent_idx_tbl" Column | Type | Collation | Nullable | Default --------+------------------------+-----------+----------+--------- id | integer | | | @@ -268,27 +282,27 @@ SELECT * FROM concurrent_idx_tbl WHERE name = 'Second'; (1 row) -- Validate replication sets for primary key-related tables -EXECUTE spocktab('product_catalog'); -- Expect product_catalog in default set +SELECT * FROM get_table_repset_info('product_catalog'); -- Expect product_catalog in default set nspname | relname | set_name ---------+-----------------+---------- - public | product_catalog | default + s615 | product_catalog | default (1 row) -EXECUTE spocktab('employee_directory'); -- Expect employee_directory in default set +SELECT * FROM get_table_repset_info('employee_directory'); -- Expect employee_directory in default set nspname | relname | set_name ---------+--------------------+---------- - public | employee_directory | default + s615 | employee_directory | default (1 row) -EXECUTE spocktab('sales_data'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('sales_data'); -- Expect sales_data in default set nspname | relname | set_name ---------+------------+---------- - public | sales_data | default + s615 | sales_data | default (1 row) -EXECUTE spocktab('concurrent_idx_tbl'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('concurrent_idx_tbl'); -- Expect sales_data in default set nspname | relname | set_name ---------+--------------------+--------------------- - public | concurrent_idx_tbl | default_insert_only + s615 | concurrent_idx_tbl | default_insert_only (1 row) diff --git a/t/auto_ddl/6155a_index_n1.sql b/t/auto_ddl/6155a_index_n1.sql index 35c4914..f538c98 100644 --- a/t/auto_ddl/6155a_index_n1.sql +++ b/t/auto_ddl/6155a_index_n1.sql @@ -1,5 +1,13 @@ --- Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s615; + +GRANT ALL PRIVILEGES ON SCHEMA s615 TO appuser; + +SET ROLE appuser; + +SET search_path TO s615, public; ----------------------------- -- INDEX tests @@ -126,7 +134,7 @@ SELECT * FROM sales_data WHERE sale_amount > 150; SELECT * FROM concurrent_idx_tbl WHERE name = 'Second'; -- Validate replication sets for primary key-related tables -EXECUTE spocktab('product_catalog'); -- Expect product_catalog in default set -EXECUTE spocktab('employee_directory'); -- Expect employee_directory in default set -EXECUTE spocktab('sales_data'); -- Expect sales_data in default set -EXECUTE spocktab('concurrent_idx_tbl'); 
-- Expect sales_data in default set +SELECT * FROM get_table_repset_info('product_catalog'); -- Expect product_catalog in default set +SELECT * FROM get_table_repset_info('employee_directory'); -- Expect employee_directory in default set +SELECT * FROM get_table_repset_info('sales_data'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('concurrent_idx_tbl'); -- Expect sales_data in default set diff --git a/t/auto_ddl/6155b_index_validate_n2.out b/t/auto_ddl/6155b_index_validate_n2.out index 7aec7a9..f43a6b6 100644 --- a/t/auto_ddl/6155b_index_validate_n2.out +++ b/t/auto_ddl/6155b_index_validate_n2.out @@ -1,23 +1,30 @@ --- Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +SET ROLE appuser; +SET +SET search_path TO s615, public; +SET -- Validate and drop indexes on n2 -- Validate indexes on product_catalog \di *product_catalog_* - List of relations - Schema | Name | Type | Owner | Table ---------+----------------------------+-------+-------+----------------- - public | brin_product_catalog_idx | index | rocky | product_catalog - public | btree_product_catalog_idx | index | rocky | product_catalog - public | gin_product_catalog_idx | index | rocky | product_catalog - public | gist_product_catalog_idx | index | rocky | product_catalog - public | hash_product_catalog_idx | index | rocky | product_catalog - public | product_catalog_pkey | index | rocky | product_catalog - public | spgist_product_catalog_idx | index | rocky | product_catalog + List of relations + Schema | Name | Type | Owner | Table +--------+----------------------------+-------+---------+----------------- + s615 | brin_product_catalog_idx | index | appuser | product_catalog + s615 | btree_product_catalog_idx | index | appuser | product_catalog + s615 | gin_product_catalog_idx | index | appuser | product_catalog + s615 | gist_product_catalog_idx | index | appuser | product_catalog + s615 | hash_product_catalog_idx | index | appuser | product_catalog + s615 | product_catalog_pkey | index | appuser | product_catalog + s615 | spgist_product_catalog_idx | index | appuser | product_catalog (7 rows) \d product_catalog - Table "public.product_catalog" + Table "s615.product_catalog" Column | Type | Collation | Nullable | Default --------------+------------------------+-----------+----------+--------- product_id | integer | | not null | @@ -43,15 +50,15 @@ SELECT * FROM product_catalog WHERE product_id = 2; -- Expect 1 row with product -- Validate indexes on employee_directory \di *_emp_* - List of relations - Schema | Name | Type | Owner | Table ---------+----------------------+-------+-------+-------------------- - public | unique_emp_email_idx | index | rocky | employee_directory - public | unique_emp_id_idx | index | rocky | employee_directory + List of relations + Schema | Name | Type | Owner | Table +--------+----------------------+-------+---------+-------------------- + s615 | unique_emp_email_idx | index | appuser | employee_directory + s615 | unique_emp_id_idx | index | appuser | employee_directory (2 rows) \d employee_directory - Table "public.employee_directory" + Table "s615.employee_directory" Column | Type | Collation | Nullable | Default -----------+------------------------+-----------+----------+--------- emp_id | integer | | not null | @@ -71,18 
+78,18 @@ SELECT * FROM employee_directory WHERE emp_email = 'bob@example.com'; -- Expect -- Validate indexes on sales_data \di *sales* - List of relations - Schema | Name | Type | Owner | Table ---------+--------------------------+-------+-------+------------ - public | conditional_sales_idx | index | rocky | sales_data - public | func_sales_amount_idx | index | rocky | sales_data - public | partial_sales_idx | index | rocky | sales_data - public | renamed_sales_region_idx | index | rocky | sales_data - public | sales_data_pkey | index | rocky | sales_data + List of relations + Schema | Name | Type | Owner | Table +--------+--------------------------+-------+---------+------------ + s615 | conditional_sales_idx | index | appuser | sales_data + s615 | func_sales_amount_idx | index | appuser | sales_data + s615 | partial_sales_idx | index | appuser | sales_data + s615 | renamed_sales_region_idx | index | appuser | sales_data + s615 | sales_data_pkey | index | appuser | sales_data (5 rows) \d sales_data - Table "public.sales_data" + Table "s615.sales_data" Column | Type | Collation | Nullable | Default -------------+-----------------------+-----------+----------+--------- sale_id | integer | | not null | @@ -107,7 +114,7 @@ SELECT * FROM sales_data WHERE sale_amount * 2 = 300.0; -- Expect 1 row with sal \di *concurrent* Did not find any relation named "*concurrent*". \d concurrent_idx_tbl - Table "public.concurrent_idx_tbl" + Table "s615.concurrent_idx_tbl" Column | Type | Collation | Nullable | Default --------+------------------------+-----------+----------+--------- id | integer | | | @@ -165,28 +172,28 @@ ERROR: index "concurrent_idx_tbl_name_idx" does not exist DROP INDEX CONCURRENTLY concurrent_unique_idx_tbl_id_idx; --error (since this did not replicate to n2) ERROR: index "concurrent_unique_idx_tbl_id_idx" does not exist -- Validate replication sets for primary key-related tables -EXECUTE spocktab('product_catalog'); -- Expect product_catalog in default set +SELECT * FROM get_table_repset_info('product_catalog'); -- Expect product_catalog in default set nspname | relname | set_name ---------+-----------------+---------- - public | product_catalog | default + s615 | product_catalog | default (1 row) -EXECUTE spocktab('employee_directory'); -- Expect employee_directory in default set +SELECT * FROM get_table_repset_info('employee_directory'); -- Expect employee_directory in default set nspname | relname | set_name ---------+--------------------+---------- - public | employee_directory | default + s615 | employee_directory | default (1 row) -EXECUTE spocktab('sales_data'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('sales_data'); -- Expect sales_data in default set nspname | relname | set_name ---------+------------+---------- - public | sales_data | default + s615 | sales_data | default (1 row) -EXECUTE spocktab('concurrent_idx_tbl'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('concurrent_idx_tbl'); -- Expect sales_data in default set nspname | relname | set_name ---------+--------------------+--------------------- - public | concurrent_idx_tbl | default_insert_only + s615 | concurrent_idx_tbl | default_insert_only (1 row) DROP TABLE product_catalog CASCADE; diff --git a/t/auto_ddl/6155b_index_validate_n2.sql b/t/auto_ddl/6155b_index_validate_n2.sql index b2d53b4..58af39b 100644 --- a/t/auto_ddl/6155b_index_validate_n2.sql +++ b/t/auto_ddl/6155b_index_validate_n2.sql @@ -1,5 +1,8 @@ --- Prepared statement for spock.tables to 
list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +SET ROLE appuser; + +SET search_path TO s615, public; -- Validate and drop indexes on n2 @@ -50,10 +53,10 @@ DROP INDEX CONCURRENTLY concurrent_idx_tbl_name_idx; --error (since this did not DROP INDEX CONCURRENTLY concurrent_unique_idx_tbl_id_idx; --error (since this did not replicate to n2) -- Validate replication sets for primary key-related tables -EXECUTE spocktab('product_catalog'); -- Expect product_catalog in default set -EXECUTE spocktab('employee_directory'); -- Expect employee_directory in default set -EXECUTE spocktab('sales_data'); -- Expect sales_data in default set -EXECUTE spocktab('concurrent_idx_tbl'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('product_catalog'); -- Expect product_catalog in default set +SELECT * FROM get_table_repset_info('employee_directory'); -- Expect employee_directory in default set +SELECT * FROM get_table_repset_info('sales_data'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('concurrent_idx_tbl'); -- Expect sales_data in default set DROP TABLE product_catalog CASCADE; DROP TABLE sales_data CASCADE; diff --git a/t/auto_ddl/6155c_index_validate_drop_n1.out b/t/auto_ddl/6155c_index_validate_drop_n1.out index 9ea3c5c..8a02059 100644 --- a/t/auto_ddl/6155c_index_validate_drop_n1.out +++ b/t/auto_ddl/6155c_index_validate_drop_n1.out @@ -1,12 +1,15 @@ -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6155b ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; - relid | nspname | relname | set_name --------+---------+---------+---------- -(0 rows) +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) --- none of these tables should exist. +SET ROLE appuser; +SET +SET search_path TO s615, public; +SET -- Validate indexes on product_catalog, should not exist \di *product_catalog_* Did not find any relation named "*product_catalog_*". @@ -27,3 +30,9 @@ Did not find any relation named "sales_data". Did not find any relation named "*concurrent*". \d concurrent_idx_tbl Did not find any relation named "concurrent_idx_tbl". +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s615 CASCADE; +INFO: DDL statement replicated. +DROP SCHEMA diff --git a/t/auto_ddl/6155c_index_validate_drop_n1.sql b/t/auto_ddl/6155c_index_validate_drop_n1.sql index 22bc68e..3985066 100644 --- a/t/auto_ddl/6155c_index_validate_drop_n1.sql +++ b/t/auto_ddl/6155c_index_validate_drop_n1.sql @@ -1,9 +1,10 @@ -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6155b +SELECT pg_sleep(1);--to ensure all objects are replicated ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; --- none of these tables should exist. 
+SET ROLE appuser; + +SET search_path TO s615, public; -- Validate indexes on product_catalog, should not exist \di *product_catalog_* @@ -20,3 +21,7 @@ SELECT * FROM spock.tables ORDER BY relid; -- Validate concurrently created indexes on concurrent_idx_tbl, should not exist \di *concurrent* \d concurrent_idx_tbl + +RESET ROLE; +--dropping the schema +DROP SCHEMA s615 CASCADE; \ No newline at end of file diff --git a/t/auto_ddl/6166a_views_materialized_views_n1.out b/t/auto_ddl/6166a_views_materialized_views_n1.out index a6c74db..b728d48 100644 --- a/t/auto_ddl/6166a_views_materialized_views_n1.out +++ b/t/auto_ddl/6166a_views_materialized_views_n1.out @@ -1,8 +1,26 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +--creating the necessary pre-reqs and then switching to the appuser role -- Create user schema for testing CREATE SCHEMA test_schema; INFO: DDL statement replicated. CREATE SCHEMA -SET search_path TO test_schema, public; +CREATE SCHEMA IF NOT EXISTS s616; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA s616 TO appuser; +INFO: DDL statement replicated. +GRANT +GRANT ALL PRIVILEGES ON SCHEMA test_schema TO appuser; +INFO: DDL statement replicated. +GRANT +SET ROLE appuser; +SET +SET search_path TO test_schema, s616; SET -- Create a base table with a primary key CREATE TABLE test_tbl ( @@ -97,38 +115,37 @@ CREATE MATERIALIZED VIEW mv_test_view_tablespace TABLESPACE pg_default AS SELECT id, name, age FROM test_tbl WHERE age > 30; WARNING: DDL statement replicated, but could be unsafe. SELECT 1 --- Reset search_path to default -RESET search_path; -RESET +SET search_path TO s616, test_schema; +SET -- Create a simple view in the default schema -CREATE VIEW public.view_test_default AS +CREATE VIEW s616.view_test_default AS SELECT * FROM test_schema.test_tbl_no_pk; INFO: DDL statement replicated. CREATE VIEW -- Create or replace a view in the default schema -CREATE OR REPLACE VIEW public.view_test_default AS +CREATE OR REPLACE VIEW s616.view_test_default AS SELECT id, description FROM test_schema.test_tbl_no_pk WHERE id > 1; INFO: DDL statement replicated. CREATE VIEW --creating views and materialized views that depend on other views -- Create a view that depends on another view CREATE VIEW test_schema.view_depends_on_default AS -SELECT id, description FROM public.view_test_default WHERE id > 1; +SELECT id, description FROM s616.view_test_default WHERE id > 1; INFO: DDL statement replicated. CREATE VIEW -- Create a materialized view that depends on a view in another schema -CREATE MATERIALIZED VIEW public.mv_depends_on_test_schema AS +CREATE MATERIALIZED VIEW s616.mv_depends_on_test_schema AS SELECT id, name, age FROM test_schema.mv_test_view; WARNING: DDL statement replicated, but could be unsafe. SELECT 1 --- Create a new view that depends on the materialized view public.mv_depends_on_test_schema -CREATE VIEW public.view_depends_on_mv AS -SELECT id, name FROM public.mv_depends_on_test_schema WHERE age > 30; +-- Create a new view that depends on the materialized view s616.mv_depends_on_test_schema +CREATE VIEW s616.view_depends_on_mv AS +SELECT id, name FROM s616.mv_depends_on_test_schema WHERE age > 30; INFO: DDL statement replicated. 
CREATE VIEW -- Create a new materialized view that depends on a regular view -CREATE MATERIALIZED VIEW public.mv_depends_on_mv AS -SELECT id, name FROM public.view_depends_on_mv; +CREATE MATERIALIZED VIEW s616.mv_depends_on_mv AS +SELECT id, name FROM s616.view_depends_on_mv; WARNING: DDL statement replicated, but could be unsafe. SELECT 1 -- Validations @@ -145,7 +162,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 25; -- Expect 2 rows: Alice, Carol @@ -168,7 +185,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; -- Expect 1 row: Carol @@ -218,7 +235,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl; + FROM test_tbl; Options: security_barrier=true -- Expect 3 rows: Alice, Bob, Carol @@ -242,7 +259,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 25; Options: check_option=local @@ -266,7 +283,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; Access method: heap @@ -289,7 +306,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; Access method: heap @@ -312,7 +329,7 @@ View definition: SELECT id AS person_id, name AS person_name, age AS person_age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; Access method: heap @@ -335,7 +352,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; Access method: heap @@ -358,7 +375,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; Access method: heap Options: fillfactor=70 @@ -382,7 +399,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; Access method: heap @@ -394,8 +411,8 @@ SELECT * FROM test_schema.mv_test_view_tablespace ORDER BY id; (1 row) -- Validation for view_test_default -\d+ public.view_test_default - View "public.view_test_default" +\d+ s616.view_test_default + View "s616.view_test_default" Column | Type | Collation | Nullable | Default | Storage | Description -------------+---------+-----------+----------+---------+----------+------------- id | integer | | | | plain | @@ -403,11 +420,11 @@ SELECT * FROM test_schema.mv_test_view_tablespace ORDER BY id; View definition: SELECT id, description - FROM test_schema.test_tbl_no_pk + FROM test_tbl_no_pk WHERE id > 1; -- Expect 1 row: Second description -SELECT * FROM public.view_test_default ORDER BY id; +SELECT * FROM s616.view_test_default ORDER BY id; id | description ----+-------------------- 2 | Second description @@ -434,8 +451,8 @@ SELECT * FROM test_schema.view_depends_on_default ORDER BY id; (1 row) -- Validation for mv_depends_on_test_schema -\d+ public.mv_depends_on_test_schema - Materialized view "public.mv_depends_on_test_schema" +\d+ s616.mv_depends_on_test_schema + Materialized view "s616.mv_depends_on_test_schema" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | | | plain | | | @@ -445,19 +462,19 @@ View definition: SELECT id, name, age - FROM test_schema.mv_test_view; + FROM mv_test_view; Access method: heap -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_test_schema ORDER BY id; +SELECT * FROM s616.mv_depends_on_test_schema ORDER BY id; id | name | age 
----+-------+----- 3 | Carol | 35 (1 row) -- Validation for view_depends_on_mv -\d+ public.view_depends_on_mv - View "public.view_depends_on_mv" +\d+ s616.view_depends_on_mv + View "s616.view_depends_on_mv" Column | Type | Collation | Nullable | Default | Storage | Description --------+-----------------------+-----------+----------+---------+----------+------------- id | integer | | | | plain | @@ -469,15 +486,15 @@ View definition: WHERE age > 30; -- Expect 1 row: Carol -SELECT * FROM public.view_depends_on_mv ORDER BY id; +SELECT * FROM s616.view_depends_on_mv ORDER BY id; id | name ----+------- 3 | Carol (1 row) -- Validation for mv_depends_on_mv -\d+ public.mv_depends_on_mv - Materialized view "public.mv_depends_on_mv" +\d+ s616.mv_depends_on_mv + Materialized view "s616.mv_depends_on_mv" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | | | plain | | | @@ -489,7 +506,7 @@ View definition: Access method: heap -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_mv ORDER BY id; +SELECT * FROM s616.mv_depends_on_mv ORDER BY id; id | name ----+------- 3 | Carol diff --git a/t/auto_ddl/6166a_views_materialized_views_n1.sql b/t/auto_ddl/6166a_views_materialized_views_n1.sql index 3b874a1..25a68cd 100644 --- a/t/auto_ddl/6166a_views_materialized_views_n1.sql +++ b/t/auto_ddl/6166a_views_materialized_views_n1.sql @@ -1,6 +1,17 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + +--creating the necessary pre-reqs and then switching to the appuser role + -- Create user schema for testing CREATE SCHEMA test_schema; -SET search_path TO test_schema, public; +CREATE SCHEMA IF NOT EXISTS s616; + +GRANT ALL PRIVILEGES ON SCHEMA s616 TO appuser; +GRANT ALL PRIVILEGES ON SCHEMA test_schema TO appuser; + +SET ROLE appuser; + +SET search_path TO test_schema, s616; -- Create a base table with a primary key CREATE TABLE test_tbl ( @@ -79,33 +90,32 @@ SELECT id, name, age FROM test_tbl WHERE age > 30; CREATE MATERIALIZED VIEW mv_test_view_tablespace TABLESPACE pg_default AS SELECT id, name, age FROM test_tbl WHERE age > 30; --- Reset search_path to default -RESET search_path; +SET search_path TO s616, test_schema; -- Create a simple view in the default schema -CREATE VIEW public.view_test_default AS +CREATE VIEW s616.view_test_default AS SELECT * FROM test_schema.test_tbl_no_pk; -- Create or replace a view in the default schema -CREATE OR REPLACE VIEW public.view_test_default AS +CREATE OR REPLACE VIEW s616.view_test_default AS SELECT id, description FROM test_schema.test_tbl_no_pk WHERE id > 1; --creating views and materialized views that depend on other views -- Create a view that depends on another view CREATE VIEW test_schema.view_depends_on_default AS -SELECT id, description FROM public.view_test_default WHERE id > 1; +SELECT id, description FROM s616.view_test_default WHERE id > 1; -- Create a materialized view that depends on a view in another schema -CREATE MATERIALIZED VIEW public.mv_depends_on_test_schema AS +CREATE MATERIALIZED VIEW s616.mv_depends_on_test_schema AS SELECT id, name, age FROM test_schema.mv_test_view; --- Create a new view that depends on the materialized view public.mv_depends_on_test_schema -CREATE VIEW public.view_depends_on_mv AS -SELECT id, name FROM public.mv_depends_on_test_schema WHERE age > 30; +-- Create a new view that depends on the materialized view 
s616.mv_depends_on_test_schema +CREATE VIEW s616.view_depends_on_mv AS +SELECT id, name FROM s616.mv_depends_on_test_schema WHERE age > 30; -- Create a new materialized view that depends on a regular view -CREATE MATERIALIZED VIEW public.mv_depends_on_mv AS -SELECT id, name FROM public.view_depends_on_mv; +CREATE MATERIALIZED VIEW s616.mv_depends_on_mv AS +SELECT id, name FROM s616.view_depends_on_mv; -- Validations -- Validate structure and data in views @@ -165,9 +175,9 @@ SELECT * FROM test_schema.mv_test_view_storage ORDER BY id; SELECT * FROM test_schema.mv_test_view_tablespace ORDER BY id; -- Validation for view_test_default -\d+ public.view_test_default +\d+ s616.view_test_default -- Expect 1 row: Second description -SELECT * FROM public.view_test_default ORDER BY id; +SELECT * FROM s616.view_test_default ORDER BY id; -- Validation for view_depends_on_default \d+ test_schema.view_depends_on_default @@ -175,16 +185,16 @@ SELECT * FROM public.view_test_default ORDER BY id; SELECT * FROM test_schema.view_depends_on_default ORDER BY id; -- Validation for mv_depends_on_test_schema -\d+ public.mv_depends_on_test_schema +\d+ s616.mv_depends_on_test_schema -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_test_schema ORDER BY id; +SELECT * FROM s616.mv_depends_on_test_schema ORDER BY id; -- Validation for view_depends_on_mv -\d+ public.view_depends_on_mv +\d+ s616.view_depends_on_mv -- Expect 1 row: Carol -SELECT * FROM public.view_depends_on_mv ORDER BY id; +SELECT * FROM s616.view_depends_on_mv ORDER BY id; -- Validation for mv_depends_on_mv -\d+ public.mv_depends_on_mv +\d+ s616.mv_depends_on_mv -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_mv ORDER BY id; +SELECT * FROM s616.mv_depends_on_mv ORDER BY id; diff --git a/t/auto_ddl/6166b_view_mat_views_validate_n2.out b/t/auto_ddl/6166b_view_mat_views_validate_n2.out index 6b7275b..87452af 100644 --- a/t/auto_ddl/6166b_view_mat_views_validate_n2.out +++ b/t/auto_ddl/6166b_view_mat_views_validate_n2.out @@ -263,8 +263,8 @@ SELECT * FROM test_schema.mv_test_view_tablespace ORDER BY id; (1 row) -- Validation for view_test_default -\d+ public.view_test_default - View "public.view_test_default" +\d+ s616.view_test_default + View "s616.view_test_default" Column | Type | Collation | Nullable | Default | Storage | Description -------------+---------+-----------+----------+---------+----------+------------- id | integer | | | | plain | @@ -276,7 +276,7 @@ View definition: WHERE id > 1; -- Expect 1 row: Second description -SELECT * FROM public.view_test_default ORDER BY id; +SELECT * FROM s616.view_test_default ORDER BY id; id | description ----+-------------------- 2 | Second description @@ -292,7 +292,7 @@ SELECT * FROM public.view_test_default ORDER BY id; View definition: SELECT id, description - FROM view_test_default + FROM s616.view_test_default WHERE id > 1; -- Expect 1 row: Second description @@ -303,8 +303,8 @@ SELECT * FROM test_schema.view_depends_on_default ORDER BY id; (1 row) -- Validation for mv_depends_on_test_schema -\d+ public.mv_depends_on_test_schema - Materialized view "public.mv_depends_on_test_schema" +\d+ s616.mv_depends_on_test_schema + Materialized view "s616.mv_depends_on_test_schema" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | | | plain | | | @@ -318,15 +318,15 @@ View definition: Access method: heap -- Expect 
1 row: Carol -SELECT * FROM public.mv_depends_on_test_schema ORDER BY id; +SELECT * FROM s616.mv_depends_on_test_schema ORDER BY id; id | name | age ----+-------+----- 3 | Carol | 35 (1 row) -- Validation for view_depends_on_mv -\d+ public.view_depends_on_mv - View "public.view_depends_on_mv" +\d+ s616.view_depends_on_mv + View "s616.view_depends_on_mv" Column | Type | Collation | Nullable | Default | Storage | Description --------+-----------------------+-----------+----------+---------+----------+------------- id | integer | | | | plain | @@ -334,19 +334,19 @@ SELECT * FROM public.mv_depends_on_test_schema ORDER BY id; View definition: SELECT id, name - FROM mv_depends_on_test_schema + FROM s616.mv_depends_on_test_schema WHERE age > 30; -- Expect 1 row: Carol -SELECT * FROM public.view_depends_on_mv ORDER BY id; +SELECT * FROM s616.view_depends_on_mv ORDER BY id; id | name ----+------- 3 | Carol (1 row) -- Validation for mv_depends_on_mv -\d+ public.mv_depends_on_mv - Materialized view "public.mv_depends_on_mv" +\d+ s616.mv_depends_on_mv + Materialized view "s616.mv_depends_on_mv" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | | | plain | | | @@ -354,25 +354,25 @@ SELECT * FROM public.view_depends_on_mv ORDER BY id; View definition: SELECT id, name - FROM view_depends_on_mv; + FROM s616.view_depends_on_mv; Access method: heap -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_mv ORDER BY id; +SELECT * FROM s616.mv_depends_on_mv ORDER BY id; id | name ----+------- 3 | Carol (1 row) -- Drop views and materialized views --- Drop the materialized view in the public schema that depends on a view in the test_schema -DROP MATERIALIZED VIEW IF EXISTS public.mv_depends_on_test_schema CASCADE; +-- Drop the materialized view in the s616 schema that depends on a view in the test_schema +DROP MATERIALIZED VIEW IF EXISTS s616.mv_depends_on_test_schema CASCADE; NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to view view_depends_on_mv -drop cascades to materialized view mv_depends_on_mv +DETAIL: drop cascades to view s616.view_depends_on_mv +drop cascades to materialized view s616.mv_depends_on_mv INFO: DDL statement replicated. DROP MATERIALIZED VIEW --- Drop the view in the test_schema that depends on another view in the public schema +-- Drop the view in the test_schema that depends on another view in the s616 schema DROP VIEW test_schema.view_depends_on_default CASCADE; INFO: DDL statement replicated. DROP VIEW @@ -382,7 +382,7 @@ DROP VIEW IF EXISTS test_schema.view_test_1, test_schema.view_recursive, test_schema.view_with_options, test_schema.view_with_check_option, - public.view_test_default CASCADE; + s616.view_test_default CASCADE; INFO: DDL statement replicated. DROP VIEW DROP MATERIALIZED VIEW IF EXISTS test_schema.mv_test_view, @@ -402,7 +402,3 @@ DROP TABLE IF EXISTS test_schema.test_tbl_no_pk CASCADE; NOTICE: drop cascades to table test_schema.test_tbl_no_pk membership in replication set default_insert_only INFO: DDL statement replicated. DROP TABLE --- Drop the schema -DROP SCHEMA test_schema CASCADE; -INFO: DDL statement replicated. 
-DROP SCHEMA diff --git a/t/auto_ddl/6166b_view_mat_views_validate_n2.sql b/t/auto_ddl/6166b_view_mat_views_validate_n2.sql index 6aa848d..365ebac 100644 --- a/t/auto_ddl/6166b_view_mat_views_validate_n2.sql +++ b/t/auto_ddl/6166b_view_mat_views_validate_n2.sql @@ -59,9 +59,9 @@ SELECT * FROM test_schema.mv_test_view_storage ORDER BY id; SELECT * FROM test_schema.mv_test_view_tablespace ORDER BY id; -- Validation for view_test_default -\d+ public.view_test_default +\d+ s616.view_test_default -- Expect 1 row: Second description -SELECT * FROM public.view_test_default ORDER BY id; +SELECT * FROM s616.view_test_default ORDER BY id; -- Validation for view_depends_on_default \d+ test_schema.view_depends_on_default @@ -69,26 +69,26 @@ SELECT * FROM public.view_test_default ORDER BY id; SELECT * FROM test_schema.view_depends_on_default ORDER BY id; -- Validation for mv_depends_on_test_schema -\d+ public.mv_depends_on_test_schema +\d+ s616.mv_depends_on_test_schema -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_test_schema ORDER BY id; +SELECT * FROM s616.mv_depends_on_test_schema ORDER BY id; -- Validation for view_depends_on_mv -\d+ public.view_depends_on_mv +\d+ s616.view_depends_on_mv -- Expect 1 row: Carol -SELECT * FROM public.view_depends_on_mv ORDER BY id; +SELECT * FROM s616.view_depends_on_mv ORDER BY id; -- Validation for mv_depends_on_mv -\d+ public.mv_depends_on_mv +\d+ s616.mv_depends_on_mv -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_mv ORDER BY id; +SELECT * FROM s616.mv_depends_on_mv ORDER BY id; -- Drop views and materialized views --- Drop the materialized view in the public schema that depends on a view in the test_schema -DROP MATERIALIZED VIEW IF EXISTS public.mv_depends_on_test_schema CASCADE; +-- Drop the materialized view in the s616 schema that depends on a view in the test_schema +DROP MATERIALIZED VIEW IF EXISTS s616.mv_depends_on_test_schema CASCADE; --- Drop the view in the test_schema that depends on another view in the public schema +-- Drop the view in the test_schema that depends on another view in the s616 schema DROP VIEW test_schema.view_depends_on_default CASCADE; -- Drop all other views and materialized views @@ -97,7 +97,7 @@ DROP VIEW IF EXISTS test_schema.view_test_1, test_schema.view_recursive, test_schema.view_with_options, test_schema.view_with_check_option, - public.view_test_default CASCADE; + s616.view_test_default CASCADE; DROP MATERIALIZED VIEW IF EXISTS test_schema.mv_test_view, test_schema.mv_test_view_colnames, @@ -109,5 +109,3 @@ DROP MATERIALIZED VIEW IF EXISTS test_schema.mv_test_view, DROP TABLE IF EXISTS test_schema.test_tbl CASCADE; DROP TABLE IF EXISTS test_schema.test_tbl_no_pk CASCADE; --- Drop the schema -DROP SCHEMA test_schema CASCADE; diff --git a/t/auto_ddl/6166c_views_mat_view_validate_n1.out b/t/auto_ddl/6166c_views_mat_view_validate_n1.out index 3e3946a..00d6300 100644 --- a/t/auto_ddl/6166c_views_mat_view_validate_n1.out +++ b/t/auto_ddl/6166c_views_mat_view_validate_n1.out @@ -23,13 +23,23 @@ Did not find any relation named "test_schema.mv_test_view_method". Did not find any relation named "test_schema.mv_test_view_storage". \d test_schema.mv_test_view_tablespace Did not find any relation named "test_schema.mv_test_view_tablespace". -\d public.view_test_default -Did not find any relation named "public.view_test_default". +\d s616.view_test_default +Did not find any relation named "s616.view_test_default". 
\d test_schema.view_depends_on_default Did not find any relation named "test_schema.view_depends_on_default". -\d public.mv_depends_on_test_schema -Did not find any relation named "public.mv_depends_on_test_schema". -\d public.view_depends_on_mv -Did not find any relation named "public.view_depends_on_mv". -\d public.mv_depends_on_mv -Did not find any relation named "public.mv_depends_on_mv". +\d s616.mv_depends_on_test_schema +Did not find any relation named "s616.mv_depends_on_test_schema". +\d s616.view_depends_on_mv +Did not find any relation named "s616.view_depends_on_mv". +\d s616.mv_depends_on_mv +Did not find any relation named "s616.mv_depends_on_mv". +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s616 CASCADE; +INFO: DDL statement replicated. +DROP SCHEMA +-- Drop the schema +DROP SCHEMA test_schema CASCADE; +INFO: DDL statement replicated. +DROP SCHEMA diff --git a/t/auto_ddl/6166c_views_mat_view_validate_n1.sql b/t/auto_ddl/6166c_views_mat_view_validate_n1.sql index cade815..a0d8cfc 100644 --- a/t/auto_ddl/6166c_views_mat_view_validate_n1.sql +++ b/t/auto_ddl/6166c_views_mat_view_validate_n1.sql @@ -24,12 +24,18 @@ \d test_schema.mv_test_view_tablespace -\d public.view_test_default +\d s616.view_test_default \d test_schema.view_depends_on_default -\d public.mv_depends_on_test_schema +\d s616.mv_depends_on_test_schema -\d public.view_depends_on_mv +\d s616.view_depends_on_mv -\d public.mv_depends_on_mv \ No newline at end of file +\d s616.mv_depends_on_mv + +RESET ROLE; +--dropping the schema +DROP SCHEMA s616 CASCADE; +-- Drop the schema +DROP SCHEMA test_schema CASCADE; \ No newline at end of file From b58fdcdcfc967e42ed5d6200c0e207a1ce06950d Mon Sep 17 00:00:00 2001 From: "A. Hayee Bhatti" Date: Mon, 28 Oct 2024 21:17:45 +0500 Subject: [PATCH 32/48] [AutoDDL] Update 6177/6666 a,b,c scripts to execute via nonsuperuser and adjust outputs Updated AutoDDL SQL scripts 6177 and 6666(a, b, c) to execute primarily under the non-superuser (appuser) role, switching to superuser where necessary. Adjusted the related SQL scripts and expected output files to reflect this change. 
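The common pattern these scripts now follow is to create the test role with only the privileges it needs, run the DDL under SET ROLE, and return to the superuser solely for GUC changes and final cleanup; the session-local spocktab prepared statement is replaced by the public.get_table_repset_info() helper, presumably so that EXECUTE on it can be granted to the role. A condensed sketch of that setup, as it appears in 6177a (the 6666 scripts use appuser3 and schema s1 instead of appuser2 and s617):

    CREATE ROLE appuser2 LOGIN;
    CREATE SCHEMA IF NOT EXISTS s617;
    GRANT ALL PRIVILEGES ON SCHEMA s617 TO appuser2;
    GRANT EXECUTE ON FUNCTION public.get_table_repset_info(TEXT) TO appuser2;

    SET ROLE appuser2;
    SET search_path TO s617, public;
    -- non-superuser DDL under test runs here
    SELECT * FROM get_table_repset_info('tab');
    RESET ROLE;  -- back to the superuser for ALTER SYSTEM and cleanup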
--- ...low_ddl_from_func_proc_create_alter_n1.out | 152 +++++---- ...low_ddl_from_func_proc_create_alter_n1.sql | 38 ++- ...ow_ddl_from_func_proc_validate_drop_n2.out | 84 +++-- ...ow_ddl_from_func_proc_validate_drop_n2.sql | 24 +- ...c_allow_ddl_from_func_proc_validate_n1.out | 36 +- ...c_allow_ddl_from_func_proc_validate_n1.sql | 7 + t/auto_ddl/6666a_all_objects_create_n1.out | 310 ++++++++++-------- t/auto_ddl/6666a_all_objects_create_n1.sql | 148 +++++---- ...6666b_all_objects_validate_and_drop_n2.out | 92 +++--- t/auto_ddl/6666c_all_objects_validate_n1.out | 13 + t/auto_ddl/6666c_all_objects_validate_n1.sql | 6 + 11 files changed, 559 insertions(+), 351 deletions(-) diff --git a/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.out b/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.out index 10d4e4d..2038845 100644 --- a/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.out +++ b/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.out @@ -1,6 +1,38 @@ --- Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +--creating the necessary pre-reqs and then switching to the appuser2 role +CREATE ROLE appuser2 LOGIN; +INFO: DDL statement replicated. +CREATE ROLE +CREATE SCHEMA IF NOT EXISTS s617; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA public TO appuser2; +INFO: DDL statement replicated. +GRANT +-- Grant execution rights to the non-superuser +GRANT EXECUTE ON FUNCTION public.get_table_repset_info(TEXT) TO appuser2; +INFO: DDL statement replicated. +GRANT +GRANT ALL PRIVILEGES ON SCHEMA s617 TO appuser2; +INFO: DDL statement replicated. +GRANT +DO $$ +DECLARE + db_name TEXT; +BEGIN + -- Get the name of the current database + db_name := current_database(); + + -- Dynamically execute the GRANT command for appuser2 + EXECUTE format('GRANT CREATE ON DATABASE %I TO appuser2', db_name); +END $$; +INFO: DDL statement replicated. +DO -- Turn on the allow_ddl_from_functions GUC ALTER SYSTEM SET spock.allow_ddl_from_functions = on; WARNING: This DDL statement will not be replicated. @@ -23,6 +55,10 @@ SHOW spock.allow_ddl_from_functions; on (1 row) +SET ROLE appuser2; +SET +SET search_path TO s617, public; +SET -- Create simple tables CREATE TABLE tab1_proc_on (id INT PRIMARY KEY, col1 TEXT, col2 INT); INFO: DDL statement replicated. @@ -118,16 +154,16 @@ FOR EACH ROW EXECUTE FUNCTION employee_insert_trigger(); INFO: DDL statement replicated. CREATE TRIGGER -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); nspname | relname | set_name ---------+---------------+--------------------- - public | tab1_proc_on | default - public | tab2_func_on | default - public | tab3_anon_on | default_insert_only - public | tab6_anon_off | default - public | tab4_proc_off | default - public | tab5_func_off | default - public | tab_emp | default + s617 | tab1_proc_on | default + s617 | tab2_func_on | default + s617 | tab3_anon_on | default_insert_only + s617 | tab6_anon_off | default + s617 | tab4_proc_off | default + s617 | tab5_func_off | default + s617 | tab_emp | default (7 rows) -- Add a primary key to the table tab3 within an anonymous block @@ -187,21 +223,23 @@ INFO: DDL statement replicated. INFO: DDL statement replicated. INFO: DDL statement replicated. 
DO -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); nspname | relname | set_name ---------+---------------+---------- - public | tab1_proc_on | default - public | tab2_func_on | default - public | tab3_anon_on | default - public | tab6_anon_off | default - public | tab4_proc_off | default - public | tab5_func_off | default - public | tab_emp | default + s617 | tab1_proc_on | default + s617 | tab2_func_on | default + s617 | tab3_anon_on | default + s617 | tab6_anon_off | default + s617 | tab4_proc_off | default + s617 | tab5_func_off | default + s617 | tab_emp | default (7 rows) ------ -- Turning allow_ddl_from_functions GUC off ------ +RESET ROLE; +RESET -- Turn off the allow_ddl_from_functions GUC ALTER SYSTEM SET spock.allow_ddl_from_functions = off; WARNING: This DDL statement will not be replicated. @@ -224,6 +262,10 @@ SHOW spock.allow_ddl_from_functions; off (1 row) +SET ROLE appuser2; +SET +SET search_path TO s617, public; +SET -- Run anonymous block to create tab7 DO $$ BEGIN @@ -279,25 +321,25 @@ DO List of functions Schema | Name | Result data type | Argument data types | Type --------+--------------------------+------------------+--------------------------------------------------------------------------------------------------------------------+------ - public | add_column_to_table_proc | | IN table_name character varying, IN varname character varying, IN vartype character varying, INOUT success boolean | proc + s617 | add_column_to_table_proc | | IN table_name character varying, IN varname character varying, IN vartype character varying, INOUT success boolean | proc (1 row) \df remove_column* List of functions Schema | Name | Result data type | Argument data types | Type --------+--------------------------+------------------+-------------------------------------------------------------+------ - public | remove_column_from_table | boolean | table_name character varying, column_name character varying | func + s617 | remove_column_from_table | boolean | table_name character varying, column_name character varying | func (1 row) \df employee_insert_trigger List of functions Schema | Name | Result data type | Argument data types | Type --------+-------------------------+------------------+---------------------+------ - public | employee_insert_trigger | trigger | | func + s617 | employee_insert_trigger | trigger | | func (1 row) \d+ tab1_proc_on - Table "public.tab1_proc_on" + Table "s617.tab1_proc_on" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+--------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -311,7 +353,7 @@ Indexes: Access method: heap \d+ tab2_func_on - Table "public.tab2_func_on" + Table "s617.tab2_func_on" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -325,7 +367,7 @@ Indexes: Access method: heap \d+ tab3_anon_on - Table "public.tab3_anon_on" + Table "s617.tab3_anon_on" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -336,7 +378,7 @@ Indexes: Access method: heap \d+ tab4_proc_off - Table 
"public.tab4_proc_off" + Table "s617.tab4_proc_off" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+--------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -350,7 +392,7 @@ Indexes: Access method: heap \d+ tab5_func_off - Table "public.tab5_func_off" + Table "s617.tab5_func_off" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -364,7 +406,7 @@ Indexes: Access method: heap \d+ tab6_anon_off - Table "public.tab6_anon_off" + Table "s617.tab6_anon_off" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- col1 | text | | | | extended | | | @@ -372,7 +414,7 @@ Access method: heap Access method: heap \d+ tab7_anon_off - Table "public.tab7_anon_off" + Table "s617.tab7_anon_off" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | | | plain | | | @@ -381,7 +423,7 @@ Access method: heap Access method: heap \d+ tab_emp - Table "public.tab_emp" + Table "s617.tab_emp" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -394,43 +436,43 @@ Triggers: Access method: heap \dn john -List of schemas - Name | Owner -------+------- - john | rocky + List of schemas + Name | Owner +------+---------- + john | appuser2 (1 row) \dn alice -List of schemas - Name | Owner --------+------- - alice | rocky + List of schemas + Name | Owner +-------+---------- + alice | appuser2 (1 row) \dn cena -List of schemas - Name | Owner -------+------- - cena | rocky + List of schemas + Name | Owner +------+---------- + cena | appuser2 (1 row) \dn wonderland - List of schemas - Name | Owner -------------+------- - wonderland | rocky + List of schemas + Name | Owner +------------+---------- + wonderland | appuser2 (1 row) -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); nspname | relname | set_name ---------+---------------+---------- - public | tab1_proc_on | default - public | tab2_func_on | default - public | tab3_anon_on | default - public | tab6_anon_off | default - public | tab4_proc_off | default - public | tab5_func_off | default - public | tab_emp | default - public | tab7_anon_off | + s617 | tab1_proc_on | default + s617 | tab2_func_on | default + s617 | tab3_anon_on | default + s617 | tab6_anon_off | default + s617 | tab4_proc_off | default + s617 | tab5_func_off | default + s617 | tab_emp | default + s617 | tab7_anon_off | (8 rows) diff --git a/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.sql b/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.sql index 69dfce5..8966c2a 100644 --- a/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.sql +++ b/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.sql @@ -1,5 +1,23 @@ --- Prepared statement for spock.tables to list tables and associated indexes 
-PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +--creating the necessary pre-reqs and then switching to the appuser2 role +CREATE ROLE appuser2 LOGIN; +CREATE SCHEMA IF NOT EXISTS s617; + +GRANT ALL PRIVILEGES ON SCHEMA public TO appuser2; +-- Grant execution rights to the non-superuser +GRANT EXECUTE ON FUNCTION public.get_table_repset_info(TEXT) TO appuser2; +GRANT ALL PRIVILEGES ON SCHEMA s617 TO appuser2; +DO $$ +DECLARE + db_name TEXT; +BEGIN + -- Get the name of the current database + db_name := current_database(); + + -- Dynamically execute the GRANT command for appuser2 + EXECUTE format('GRANT CREATE ON DATABASE %I TO appuser2', db_name); +END $$; -- Turn on the allow_ddl_from_functions GUC @@ -8,6 +26,10 @@ SELECT pg_reload_conf(); SELECT pg_sleep(0.5); SHOW spock.allow_ddl_from_functions; +SET ROLE appuser2; + +SET search_path TO s617, public; + -- Create simple tables CREATE TABLE tab1_proc_on (id INT PRIMARY KEY, col1 TEXT, col2 INT); CREATE TABLE tab2_func_on (id INT PRIMARY KEY, col1 TEXT, col2 INT, col3 TEXT, col4 INT, col5 TEXT, col6 INT, col7 TEXT, col8 INT); @@ -83,7 +105,7 @@ ON tab_emp FOR EACH ROW EXECUTE FUNCTION employee_insert_trigger(); -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); -- Add a primary key to the table tab3 within an anonymous block DO $$ @@ -116,18 +138,22 @@ BEGIN END $$; -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); ------ -- Turning allow_ddl_from_functions GUC off ------ - +RESET ROLE; -- Turn off the allow_ddl_from_functions GUC ALTER SYSTEM SET spock.allow_ddl_from_functions = off; SELECT pg_reload_conf(); SELECT pg_sleep(0.5); SHOW spock.allow_ddl_from_functions; +SET ROLE appuser2; + +SET search_path TO s617, public; + -- Run anonymous block to create tab7 DO $$ BEGIN @@ -177,4 +203,4 @@ $$; \dn cena \dn wonderland -EXECUTE spocktab('tab'); \ No newline at end of file +SELECT * FROM get_table_repset_info('tab'); \ No newline at end of file diff --git a/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.out b/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.out index 976ac45..e15e351 100644 --- a/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.out +++ b/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.out @@ -1,6 +1,9 @@ --- Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + -- Turn on the allow_ddl_from_functions GUC ALTER SYSTEM SET spock.allow_ddl_from_functions = on; WARNING: This DDL statement will not be replicated. 
@@ -23,30 +26,32 @@ SHOW spock.allow_ddl_from_functions; on (1 row) +SET search_path TO s617, public; +SET -- Validate replicated functions, procedures, tables \df add_column* List of functions Schema | Name | Result data type | Argument data types | Type --------+--------------------------+------------------+--------------------------------------------------------------------------------------------------------------------+------ - public | add_column_to_table_proc | | IN table_name character varying, IN varname character varying, IN vartype character varying, INOUT success boolean | proc + s617 | add_column_to_table_proc | | IN table_name character varying, IN varname character varying, IN vartype character varying, INOUT success boolean | proc (1 row) \df remove_column* List of functions Schema | Name | Result data type | Argument data types | Type --------+--------------------------+------------------+-------------------------------------------------------------+------ - public | remove_column_from_table | boolean | table_name character varying, column_name character varying | func + s617 | remove_column_from_table | boolean | table_name character varying, column_name character varying | func (1 row) \df employee_insert_trigger List of functions Schema | Name | Result data type | Argument data types | Type --------+-------------------------+------------------+---------------------+------ - public | employee_insert_trigger | trigger | | func + s617 | employee_insert_trigger | trigger | | func (1 row) \d+ tab1_proc_on - Table "public.tab1_proc_on" + Table "s617.tab1_proc_on" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+--------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -60,7 +65,7 @@ Indexes: Access method: heap \d+ tab2_func_on - Table "public.tab2_func_on" + Table "s617.tab2_func_on" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -74,7 +79,7 @@ Indexes: Access method: heap \d+ tab3_anon_on - Table "public.tab3_anon_on" + Table "s617.tab3_anon_on" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -85,7 +90,7 @@ Indexes: Access method: heap \d+ tab4_proc_off - Table "public.tab4_proc_off" + Table "s617.tab4_proc_off" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -96,7 +101,7 @@ Indexes: Access method: heap \d+ tab5_func_off - Table "public.tab5_func_off" + Table "s617.tab5_func_off" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -113,7 +118,7 @@ Indexes: Access method: heap \d+ tab6_anon_off - Table "public.tab6_anon_off" + Table "s617.tab6_anon_off" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats 
target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -126,7 +131,7 @@ Access method: heap \d+ tab7_anon_off Did not find any relation named "tab7_anon_off". \d+ tab_emp - Table "public.tab_emp" + Table "s617.tab_emp" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -139,10 +144,10 @@ Triggers: Access method: heap \dn john -List of schemas - Name | Owner -------+------- - john | rocky + List of schemas + Name | Owner +------+---------- + john | appuser2 (1 row) \dn alice @@ -152,10 +157,10 @@ List of schemas (0 rows) \dn cena -List of schemas - Name | Owner -------+------- - cena | rocky + List of schemas + Name | Owner +------+---------- + cena | appuser2 (1 row) \dn wonderland @@ -164,18 +169,22 @@ List of schemas ------+------- (0 rows) -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); nspname | relname | set_name ---------+---------------+---------- - public | tab1_proc_on | default - public | tab2_func_on | default - public | tab3_anon_on | default - public | tab6_anon_off | default - public | tab4_proc_off | default - public | tab5_func_off | default - public | tab_emp | default + s617 | tab1_proc_on | default + s617 | tab2_func_on | default + s617 | tab3_anon_on | default + s617 | tab6_anon_off | default + s617 | tab4_proc_off | default + s617 | tab5_func_off | default + s617 | tab_emp | default (7 rows) +SET ROLE appuser2; +SET +SET search_path TO s617, public; +SET -- Drop tables DO $$ BEGIN @@ -189,8 +198,6 @@ BEGIN EXECUTE 'DROP PROCEDURE add_column_to_table_proc'; EXECUTE 'DROP FUNCTION remove_column_from_table'; EXECUTE 'DROP FUNCTION employee_insert_trigger'; - EXECUTE 'DROP SCHEMA john'; - EXECUTE 'DROP SCHEMA cena'; END $$; NOTICE: drop cascades to table tab1_proc_on membership in replication set default @@ -210,8 +217,6 @@ INFO: DDL statement replicated. INFO: DDL statement replicated. INFO: DDL statement replicated. INFO: DDL statement replicated. -INFO: DDL statement replicated. -INFO: DDL statement replicated. DO DO $$ BEGIN @@ -221,7 +226,18 @@ $$; ERROR: table "tab7_anon_off" does not exist CONTEXT: SQL statement "DROP TABLE tab7_anon_off" PL/pgSQL function inline_code_block line 3 at EXECUTE ---should error out as these shouldn't be replicated to n2 +RESET ROLE; +RESET +DO $$ +BEGIN + EXECUTE 'DROP SCHEMA john'; + EXECUTE 'DROP SCHEMA cena'; +END +$$; +INFO: DDL statement replicated. +INFO: DDL statement replicated. 
+DO +--should error out as these shouldn't have been replicated to n2 DROP SCHEMA alice; ERROR: schema "alice" does not exist DROP SCHEMA wonderland; diff --git a/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.sql b/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.sql index 38eecfa..be97c94 100644 --- a/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.sql +++ b/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.sql @@ -1,5 +1,4 @@ --- Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1); -- Turn on the allow_ddl_from_functions GUC ALTER SYSTEM SET spock.allow_ddl_from_functions = on; @@ -7,6 +6,8 @@ SELECT pg_reload_conf(); SELECT pg_sleep(0.5); SHOW spock.allow_ddl_from_functions; +SET search_path TO s617, public; + -- Validate replicated functions, procedures, tables \df add_column* \df remove_column* @@ -23,7 +24,11 @@ SHOW spock.allow_ddl_from_functions; \dn alice \dn cena \dn wonderland -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); + +SET ROLE appuser2; + +SET search_path TO s617, public; -- Drop tables DO $$ BEGIN @@ -37,8 +42,6 @@ BEGIN EXECUTE 'DROP PROCEDURE add_column_to_table_proc'; EXECUTE 'DROP FUNCTION remove_column_from_table'; EXECUTE 'DROP FUNCTION employee_insert_trigger'; - EXECUTE 'DROP SCHEMA john'; - EXECUTE 'DROP SCHEMA cena'; END $$; @@ -47,6 +50,15 @@ BEGIN EXECUTE 'DROP TABLE tab7_anon_off'; --should not exist END $$; ---should error out as these shouldn't be replicated to n2 + +RESET ROLE; + +DO $$ +BEGIN + EXECUTE 'DROP SCHEMA john'; + EXECUTE 'DROP SCHEMA cena'; +END +$$; +--should error out as these shouldn't have been replicated to n2 DROP SCHEMA alice; DROP SCHEMA wonderland; \ No newline at end of file diff --git a/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.out b/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.out index 712d4fc..73bd375 100644 --- a/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.out +++ b/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.out @@ -1,5 +1,13 @@ +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + -- Validate replicated functions, procedures, tables -- No objects sould exist except tab7, schemas (alice,wonderland) +SET search_path TO s617, public; +SET \df add_column* List of functions Schema | Name | Result data type | Argument data types | Type @@ -31,7 +39,7 @@ Did not find any relation named "tab5_func_off". \d tab6_anon_off Did not find any relation named "tab6_anon_off". \d tab7_anon_off - Table "public.tab7_anon_off" + Table "s617.tab7_anon_off" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- id | integer | | | @@ -47,10 +55,10 @@ List of schemas (0 rows) \dn alice -List of schemas - Name | Owner --------+------- - alice | rocky + List of schemas + Name | Owner +-------+---------- + alice | appuser2 (1 row) \dn cena @@ -60,10 +68,10 @@ List of schemas (0 rows) \dn wonderland - List of schemas - Name | Owner -------------+------- - wonderland | rocky + List of schemas + Name | Owner +------------+---------- + wonderland | appuser2 (1 row) -- Turn off the allow_ddl_from_functions GUC so that these drops are not auto replicated @@ -107,3 +115,13 @@ SELECT pg_reload_conf(); t (1 row) +--cleanup +DROP SCHEMA s617 CASCADE; +INFO: DDL statement replicated. 
+DROP SCHEMA +DROP OWNED BY appuser2; +INFO: DDL statement replicated. +DROP OWNED +DROP ROLE appuser2; +INFO: DDL statement replicated. +DROP ROLE diff --git a/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.sql b/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.sql index 751e008..5d91eb6 100644 --- a/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.sql +++ b/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.sql @@ -1,5 +1,8 @@ +SELECT pg_sleep(1); -- Validate replicated functions, procedures, tables -- No objects sould exist except tab7, schemas (alice,wonderland) + +SET search_path TO s617, public; \df add_column* \df remove_column* \df employee_insert_trigger @@ -34,3 +37,7 @@ $$; -- Turn on the allow_ddl_from_functions GUC ALTER SYSTEM SET spock.allow_ddl_from_functions = on; SELECT pg_reload_conf(); +--cleanup +DROP SCHEMA s617 CASCADE; +DROP OWNED BY appuser2; +DROP ROLE appuser2; diff --git a/t/auto_ddl/6666a_all_objects_create_n1.out b/t/auto_ddl/6666a_all_objects_create_n1.out index 235ba69..a0e25e2 100644 --- a/t/auto_ddl/6666a_all_objects_create_n1.out +++ b/t/auto_ddl/6666a_all_objects_create_n1.out @@ -1,10 +1,28 @@ --- Create spocktab prepared statement -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE $1 ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +--creating the necessary pre-reqs and then switching to the appuser3 (non-superuser) role +CREATE ROLE appuser3 LOGIN; +INFO: DDL statement replicated. +CREATE ROLE -- Create schema -CREATE SCHEMA s1; +CREATE SCHEMA s1 AUTHORIZATION appuser3; INFO: DDL statement replicated. CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA public TO appuser3; +INFO: DDL statement replicated. +GRANT +-- Grant execution rights to the non-superuser +GRANT EXECUTE ON FUNCTION public.get_table_repset_info(TEXT) TO appuser3; +INFO: DDL statement replicated. +GRANT +---------------- +SET ROLE TO adminuser; +SET +--performing the supseruser operations initially SET search_path TO s1; SET -- Create database @@ -47,10 +65,95 @@ WARNING: subscription was created, but is not connected HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. WARNING: This DDL statement will not be replicated. CREATE SUBSCRIPTION +-- Create language +CREATE LANGUAGE plperl; +INFO: DDL statement replicated. +CREATE EXTENSION +-- Create function for language internal +CREATE FUNCTION int4_sum(state int4, value int4) RETURNS int4 LANGUAGE internal IMMUTABLE STRICT AS 'int4pl'; +INFO: DDL statement replicated. +CREATE FUNCTION +-- Create foreign table +CREATE FOREIGN TABLE obj_foreign_table ( + id INT, + name TEXT +) SERVER obj_server; +INFO: DDL statement replicated. +CREATE FOREIGN TABLE +-- Create operator family +CREATE OPERATOR FAMILY obj_opfamily USING btree; +INFO: DDL statement replicated. +CREATE OPERATOR FAMILY +-- Create operator class +CREATE OPERATOR CLASS obj_opclass FOR TYPE int4 USING btree FAMILY obj_opfamily AS + OPERATOR 1 < , + OPERATOR 2 <= , + OPERATOR 3 = , + OPERATOR 4 >= , + OPERATOR 5 > , + FUNCTION 1 btint4cmp(int4, int4); +INFO: DDL statement replicated. +CREATE OPERATOR CLASS +-- Create text search parser +CREATE TEXT SEARCH PARSER obj_tsparser ( + START = prsd_start, + GETTOKEN = prsd_nexttoken, + END = prsd_end, + LEXTYPES = prsd_lextype +); +INFO: DDL statement replicated. 
+CREATE TEXT SEARCH PARSER +-- Create text search dictionary +CREATE TEXT SEARCH DICTIONARY obj_tsdict ( + TEMPLATE = simple +); +INFO: DDL statement replicated. +CREATE TEXT SEARCH DICTIONARY +-- Create text search configuration +CREATE TEXT SEARCH CONFIGURATION obj_tsconfig (PARSER = obj_tsparser); +INFO: DDL statement replicated. +CREATE TEXT SEARCH CONFIGURATION +-- Create text search template +CREATE TEXT SEARCH TEMPLATE obj_tstemplate ( + INIT = dsimple_init, + LEXIZE = dsimple_lexize +); +INFO: DDL statement replicated. +CREATE TEXT SEARCH TEMPLATE +-- Create transform +CREATE TRANSFORM FOR int LANGUAGE SQL ( + FROM SQL WITH FUNCTION prsd_lextype(internal), + TO SQL WITH FUNCTION int4recv(internal)); +INFO: DDL statement replicated. +CREATE TRANSFORM +-- Create event trigger +CREATE FUNCTION obj_function_event_trigger() RETURNS event_trigger LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'Event trigger activated: %', tg_tag; +END $$; +INFO: DDL statement replicated. +CREATE FUNCTION +CREATE EVENT TRIGGER obj_event_trigger ON ddl_command_start EXECUTE FUNCTION obj_function_event_trigger(); +INFO: DDL statement replicated. +CREATE EVENT TRIGGER +-- Create group +CREATE GROUP obj_group; +INFO: DDL statement replicated. +CREATE ROLE +RESET ROLE; +RESET +-- non super user operations +-- switching to appuser3 with limited privileges +SET ROLE TO appuser3; +SET +SET search_path TO s1; +SET CREATE TYPE obj_type AS (x INT, y INT); +NOTICE: Event trigger activated: CREATE TYPE INFO: DDL statement replicated. CREATE TYPE CREATE DOMAIN obj_domain AS INT; +NOTICE: Event trigger activated: CREATE DOMAIN INFO: DDL statement replicated. CREATE DOMAIN -- Create cast @@ -58,64 +161,59 @@ CREATE FUNCTION obj_function_cast(obj_type) RETURNS INT LANGUAGE plpgsql AS $$ BEGIN RETURN $1.x + $1.y; END $$; +NOTICE: Event trigger activated: CREATE FUNCTION INFO: DDL statement replicated. CREATE FUNCTION -- Create the cast from obj_type1 to int CREATE CAST (obj_type AS int) WITH FUNCTION obj_function_cast(obj_type) AS IMPLICIT; +NOTICE: Event trigger activated: CREATE CAST INFO: DDL statement replicated. CREATE CAST -- Create aggregate -CREATE FUNCTION int4_sum(state int4, value int4) RETURNS int4 LANGUAGE internal IMMUTABLE STRICT AS 'int4pl'; -INFO: DDL statement replicated. -CREATE FUNCTION --- Create aggregate CREATE AGGREGATE obj_aggregate ( sfunc = int4_sum, stype = int4, basetype = int4, initcond = '0' ); +NOTICE: Event trigger activated: CREATE AGGREGATE INFO: DDL statement replicated. CREATE AGGREGATE -- Create collation CREATE COLLATION obj_collation (lc_collate = 'C', lc_ctype = 'C'); +NOTICE: Event trigger activated: CREATE COLLATION INFO: DDL statement replicated. CREATE COLLATION -- Create conversion CREATE CONVERSION obj_conversion FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +NOTICE: Event trigger activated: CREATE CONVERSION INFO: DDL statement replicated. CREATE CONVERSION -- Create domain CREATE DOMAIN obj_domain2 AS INT CHECK (VALUE >= 0); +NOTICE: Event trigger activated: CREATE DOMAIN INFO: DDL statement replicated. CREATE DOMAIN --- Create foreign table -CREATE FOREIGN TABLE obj_foreign_table ( - id INT, - name TEXT -) SERVER obj_server; -INFO: DDL statement replicated. -CREATE FOREIGN TABLE -- Create function CREATE FUNCTION obj_function() RETURNS TRIGGER LANGUAGE plpgsql AS $$ BEGIN RETURN NEW; END $$; +NOTICE: Event trigger activated: CREATE FUNCTION INFO: DDL statement replicated. 
CREATE FUNCTION -- Create index CREATE TABLE obj_table (id INT PRIMARY KEY, name TEXT); +NOTICE: Event trigger activated: CREATE TABLE INFO: DDL statement replicated. CREATE TABLE CREATE INDEX obj_index ON obj_table (name); +NOTICE: Event trigger activated: CREATE INDEX INFO: DDL statement replicated. CREATE INDEX --- Create language -CREATE LANGUAGE plperl; -INFO: DDL statement replicated. -CREATE EXTENSION -- Create materialized view CREATE MATERIALIZED VIEW obj_mview AS SELECT * FROM obj_table WITH NO DATA; +NOTICE: Event trigger activated: CREATE MATERIALIZED VIEW WARNING: DDL statement replicated, but could be unsafe. CREATE MATERIALIZED VIEW -- Create operator @@ -125,24 +223,12 @@ CREATE OPERATOR ## ( function = path_inter, commutator = ## ); +NOTICE: Event trigger activated: CREATE OPERATOR INFO: DDL statement replicated. CREATE OPERATOR --- Create operator family -CREATE OPERATOR FAMILY obj_opfamily USING btree; -INFO: DDL statement replicated. -CREATE OPERATOR FAMILY --- Create operator class -CREATE OPERATOR CLASS obj_opclass FOR TYPE int4 USING btree FAMILY obj_opfamily AS - OPERATOR 1 < , - OPERATOR 2 <= , - OPERATOR 3 = , - OPERATOR 4 >= , - OPERATOR 5 > , - FUNCTION 1 btint4cmp(int4, int4); -INFO: DDL statement replicated. -CREATE OPERATOR CLASS -- Create policy CREATE POLICY obj_policy ON obj_table FOR SELECT TO PUBLIC USING (true); +NOTICE: Event trigger activated: CREATE POLICY INFO: DDL statement replicated. CREATE POLICY -- Create procedure @@ -150,83 +236,47 @@ CREATE PROCEDURE obj_procedure() LANGUAGE plpgsql AS $$ BEGIN RAISE NOTICE 'Procedure executed'; END $$; +NOTICE: Event trigger activated: CREATE PROCEDURE INFO: DDL statement replicated. CREATE PROCEDURE -- Create rule CREATE RULE obj_rule AS ON INSERT TO obj_table DO ALSO NOTHING; +NOTICE: Event trigger activated: CREATE RULE INFO: DDL statement replicated. CREATE RULE --- Create text search dictionary -CREATE TEXT SEARCH DICTIONARY obj_tsdict ( - TEMPLATE = simple -); -INFO: DDL statement replicated. -CREATE TEXT SEARCH DICTIONARY --- Create text search parser -CREATE TEXT SEARCH PARSER obj_tsparser ( - START = prsd_start, - GETTOKEN = prsd_nexttoken, - END = prsd_end, - LEXTYPES = prsd_lextype -); -INFO: DDL statement replicated. -CREATE TEXT SEARCH PARSER --- Create text search configuration -CREATE TEXT SEARCH CONFIGURATION obj_tsconfig (PARSER = obj_tsparser); -INFO: DDL statement replicated. -CREATE TEXT SEARCH CONFIGURATION --- Create text search template -CREATE TEXT SEARCH TEMPLATE obj_tstemplate ( - INIT = dsimple_init, - LEXIZE = dsimple_lexize -); -INFO: DDL statement replicated. -CREATE TEXT SEARCH TEMPLATE --- Create transform -CREATE TRANSFORM FOR int LANGUAGE SQL ( - FROM SQL WITH FUNCTION prsd_lextype(internal), - TO SQL WITH FUNCTION int4recv(internal)); -INFO: DDL statement replicated. -CREATE TRANSFORM -- Create trigger CREATE TRIGGER obj_trigger AFTER INSERT ON obj_table FOR EACH ROW EXECUTE FUNCTION obj_function(); +NOTICE: Event trigger activated: CREATE TRIGGER INFO: DDL statement replicated. CREATE TRIGGER -- Create type CREATE TYPE obj_composite_type AS (x INT, y INT); +NOTICE: Event trigger activated: CREATE TYPE INFO: DDL statement replicated. CREATE TYPE CREATE TYPE obj_enum AS ENUM ('red', 'green', 'blue'); +NOTICE: Event trigger activated: CREATE TYPE INFO: DDL statement replicated. CREATE TYPE CREATE TYPE obj_range AS RANGE (subtype = int4range); +NOTICE: Event trigger activated: CREATE TYPE INFO: DDL statement replicated. 
CREATE TYPE -- Create view CREATE VIEW obj_view AS SELECT * FROM obj_table; +NOTICE: Event trigger activated: CREATE VIEW INFO: DDL statement replicated. CREATE VIEW --- Create group -CREATE GROUP obj_group; -INFO: DDL statement replicated. -CREATE ROLE --- Create event trigger -CREATE FUNCTION obj_function_event_trigger() RETURNS event_trigger LANGUAGE plpgsql AS $$ -BEGIN - RAISE NOTICE 'Event trigger activated: %', tg_tag; -END $$; -INFO: DDL statement replicated. -CREATE FUNCTION -CREATE EVENT TRIGGER obj_event_trigger ON ddl_command_start EXECUTE FUNCTION obj_function_event_trigger(); -INFO: DDL statement replicated. -CREATE EVENT TRIGGER +--swtiching back to superuser for validations +RESET ROLE; +RESET -- Meta command validations -- Validate database \l obj_database - List of databases - Name | Owner | Encoding | Locale Provider | Collate | Ctype | ICU Locale | ICU Rules | Access privileges ---------------+-------+----------+-----------------+-------------+-------------+------------+-----------+------------------- - obj_database | rocky | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | | | + List of databases + Name | Owner | Encoding | Locale Provider | Collate | Ctype | ICU Locale | ICU Rules | Access privileges +--------------+-----------+----------+-----------------+-------------+-------------+------------+-----------+------------------- + obj_database | adminuser | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | | | (1 row) -- Validate extension @@ -253,27 +303,27 @@ SELECT count(*) FROM pg_tablespace WHERE spcname = 'obj_tablespace'; -- Validate schema \dn s1 -List of schemas - Name | Owner -------+------- - s1 | rocky + List of schemas + Name | Owner +------+---------- + s1 | appuser3 Publications: "obj_publication" -- Validate foreign data wrapper \dew obj_fdw - List of foreign-data wrappers - Name | Owner | Handler | Validator ----------+-------+---------+----------- - obj_fdw | rocky | - | - + List of foreign-data wrappers + Name | Owner | Handler | Validator +---------+-----------+---------+----------- + obj_fdw | adminuser | - | - (1 row) -- Validate server \des obj_server - List of foreign servers - Name | Owner | Foreign-data wrapper -------------+-------+---------------------- - obj_server | rocky | obj_fdw + List of foreign servers + Name | Owner | Foreign-data wrapper +------------+-----------+---------------------- + obj_server | adminuser | obj_fdw (1 row) -- Validate user mapping @@ -281,23 +331,23 @@ Publications: List of user mappings Server | User name ------------+----------- - obj_server | rocky + obj_server | adminuser (1 row) -- Validate publication \dRp obj_publication - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ------------------+-------+------------+---------+---------+---------+-----------+---------- - obj_publication | rocky | f | t | t | t | t | f + List of publications + Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +-----------------+-----------+------------+---------+---------+---------+-----------+---------- + obj_publication | adminuser | f | t | t | t | t | f (1 row) -- Validate subscription \dRs obj_subscription - List of subscriptions - Name | Owner | Enabled | Publication -------------------+-------+---------+------------------- - obj_subscription | rocky | f | {obj_publication} + List of subscriptions + Name | Owner | Enabled | Publication +------------------+-----------+---------+------------------- + obj_subscription | adminuser | f | {obj_publication} (1 
row) -- Validate cast @@ -342,10 +392,10 @@ Publications: -- Validate event trigger \dy obj_event_trigger - List of event triggers - Name | Event | Owner | Enabled | Function | Tags --------------------+-------------------+-------+---------+----------------------------+------ - obj_event_trigger | ddl_command_start | rocky | enabled | obj_function_event_trigger | + List of event triggers + Name | Event | Owner | Enabled | Function | Tags +-------------------+-------------------+-----------+---------+----------------------------+------ + obj_event_trigger | ddl_command_start | adminuser | enabled | obj_function_event_trigger | (1 row) -- Validate foreign table @@ -366,18 +416,18 @@ Publications: -- Validate index \di obj_index - List of relations - Schema | Name | Type | Owner | Table ---------+-----------+-------+-------+----------- - s1 | obj_index | index | rocky | obj_table + List of relations + Schema | Name | Type | Owner | Table +--------+-----------+-------+----------+----------- + s1 | obj_index | index | appuser3 | obj_table (1 row) -- Validate language \dL plperl - List of languages - Name | Owner | Trusted | Description ---------+-------+---------+----------------------------- - plperl | rocky | t | PL/Perl procedural language + List of languages + Name | Owner | Trusted | Description +--------+-----------+---------+----------------------------- + plperl | adminuser | t | PL/Perl procedural language (1 row) -- Validate materialized view @@ -470,26 +520,26 @@ WHERE ty.typname = 'int4' AND l.lanname = 'sql'; -- Validate type \dT+ obj_composite_type - List of data types - Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description ---------+--------------------+--------------------+-------+----------+-------+-------------------+------------- - s1 | obj_composite_type | obj_composite_type | tuple | | rocky | | + List of data types + Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description +--------+--------------------+--------------------+-------+----------+----------+-------------------+------------- + s1 | obj_composite_type | obj_composite_type | tuple | | appuser3 | | (1 row) \dT+ obj_enum - List of data types - Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description ---------+----------+---------------+------+----------+-------+-------------------+------------- - s1 | obj_enum | obj_enum | 4 | red +| rocky | | - | | | | green +| | | - | | | | blue | | | + List of data types + Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description +--------+----------+---------------+------+----------+----------+-------------------+------------- + s1 | obj_enum | obj_enum | 4 | red +| appuser3 | | + | | | | green +| | | + | | | | blue | | | (1 row) \dT+ obj_range - List of data types - Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description ---------+-----------+---------------+------+----------+-------+-------------------+------------- - s1 | obj_range | obj_range | var | | rocky | | + List of data types + Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description +--------+-----------+---------------+------+----------+----------+-------------------+------------- + s1 | obj_range | obj_range | var | | appuser3 | | (1 row) -- Validate view diff --git a/t/auto_ddl/6666a_all_objects_create_n1.sql b/t/auto_ddl/6666a_all_objects_create_n1.sql index 5264a05..f73e9b9 100644 --- 
a/t/auto_ddl/6666a_all_objects_create_n1.sql +++ b/t/auto_ddl/6666a_all_objects_create_n1.sql @@ -1,8 +1,18 @@ --- Create spocktab prepared statement -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE $1 ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated +--creating the necessary pre-reqs and then switching to the appuser3 (non-superuser) role +CREATE ROLE appuser3 LOGIN; -- Create schema -CREATE SCHEMA s1; +CREATE SCHEMA s1 AUTHORIZATION appuser3; + +GRANT ALL PRIVILEGES ON SCHEMA public TO appuser3; +-- Grant execution rights to the non-superuser +GRANT EXECUTE ON FUNCTION public.get_table_repset_info(TEXT) TO appuser3; + + +---------------- +SET ROLE TO adminuser; +--performing the supseruser operations initially SET search_path TO s1; -- Create database @@ -33,8 +43,76 @@ CREATE PUBLICATION obj_publication FOR TABLES IN SCHEMA s1; -- Create subscription CREATE SUBSCRIPTION obj_subscription CONNECTION '' PUBLICATION obj_publication WITH (connect = false, slot_name = NONE); +-- Create language +CREATE LANGUAGE plperl; + +-- Create function for language internal +CREATE FUNCTION int4_sum(state int4, value int4) RETURNS int4 LANGUAGE internal IMMUTABLE STRICT AS 'int4pl'; + +-- Create foreign table +CREATE FOREIGN TABLE obj_foreign_table ( + id INT, + name TEXT +) SERVER obj_server; + +-- Create operator family +CREATE OPERATOR FAMILY obj_opfamily USING btree; + +-- Create operator class +CREATE OPERATOR CLASS obj_opclass FOR TYPE int4 USING btree FAMILY obj_opfamily AS + OPERATOR 1 < , + OPERATOR 2 <= , + OPERATOR 3 = , + OPERATOR 4 >= , + OPERATOR 5 > , + FUNCTION 1 btint4cmp(int4, int4); + +-- Create text search parser +CREATE TEXT SEARCH PARSER obj_tsparser ( + START = prsd_start, + GETTOKEN = prsd_nexttoken, + END = prsd_end, + LEXTYPES = prsd_lextype +); + +-- Create text search dictionary +CREATE TEXT SEARCH DICTIONARY obj_tsdict ( + TEMPLATE = simple +); + +-- Create text search configuration +CREATE TEXT SEARCH CONFIGURATION obj_tsconfig (PARSER = obj_tsparser); + +-- Create text search template +CREATE TEXT SEARCH TEMPLATE obj_tstemplate ( + INIT = dsimple_init, + LEXIZE = dsimple_lexize +); + +-- Create transform +CREATE TRANSFORM FOR int LANGUAGE SQL ( + FROM SQL WITH FUNCTION prsd_lextype(internal), + TO SQL WITH FUNCTION int4recv(internal)); + +-- Create event trigger +CREATE FUNCTION obj_function_event_trigger() RETURNS event_trigger LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'Event trigger activated: %', tg_tag; +END $$; +CREATE EVENT TRIGGER obj_event_trigger ON ddl_command_start EXECUTE FUNCTION obj_function_event_trigger(); + +-- Create group +CREATE GROUP obj_group; + +RESET ROLE; + +-- non super user operations +-- switching to appuser3 with limited privileges +SET ROLE TO appuser3; +SET search_path TO s1; CREATE TYPE obj_type AS (x INT, y INT); + CREATE DOMAIN obj_domain AS INT; -- Create cast CREATE FUNCTION obj_function_cast(obj_type) RETURNS INT LANGUAGE plpgsql AS $$ @@ -44,9 +122,6 @@ END $$; -- Create the cast from obj_type1 to int CREATE CAST (obj_type AS int) WITH FUNCTION obj_function_cast(obj_type) AS IMPLICIT; --- Create aggregate -CREATE FUNCTION int4_sum(state int4, value int4) RETURNS int4 LANGUAGE internal IMMUTABLE STRICT AS 'int4pl'; - -- Create aggregate CREATE AGGREGATE obj_aggregate ( sfunc = int4_sum, @@ -55,22 +130,15 @@ CREATE AGGREGATE obj_aggregate ( initcond = '0' ); - -- Create collation CREATE COLLATION obj_collation (lc_collate = 'C', lc_ctype = 'C'); -- Create conversion 
CREATE CONVERSION obj_conversion FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; - -- Create domain CREATE DOMAIN obj_domain2 AS INT CHECK (VALUE >= 0); --- Create foreign table -CREATE FOREIGN TABLE obj_foreign_table ( - id INT, - name TEXT -) SERVER obj_server; -- Create function CREATE FUNCTION obj_function() RETURNS TRIGGER LANGUAGE plpgsql AS $$ @@ -82,9 +150,6 @@ END $$; CREATE TABLE obj_table (id INT PRIMARY KEY, name TEXT); CREATE INDEX obj_index ON obj_table (name); --- Create language -CREATE LANGUAGE plperl; - -- Create materialized view CREATE MATERIALIZED VIEW obj_mview AS SELECT * FROM obj_table WITH NO DATA; @@ -96,18 +161,6 @@ CREATE OPERATOR ## ( commutator = ## ); --- Create operator family -CREATE OPERATOR FAMILY obj_opfamily USING btree; - --- Create operator class -CREATE OPERATOR CLASS obj_opclass FOR TYPE int4 USING btree FAMILY obj_opfamily AS - OPERATOR 1 < , - OPERATOR 2 <= , - OPERATOR 3 = , - OPERATOR 4 >= , - OPERATOR 5 > , - FUNCTION 1 btint4cmp(int4, int4); - -- Create policy CREATE POLICY obj_policy ON obj_table FOR SELECT TO PUBLIC USING (true); @@ -120,33 +173,6 @@ END $$; -- Create rule CREATE RULE obj_rule AS ON INSERT TO obj_table DO ALSO NOTHING; --- Create text search dictionary -CREATE TEXT SEARCH DICTIONARY obj_tsdict ( - TEMPLATE = simple -); - --- Create text search parser -CREATE TEXT SEARCH PARSER obj_tsparser ( - START = prsd_start, - GETTOKEN = prsd_nexttoken, - END = prsd_end, - LEXTYPES = prsd_lextype -); - --- Create text search configuration -CREATE TEXT SEARCH CONFIGURATION obj_tsconfig (PARSER = obj_tsparser); - --- Create text search template -CREATE TEXT SEARCH TEMPLATE obj_tstemplate ( - INIT = dsimple_init, - LEXIZE = dsimple_lexize -); - --- Create transform -CREATE TRANSFORM FOR int LANGUAGE SQL ( - FROM SQL WITH FUNCTION prsd_lextype(internal), - TO SQL WITH FUNCTION int4recv(internal)); - -- Create trigger CREATE TRIGGER obj_trigger AFTER INSERT ON obj_table FOR EACH ROW EXECUTE FUNCTION obj_function(); @@ -158,16 +184,8 @@ CREATE TYPE obj_range AS RANGE (subtype = int4range); -- Create view CREATE VIEW obj_view AS SELECT * FROM obj_table; --- Create group -CREATE GROUP obj_group; - --- Create event trigger -CREATE FUNCTION obj_function_event_trigger() RETURNS event_trigger LANGUAGE plpgsql AS $$ -BEGIN - RAISE NOTICE 'Event trigger activated: %', tg_tag; -END $$; -CREATE EVENT TRIGGER obj_event_trigger ON ddl_command_start EXECUTE FUNCTION obj_function_event_trigger(); - +--swtiching back to superuser for validations +RESET ROLE; -- Meta command validations -- Validate database diff --git a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out index f330329..6a78bc8 100644 --- a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out +++ b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out @@ -38,27 +38,27 @@ SELECT count(*) FROM pg_tablespace WHERE spcname = 'obj_tablespace'; -- Validate schema \dn s1 -List of schemas - Name | Owner -------+------- - s1 | rocky + List of schemas + Name | Owner +------+---------- + s1 | appuser3 Publications: "obj_publication" -- Validate foreign data wrapper \dew obj_fdw - List of foreign-data wrappers - Name | Owner | Handler | Validator ----------+-------+---------+----------- - obj_fdw | rocky | - | - + List of foreign-data wrappers + Name | Owner | Handler | Validator +---------+-----------+---------+----------- + obj_fdw | adminuser | - | - (1 row) -- Validate server \des obj_server - List of foreign servers - Name | Owner | 
Foreign-data wrapper -------------+-------+---------------------- - obj_server | rocky | obj_fdw + List of foreign servers + Name | Owner | Foreign-data wrapper +------------+-----------+---------------------- + obj_server | adminuser | obj_fdw (1 row) -- Validate user mapping @@ -66,15 +66,15 @@ Publications: List of user mappings Server | User name ------------+----------- - obj_server | rocky + obj_server | adminuser (1 row) -- Validate publication \dRp obj_publication - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ------------------+-------+------------+---------+---------+---------+-----------+---------- - obj_publication | rocky | f | t | t | t | t | f + List of publications + Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +-----------------+-----------+------------+---------+---------+---------+-----------+---------- + obj_publication | adminuser | f | t | t | t | t | f (1 row) -- Validate subscription, should not exist @@ -126,10 +126,10 @@ Publications: -- Validate event trigger \dy obj_event_trigger - List of event triggers - Name | Event | Owner | Enabled | Function | Tags --------------------+-------------------+-------+---------+----------------------------+------ - obj_event_trigger | ddl_command_start | rocky | enabled | obj_function_event_trigger | + List of event triggers + Name | Event | Owner | Enabled | Function | Tags +-------------------+-------------------+-----------+---------+----------------------------+------ + obj_event_trigger | ddl_command_start | adminuser | enabled | obj_function_event_trigger | (1 row) -- Validate foreign table @@ -150,18 +150,18 @@ Publications: -- Validate index \di obj_index - List of relations - Schema | Name | Type | Owner | Table ---------+-----------+-------+-------+----------- - s1 | obj_index | index | rocky | obj_table + List of relations + Schema | Name | Type | Owner | Table +--------+-----------+-------+----------+----------- + s1 | obj_index | index | appuser3 | obj_table (1 row) -- Validate language \dL plperl - List of languages - Name | Owner | Trusted | Description ---------+-------+---------+----------------------------- - plperl | rocky | t | PL/Perl procedural language + List of languages + Name | Owner | Trusted | Description +--------+-----------+---------+----------------------------- + plperl | adminuser | t | PL/Perl procedural language (1 row) -- Validate materialized view @@ -254,26 +254,26 @@ WHERE ty.typname = 'int4' AND l.lanname = 'sql'; -- Validate type \dT+ obj_composite_type - List of data types - Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description ---------+--------------------+--------------------+-------+----------+-------+-------------------+------------- - s1 | obj_composite_type | obj_composite_type | tuple | | rocky | | + List of data types + Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description +--------+--------------------+--------------------+-------+----------+----------+-------------------+------------- + s1 | obj_composite_type | obj_composite_type | tuple | | appuser3 | | (1 row) \dT+ obj_enum - List of data types - Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description ---------+----------+---------------+------+----------+-------+-------------------+------------- - s1 | obj_enum | obj_enum | 4 | red +| rocky | | - | | | | green +| | | - | | | | blue | | | + List of data types + Schema | Name | 
Internal name | Size | Elements | Owner | Access privileges | Description +--------+----------+---------------+------+----------+----------+-------------------+------------- + s1 | obj_enum | obj_enum | 4 | red +| appuser3 | | + | | | | green +| | | + | | | | blue | | | (1 row) \dT+ obj_range - List of data types - Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description ---------+-----------+---------------+------+----------+-------+-------------------+------------- - s1 | obj_range | obj_range | var | | rocky | | + List of data types + Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description +--------+-----------+---------------+------+----------+----------+-------------------+------------- + s1 | obj_range | obj_range | var | | appuser3 | | (1 row) -- Validate view @@ -336,7 +336,7 @@ DROP EXTENSION DROP FOREIGN DATA WRAPPER obj_fdw CASCADE; NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to server obj_server -drop cascades to user mapping for rocky on server obj_server +drop cascades to user mapping for adminuser on server obj_server drop cascades to foreign table obj_foreign_table INFO: DDL statement replicated. DROP FOREIGN DATA WRAPPER @@ -431,8 +431,8 @@ INFO: DDL statement replicated. DROP ROLE DROP SCHEMA s1 CASCADE; NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to type obj_type +DETAIL: drop cascades to function int4_sum(integer,integer) +drop cascades to type obj_type drop cascades to type obj_domain -drop cascades to function int4_sum(integer,integer) INFO: DDL statement replicated. DROP SCHEMA diff --git a/t/auto_ddl/6666c_all_objects_validate_n1.out b/t/auto_ddl/6666c_all_objects_validate_n1.out index aa5cee2..349a7c3 100644 --- a/t/auto_ddl/6666c_all_objects_validate_n1.out +++ b/t/auto_ddl/6666c_all_objects_validate_n1.out @@ -1,3 +1,9 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + --Drop the objects directly on n1 that weren't auto replicated (expected) DROP DATABASE obj_database; WARNING: This DDL statement will not be replicated. @@ -247,3 +253,10 @@ Did not find any relation named "obj_table". Role name | Attributes -----------+------------ +--role cleanup +DROP OWNED BY appuser3; +INFO: DDL statement replicated. +DROP OWNED +DROP ROLE appuser3; +INFO: DDL statement replicated. +DROP ROLE diff --git a/t/auto_ddl/6666c_all_objects_validate_n1.sql b/t/auto_ddl/6666c_all_objects_validate_n1.sql index 973ba2b..e69acd2 100644 --- a/t/auto_ddl/6666c_all_objects_validate_n1.sql +++ b/t/auto_ddl/6666c_all_objects_validate_n1.sql @@ -1,3 +1,5 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + --Drop the objects directly on n1 that weren't auto replicated (expected) DROP DATABASE obj_database; --The tablespace will have to be dropped in the _c file @@ -114,3 +116,7 @@ WHERE ty.typname = 'int4' AND l.lanname = 'sql'; -- Validate group \dg obj_group + +--role cleanup +DROP OWNED BY appuser3; +DROP ROLE appuser3; From 9816c7cbdfae420e6532d325bcaadb43f53cf110 Mon Sep 17 00:00:00 2001 From: "A. Hayee Bhatti" Date: Tue, 29 Oct 2024 16:50:49 +0500 Subject: [PATCH 33/48] [AutoDDL] Regression stabilisation in the 6666 tests to account for pg17 catalog changes ensuring outputs stay consistent across pg versions. 
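
The \l and \dO meta-commands expose locale-related columns whose presentation changed with the pg17 catalog updates, so their expected output can no longer be shared across Postgres versions. The affected validations now query pg_catalog directly and project only the columns that stay stable; as a reference, this is a condensed copy of the database check added below (column aliases are only for readability):

    SELECT
        datname AS name,
        pg_catalog.pg_get_userbyid(datdba) AS owner,
        pg_catalog.pg_encoding_to_char(encoding) AS encoding,
        datcollate AS collate,
        datctype AS ctype
    FROM pg_database
    WHERE datname = 'obj_database';

The same pattern replaces \dO for the collation validation, joining pg_collation to pg_namespace instead of relying on the meta-command output.
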
--- t/auto_ddl/6666a_all_objects_create_n1.out | 41 +++++++++++++------ t/auto_ddl/6666a_all_objects_create_n1.sql | 28 +++++++++++-- ...6666b_all_objects_validate_and_drop_n2.out | 38 ++++++++++++----- ...6666b_all_objects_validate_and_drop_n2.sql | 26 ++++++++++-- t/auto_ddl/6666c_all_objects_validate_n1.out | 36 ++++++++++++---- t/auto_ddl/6666c_all_objects_validate_n1.sql | 27 ++++++++++-- 6 files changed, 155 insertions(+), 41 deletions(-) diff --git a/t/auto_ddl/6666a_all_objects_create_n1.out b/t/auto_ddl/6666a_all_objects_create_n1.out index a0e25e2..30bdffd 100644 --- a/t/auto_ddl/6666a_all_objects_create_n1.out +++ b/t/auto_ddl/6666a_all_objects_create_n1.out @@ -271,12 +271,20 @@ CREATE VIEW RESET ROLE; RESET -- Meta command validations --- Validate database -\l obj_database - List of databases - Name | Owner | Encoding | Locale Provider | Collate | Ctype | ICU Locale | ICU Rules | Access privileges ---------------+-----------+----------+-----------------+-------------+-------------+------------+-----------+------------------- - obj_database | adminuser | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | | | +-- Validate database (due to catalog changes in pg17, we are not using \l meta command anymore) +SELECT + datname AS name, + pg_catalog.pg_get_userbyid(datdba) AS owner, + pg_catalog.pg_encoding_to_char(encoding) AS encoding, + datcollate AS collate, + datctype AS ctype +FROM + pg_database +WHERE + datname = 'obj_database'; + name | owner | encoding | collate | ctype +--------------+-----------+----------+-------------+------------- + obj_database | adminuser | UTF8 | en_US.UTF-8 | en_US.UTF-8 (1 row) -- Validate extension @@ -366,12 +374,21 @@ Publications: s1 | obj_aggregate | integer | integer | (1 row) --- Validate collation -\dO obj_collation - List of collations - Schema | Name | Provider | Collate | Ctype | ICU Locale | ICU Rules | Deterministic? 
---------+---------------+----------+---------+-------+------------+-----------+---------------- - s1 | obj_collation | libc | C | C | | | yes +-- Validate collation , using a query instead of meta command as there are catalog changes in pg17 +SELECT + n.nspname AS schema, + c.collname AS name, + c.collcollate AS collate, + c.collctype AS ctype +FROM + pg_collation c +JOIN + pg_namespace n ON n.oid = c.collnamespace +WHERE + c.collname = 'obj_collation'; + schema | name | collate | ctype +--------+---------------+---------+------- + s1 | obj_collation | C | C (1 row) -- Validate conversion diff --git a/t/auto_ddl/6666a_all_objects_create_n1.sql b/t/auto_ddl/6666a_all_objects_create_n1.sql index f73e9b9..b6bba6f 100644 --- a/t/auto_ddl/6666a_all_objects_create_n1.sql +++ b/t/auto_ddl/6666a_all_objects_create_n1.sql @@ -188,8 +188,18 @@ CREATE VIEW obj_view AS SELECT * FROM obj_table; RESET ROLE; -- Meta command validations --- Validate database -\l obj_database +-- Validate database (due to catalog changes in pg17, we are not using \l meta command anymore) +SELECT + datname AS name, + pg_catalog.pg_get_userbyid(datdba) AS owner, + pg_catalog.pg_encoding_to_char(encoding) AS encoding, + datcollate AS collate, + datctype AS ctype +FROM + pg_database +WHERE + datname = 'obj_database'; + -- Validate extension \dx "uuid-ossp" @@ -224,8 +234,18 @@ SELECT count(*) FROM pg_tablespace WHERE spcname = 'obj_tablespace'; -- Validate aggregate \da obj_aggregate --- Validate collation -\dO obj_collation +-- Validate collation , using a query instead of meta command as there are catalog changes in pg17 +SELECT + n.nspname AS schema, + c.collname AS name, + c.collcollate AS collate, + c.collctype AS ctype +FROM + pg_collation c +JOIN + pg_namespace n ON n.oid = c.collnamespace +WHERE + c.collname = 'obj_collation'; -- Validate conversion \dc obj_conversion diff --git a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out index 6a78bc8..3ce8e4f 100644 --- a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out +++ b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out @@ -6,10 +6,19 @@ SELECT pg_sleep(2);--to ensure all objects are replicated ---- Validate all objects on n2 and then drop them on n2 that should also drop objects on n1 -- Validate database, should not exist -\l obj_database - List of databases - Name | Owner | Encoding | Locale Provider | Collate | Ctype | ICU Locale | ICU Rules | Access privileges -------+-------+----------+-----------------+---------+-------+------------+-----------+------------------- +-- due to catalog changes in pg17, we are not using a \l meta command anymore +SELECT + datname AS name, + pg_catalog.pg_get_userbyid(datdba) AS owner, + pg_catalog.pg_encoding_to_char(encoding) AS encoding, + datcollate AS collate, + datctype AS ctype +FROM + pg_database +WHERE + datname = 'obj_database'; + name | owner | encoding | collate | ctype +------+-------+----------+---------+------- (0 rows) -- Validate extension @@ -100,12 +109,21 @@ Publications: s1 | obj_aggregate | integer | integer | (1 row) --- Validate collation -\dO obj_collation - List of collations - Schema | Name | Provider | Collate | Ctype | ICU Locale | ICU Rules | Deterministic? 
---------+---------------+----------+---------+-------+------------+-----------+---------------- - s1 | obj_collation | libc | C | C | | | yes +-- Validate collation , using a query instead of meta command as there are catalog changes in pg17 +SELECT + n.nspname AS schema, + c.collname AS name, + c.collcollate AS collate, + c.collctype AS ctype +FROM + pg_collation c +JOIN + pg_namespace n ON n.oid = c.collnamespace +WHERE + c.collname = 'obj_collation'; + schema | name | collate | ctype +--------+---------------+---------+------- + s1 | obj_collation | C | C (1 row) -- Validate conversion diff --git a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql index 4b3707e..6fd3eca 100644 --- a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql +++ b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql @@ -2,7 +2,17 @@ SELECT pg_sleep(2);--to ensure all objects are replicated ---- Validate all objects on n2 and then drop them on n2 that should also drop objects on n1 -- Validate database, should not exist -\l obj_database +-- due to catalog changes in pg17, we are not using a \l meta command anymore +SELECT + datname AS name, + pg_catalog.pg_get_userbyid(datdba) AS owner, + pg_catalog.pg_encoding_to_char(encoding) AS encoding, + datcollate AS collate, + datctype AS ctype +FROM + pg_database +WHERE + datname = 'obj_database'; -- Validate extension \dx "uuid-ossp" @@ -38,8 +48,18 @@ SELECT count(*) FROM pg_tablespace WHERE spcname = 'obj_tablespace'; -- Validate aggregate \da obj_aggregate --- Validate collation -\dO obj_collation +-- Validate collation , using a query instead of meta command as there are catalog changes in pg17 +SELECT + n.nspname AS schema, + c.collname AS name, + c.collcollate AS collate, + c.collctype AS ctype +FROM + pg_collation c +JOIN + pg_namespace n ON n.oid = c.collnamespace +WHERE + c.collname = 'obj_collation'; -- Validate conversion \dc obj_conversion diff --git a/t/auto_ddl/6666c_all_objects_validate_n1.out b/t/auto_ddl/6666c_all_objects_validate_n1.out index 349a7c3..b1d772c 100644 --- a/t/auto_ddl/6666c_all_objects_validate_n1.out +++ b/t/auto_ddl/6666c_all_objects_validate_n1.out @@ -18,10 +18,19 @@ WARNING: This DDL statement will not be replicated. DROP SUBSCRIPTION --Validate all objects on n1 do not exist -- Validate database -\l obj_database - List of databases - Name | Owner | Encoding | Locale Provider | Collate | Ctype | ICU Locale | ICU Rules | Access privileges -------+-------+----------+-----------------+---------+-------+------------+-----------+------------------- +-- Validate database (due to catalog changes in pg17, we are not using \l meta command anymore) +SELECT + datname AS name, + pg_catalog.pg_get_userbyid(datdba) AS owner, + pg_catalog.pg_encoding_to_char(encoding) AS encoding, + datcollate AS collate, + datctype AS ctype +FROM + pg_database +WHERE + datname = 'obj_database'; + name | owner | encoding | collate | ctype +------+-------+----------+---------+------- (0 rows) -- Validate extension @@ -100,11 +109,20 @@ List of user mappings --------+------+------------------+---------------------+------------- (0 rows) --- Validate collation -\dO obj_collation - List of collations - Schema | Name | Provider | Collate | Ctype | ICU Locale | ICU Rules | Deterministic? 
---------+------+----------+---------+-------+------------+-----------+---------------- +-- Validate collation , using a query instead of meta command as there are catalog changes in pg17 +SELECT + n.nspname AS schema, + c.collname AS name, + c.collcollate AS collate, + c.collctype AS ctype +FROM + pg_collation c +JOIN + pg_namespace n ON n.oid = c.collnamespace +WHERE + c.collname = 'obj_collation'; + schema | name | collate | ctype +--------+------+---------+------- (0 rows) -- Validate conversion diff --git a/t/auto_ddl/6666c_all_objects_validate_n1.sql b/t/auto_ddl/6666c_all_objects_validate_n1.sql index e69acd2..6552ae0 100644 --- a/t/auto_ddl/6666c_all_objects_validate_n1.sql +++ b/t/auto_ddl/6666c_all_objects_validate_n1.sql @@ -10,7 +10,17 @@ DROP SUBSCRIPTION obj_subscription; --Validate all objects on n1 do not exist -- Validate database -\l obj_database +-- Validate database (due to catalog changes in pg17, we are not using \l meta command anymore) +SELECT + datname AS name, + pg_catalog.pg_get_userbyid(datdba) AS owner, + pg_catalog.pg_encoding_to_char(encoding) AS encoding, + datcollate AS collate, + datctype AS ctype +FROM + pg_database +WHERE + datname = 'obj_database'; -- Validate extension \dx "uuid-ossp" @@ -45,8 +55,19 @@ SELECT count(*) FROM pg_tablespace WHERE spcname = 'obj_tablespace'; -- Validate aggregate \da obj_aggregate --- Validate collation -\dO obj_collation +-- Validate collation , using a query instead of meta command as there are catalog changes in pg17 +SELECT + n.nspname AS schema, + c.collname AS name, + c.collcollate AS collate, + c.collctype AS ctype +FROM + pg_collation c +JOIN + pg_namespace n ON n.oid = c.collnamespace +WHERE + c.collname = 'obj_collation'; + -- Validate conversion \dc obj_conversion From a5eff56fa325b0eec2e187e317f3acb83fba3839 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Tue, 29 Oct 2024 13:38:26 +0000 Subject: [PATCH 34/48] Removed duplicate from long-test schedule --- schedule_files/long-test | 1 - 1 file changed, 1 deletion(-) diff --git a/schedule_files/long-test b/schedule_files/long-test index d058bbf..1d25342 100644 --- a/schedule_files/long-test +++ b/schedule_files/long-test @@ -80,7 +80,6 @@ t/spock_node_drop_interface.py t/spock_node_add_interface_no_db.py t/spock_node_drop_interface_no_interface.py t/spock_node_drop_interface_no_db.py -t/spock_create_sub_specify_repsets.py t/spock_7_negative_list.py t/spock_8_negative_create.py t/cleanup_01_node_remove.py From 83cd841692e6b27fb710f737bace1dc02be045a8 Mon Sep 17 00:00:00 2001 From: "A. Hayee Bhatti" Date: Wed, 30 Oct 2024 19:13:34 +0500 Subject: [PATCH 35/48] [AutoDDL] Regression stabilisation Adjusting a timestamp datatype to be without timezone so that output stays consistent across varying timezones. 
Also, changing the default locale value to C.UTF-8 in config.env --- .../6100a_table_datatypes_create_alter_n1.out | 92 +++++++++---------- .../6100a_table_datatypes_create_alter_n1.sql | 4 +- .../6100b_table_validate_and_drop_n2.out | 50 +++++----- t/lib/config.env | 2 +- 4 files changed, 74 insertions(+), 74 deletions(-) diff --git a/t/auto_ddl/6100a_table_datatypes_create_alter_n1.out b/t/auto_ddl/6100a_table_datatypes_create_alter_n1.out index f4e343e..e0fc46e 100644 --- a/t/auto_ddl/6100a_table_datatypes_create_alter_n1.out +++ b/t/auto_ddl/6100a_table_datatypes_create_alter_n1.out @@ -248,7 +248,7 @@ CREATE TABLE products ( discontinued BOOLEAN, product_description TEXT, added TIMESTAMP WITHOUT TIME ZONE, - updated TIMESTAMPTZ + updated TIMESTAMP WITHOUT TIME ZONE ); INFO: DDL statement replicated. CREATE TABLE @@ -269,7 +269,7 @@ INSERT 0 2 discontinued | boolean | | | product_description | text | | | added | timestamp without time zone | | | - updated | timestamp with time zone | | | + updated | timestamp without time zone | | | Indexes: "products_pkey" PRIMARY KEY, btree (product_id) @@ -301,7 +301,7 @@ ALTER TABLE discontinued | boolean | | | product_description | text | | | added | timestamp without time zone | | | - updated | timestamp with time zone | | | + updated | timestamp without time zone | | | category | character varying(50) | | | Indexes: "products_pkey" PRIMARY KEY, btree (product_id) @@ -386,7 +386,7 @@ SELECT * FROM get_table_repset_info('test_tab1'); -- Create table to test more data types and constraints CREATE TABLE test_tab2 ( id INT PRIMARY KEY, - timestamp_col TIMESTAMPTZ, + timestamp_col TIMESTAMP WITHOUT TIME ZONE, interval_col INTERVAL, inet_col INET, cidr_col CIDR, @@ -408,23 +408,23 @@ INSERT INTO test_tab2 (id, timestamp_col, interval_col, inet_col, cidr_col, maca INSERT 0 1 -- Validate the structure, spock.tables catalog table and data \d test_tab2 - Table "s610.test_tab2" - Column | Type | Collation | Nullable | Default ----------------+--------------------------+-----------+----------+--------- - id | integer | | not null | - timestamp_col | timestamp with time zone | | | - interval_col | interval | | | - inet_col | inet | | | - cidr_col | cidr | | | - macaddr_col | macaddr | | | - bit_col | bit(8) | | | - varbit_col | bit varying(8) | | | - box_col | box | | | - circle_col | circle | | | - line_col | line | | | - lseg_col | lseg | | | - path_col | path | | | - polygon_col | polygon | | | + Table "s610.test_tab2" + Column | Type | Collation | Nullable | Default +---------------+-----------------------------+-----------+----------+--------- + id | integer | | not null | + timestamp_col | timestamp without time zone | | | + interval_col | interval | | | + inet_col | inet | | | + cidr_col | cidr | | | + macaddr_col | macaddr | | | + bit_col | bit(8) | | | + varbit_col | bit varying(8) | | | + box_col | box | | | + circle_col | circle | | | + line_col | line | | | + lseg_col | lseg | | | + path_col | path | | | + polygon_col | polygon | | | Indexes: "test_tab2_pkey" PRIMARY KEY, btree (id) @@ -755,7 +755,7 @@ SELECT * FROM get_table_repset_info('employee_projects'); discontinued | boolean | | | | plain | | | product_description | text | | | | extended | | | added | timestamp without time zone | | | | plain | | | - updated | timestamp with time zone | | | | plain | | | + updated | timestamp without time zone | | | | plain | | | category | character varying(50) | | | | extended | | | Indexes: "products_pkey" PRIMARY KEY, btree (product_id) @@ -803,23 
+803,23 @@ SELECT * FROM get_table_repset_info('test_tab1'); (1 row) \d+ test_tab2 - Table "s610.test_tab2" - Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------------+--------------------------+-----------+----------+---------+----------+-------------+--------------+------------- - id | integer | | not null | | plain | | | - timestamp_col | timestamp with time zone | | | | plain | | | - interval_col | interval | | | | plain | | | - inet_col | inet | | | | main | | | - cidr_col | cidr | | | | main | | | - macaddr_col | macaddr | | | | plain | | | - bit_col | bit(8) | | | | extended | | | - varbit_col | bit varying(8) | | | | extended | | | - box_col | box | | | | plain | | | - circle_col | circle | | | | plain | | | - line_col | line | | | | plain | | | - lseg_col | lseg | | | | plain | | | - path_col | path | | | | extended | | | - polygon_col | polygon | | | | extended | | | + Table "s610.test_tab2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +---------------+-----------------------------+-----------+----------+---------+----------+-------------+--------------+------------- + id | integer | | not null | | plain | | | + timestamp_col | timestamp without time zone | | | | plain | | | + interval_col | interval | | | | plain | | | + inet_col | inet | | | | main | | | + cidr_col | cidr | | | | main | | | + macaddr_col | macaddr | | | | plain | | | + bit_col | bit(8) | | | | extended | | | + varbit_col | bit varying(8) | | | | extended | | | + box_col | box | | | | plain | | | + circle_col | circle | | | | plain | | | + line_col | line | | | | plain | | | + lseg_col | lseg | | | | plain | | | + path_col | path | | | | extended | | | + polygon_col | polygon | | | | extended | | | Indexes: "test_tab2_pkey" PRIMARY KEY, btree (id) Access method: heap @@ -921,10 +921,10 @@ SELECT * FROM employee_projects ORDER BY emp_id, project_id; (3 rows) SELECT * FROM products ORDER BY product_id; - product_id | product_name | price | stock_quantity | discontinued | product_description | added | updated | category -------------+--------------+-------+----------------+--------------+--------------------------+---------------------+------------------------+---------- - 1 | Product A | 19.99 | 150 | f | Description of Product A | 2023-01-01 12:00:00 | 2023-01-01 17:00:00+05 | - 2 | Product B | 29.99 | 200 | t | Description of Product B | 2023-02-01 15:00:00 | 2023-02-01 20:00:00+05 | + product_id | product_name | price | stock_quantity | discontinued | product_description | added | updated | category +------------+--------------+-------+----------------+--------------+--------------------------+---------------------+---------------------+---------- + 1 | Product A | 19.99 | 150 | f | Description of Product A | 2023-01-01 12:00:00 | 2023-01-01 12:00:00 | + 2 | Product B | 29.99 | 200 | t | Description of Product B | 2023-02-01 15:00:00 | 2023-02-01 15:00:00 | (2 rows) SELECT * FROM "CaseSensitiveTable" ORDER BY "ID"; @@ -941,9 +941,9 @@ SELECT * FROM test_tab1 ORDER BY id; (1 row) SELECT * FROM test_tab2 ORDER BY id; - id | timestamp_col | interval_col | inet_col | cidr_col | macaddr_col | bit_col | varbit_col | box_col | circle_col | line_col | lseg_col | path_col | polygon_col -----+------------------------+---------------+-------------+----------------+-------------------+----------+------------+-------------+------------+----------+---------------+---------------+--------------- - 1 | 2023-01-01 
17:00:00+05 | 1 year 2 mons | 192.168.1.1 | 192.168.0.0/24 | 08:00:2b:01:02:03 | 10101010 | 10101010 | (1,1),(0,0) | <(1,1),1> | {1,2,3} | [(0,0),(1,1)] | ((0,0),(1,1)) | ((0,0),(1,1)) + id | timestamp_col | interval_col | inet_col | cidr_col | macaddr_col | bit_col | varbit_col | box_col | circle_col | line_col | lseg_col | path_col | polygon_col +----+---------------------+---------------+-------------+----------------+-------------------+----------+------------+-------------+------------+----------+---------------+---------------+--------------- + 1 | 2023-01-01 12:00:00 | 1 year 2 mons | 192.168.1.1 | 192.168.0.0/24 | 08:00:2b:01:02:03 | 10101010 | 10101010 | (1,1),(0,0) | <(1,1),1> | {1,2,3} | [(0,0),(1,1)] | ((0,0),(1,1)) | ((0,0),(1,1)) (1 row) SELECT * FROM test_tab3 ORDER BY id; diff --git a/t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql b/t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql index b705d96..28dc15c 100644 --- a/t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql +++ b/t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql @@ -127,7 +127,7 @@ CREATE TABLE products ( discontinued BOOLEAN, product_description TEXT, added TIMESTAMP WITHOUT TIME ZONE, - updated TIMESTAMPTZ + updated TIMESTAMP WITHOUT TIME ZONE ); -- Insert initial data into products table @@ -188,7 +188,7 @@ SELECT * FROM get_table_repset_info('test_tab1'); -- Create table to test more data types and constraints CREATE TABLE test_tab2 ( id INT PRIMARY KEY, - timestamp_col TIMESTAMPTZ, + timestamp_col TIMESTAMP WITHOUT TIME ZONE, interval_col INTERVAL, inet_col INET, cidr_col CIDR, diff --git a/t/auto_ddl/6100b_table_validate_and_drop_n2.out b/t/auto_ddl/6100b_table_validate_and_drop_n2.out index 00d852b..678b58f 100644 --- a/t/auto_ddl/6100b_table_validate_and_drop_n2.out +++ b/t/auto_ddl/6100b_table_validate_and_drop_n2.out @@ -127,7 +127,7 @@ SELECT * FROM get_table_repset_info('employee_projects'); discontinued | boolean | | | | plain | | | product_description | text | | | | extended | | | added | timestamp without time zone | | | | plain | | | - updated | timestamp with time zone | | | | plain | | | + updated | timestamp without time zone | | | | plain | | | category | character varying(50) | | | | extended | | | Indexes: "products_pkey" PRIMARY KEY, btree (product_id) @@ -175,23 +175,23 @@ SELECT * FROM get_table_repset_info('test_tab1'); (1 row) \d+ test_tab2 - Table "s610.test_tab2" - Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------------+--------------------------+-----------+----------+---------+----------+-------------+--------------+------------- - id | integer | | not null | | plain | | | - timestamp_col | timestamp with time zone | | | | plain | | | - interval_col | interval | | | | plain | | | - inet_col | inet | | | | main | | | - cidr_col | cidr | | | | main | | | - macaddr_col | macaddr | | | | plain | | | - bit_col | bit(8) | | | | extended | | | - varbit_col | bit varying(8) | | | | extended | | | - box_col | box | | | | plain | | | - circle_col | circle | | | | plain | | | - line_col | line | | | | plain | | | - lseg_col | lseg | | | | plain | | | - path_col | path | | | | extended | | | - polygon_col | polygon | | | | extended | | | + Table "s610.test_tab2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +---------------+-----------------------------+-----------+----------+---------+----------+-------------+--------------+------------- + id | integer | | not null 
| | plain | | | + timestamp_col | timestamp without time zone | | | | plain | | | + interval_col | interval | | | | plain | | | + inet_col | inet | | | | main | | | + cidr_col | cidr | | | | main | | | + macaddr_col | macaddr | | | | plain | | | + bit_col | bit(8) | | | | extended | | | + varbit_col | bit varying(8) | | | | extended | | | + box_col | box | | | | plain | | | + circle_col | circle | | | | plain | | | + line_col | line | | | | plain | | | + lseg_col | lseg | | | | plain | | | + path_col | path | | | | extended | | | + polygon_col | polygon | | | | extended | | | Indexes: "test_tab2_pkey" PRIMARY KEY, btree (id) Access method: heap @@ -293,10 +293,10 @@ SELECT * FROM employee_projects ORDER BY emp_id, project_id; (3 rows) SELECT * FROM products ORDER BY product_id; - product_id | product_name | price | stock_quantity | discontinued | product_description | added | updated | category -------------+--------------+-------+----------------+--------------+--------------------------+---------------------+------------------------+---------- - 1 | Product A | 19.99 | 150 | f | Description of Product A | 2023-01-01 12:00:00 | 2023-01-01 17:00:00+05 | - 2 | Product B | 29.99 | 200 | t | Description of Product B | 2023-02-01 15:00:00 | 2023-02-01 20:00:00+05 | + product_id | product_name | price | stock_quantity | discontinued | product_description | added | updated | category +------------+--------------+-------+----------------+--------------+--------------------------+---------------------+---------------------+---------- + 1 | Product A | 19.99 | 150 | f | Description of Product A | 2023-01-01 12:00:00 | 2023-01-01 12:00:00 | + 2 | Product B | 29.99 | 200 | t | Description of Product B | 2023-02-01 15:00:00 | 2023-02-01 15:00:00 | (2 rows) SELECT * FROM "CaseSensitiveTable" ORDER BY "ID"; @@ -313,9 +313,9 @@ SELECT * FROM test_tab1 ORDER BY id; (1 row) SELECT * FROM test_tab2 ORDER BY id; - id | timestamp_col | interval_col | inet_col | cidr_col | macaddr_col | bit_col | varbit_col | box_col | circle_col | line_col | lseg_col | path_col | polygon_col -----+------------------------+---------------+-------------+----------------+-------------------+----------+------------+-------------+------------+----------+---------------+---------------+--------------- - 1 | 2023-01-01 17:00:00+05 | 1 year 2 mons | 192.168.1.1 | 192.168.0.0/24 | 08:00:2b:01:02:03 | 10101010 | 10101010 | (1,1),(0,0) | <(1,1),1> | {1,2,3} | [(0,0),(1,1)] | ((0,0),(1,1)) | ((0,0),(1,1)) + id | timestamp_col | interval_col | inet_col | cidr_col | macaddr_col | bit_col | varbit_col | box_col | circle_col | line_col | lseg_col | path_col | polygon_col +----+---------------------+---------------+-------------+----------------+-------------------+----------+------------+-------------+------------+----------+---------------+---------------+--------------- + 1 | 2023-01-01 12:00:00 | 1 year 2 mons | 192.168.1.1 | 192.168.0.0/24 | 08:00:2b:01:02:03 | 10101010 | 10101010 | (1,1),(0,0) | <(1,1),1> | {1,2,3} | [(0,0),(1,1)] | ((0,0),(1,1)) | ((0,0),(1,1)) (1 row) SELECT * FROM test_tab3 ORDER BY id; diff --git a/t/lib/config.env b/t/lib/config.env index a795258..c6c925d 100644 --- a/t/lib/config.env +++ b/t/lib/config.env @@ -43,4 +43,4 @@ export EDGE_CLI="pgedge" export EDGE_ACTUAL_OUT_DIR="/tmp/auto_ddl/" # To ensure locale related outputs (such as monetary values) stay consistent -export LC_ALL="en_US.UTF-8" +export LC_ALL="C.UTF-8" From d407d7b0dad0f56eefbc88139457d330728b3bd3 Mon Sep 17 00:00:00 2001 From: "A. 
Hayee Bhatti" Date: Wed, 30 Oct 2024 20:35:57 +0500 Subject: [PATCH 36/48] [AutoDDL] Regression stabilisations continued (minor adjustments around database encoding) --- t/auto_ddl/6666a_all_objects_create_n1.out | 20 ++++++++----------- t/auto_ddl/6666a_all_objects_create_n1.sql | 8 ++------ ...6666b_all_objects_validate_and_drop_n2.out | 18 +++++++---------- ...6666b_all_objects_validate_and_drop_n2.sql | 8 ++------ t/auto_ddl/6666c_all_objects_validate_n1.out | 16 ++++++--------- t/auto_ddl/6666c_all_objects_validate_n1.sql | 8 ++------ 6 files changed, 27 insertions(+), 51 deletions(-) diff --git a/t/auto_ddl/6666a_all_objects_create_n1.out b/t/auto_ddl/6666a_all_objects_create_n1.out index 30bdffd..0ed52b5 100644 --- a/t/auto_ddl/6666a_all_objects_create_n1.out +++ b/t/auto_ddl/6666a_all_objects_create_n1.out @@ -275,16 +275,14 @@ RESET SELECT datname AS name, pg_catalog.pg_get_userbyid(datdba) AS owner, - pg_catalog.pg_encoding_to_char(encoding) AS encoding, - datcollate AS collate, - datctype AS ctype + pg_catalog.pg_encoding_to_char(encoding) AS encoding FROM pg_database WHERE datname = 'obj_database'; - name | owner | encoding | collate | ctype ---------------+-----------+----------+-------------+------------- - obj_database | adminuser | UTF8 | en_US.UTF-8 | en_US.UTF-8 + name | owner | encoding +--------------+-----------+---------- + obj_database | adminuser | UTF8 (1 row) -- Validate extension @@ -377,18 +375,16 @@ Publications: -- Validate collation , using a query instead of meta command as there are catalog changes in pg17 SELECT n.nspname AS schema, - c.collname AS name, - c.collcollate AS collate, - c.collctype AS ctype + c.collname AS name FROM pg_collation c JOIN pg_namespace n ON n.oid = c.collnamespace WHERE c.collname = 'obj_collation'; - schema | name | collate | ctype ---------+---------------+---------+------- - s1 | obj_collation | C | C + schema | name +--------+--------------- + s1 | obj_collation (1 row) -- Validate conversion diff --git a/t/auto_ddl/6666a_all_objects_create_n1.sql b/t/auto_ddl/6666a_all_objects_create_n1.sql index b6bba6f..99310ec 100644 --- a/t/auto_ddl/6666a_all_objects_create_n1.sql +++ b/t/auto_ddl/6666a_all_objects_create_n1.sql @@ -192,9 +192,7 @@ RESET ROLE; SELECT datname AS name, pg_catalog.pg_get_userbyid(datdba) AS owner, - pg_catalog.pg_encoding_to_char(encoding) AS encoding, - datcollate AS collate, - datctype AS ctype + pg_catalog.pg_encoding_to_char(encoding) AS encoding FROM pg_database WHERE @@ -237,9 +235,7 @@ SELECT count(*) FROM pg_tablespace WHERE spcname = 'obj_tablespace'; -- Validate collation , using a query instead of meta command as there are catalog changes in pg17 SELECT n.nspname AS schema, - c.collname AS name, - c.collcollate AS collate, - c.collctype AS ctype + c.collname AS name FROM pg_collation c JOIN diff --git a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out index 3ce8e4f..d1f86c4 100644 --- a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out +++ b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out @@ -10,15 +10,13 @@ SELECT pg_sleep(2);--to ensure all objects are replicated SELECT datname AS name, pg_catalog.pg_get_userbyid(datdba) AS owner, - pg_catalog.pg_encoding_to_char(encoding) AS encoding, - datcollate AS collate, - datctype AS ctype + pg_catalog.pg_encoding_to_char(encoding) AS encoding FROM pg_database WHERE datname = 'obj_database'; - name | owner | encoding | collate | ctype -------+-------+----------+---------+------- + 
name | owner | encoding +------+-------+---------- (0 rows) -- Validate extension @@ -112,18 +110,16 @@ Publications: -- Validate collation , using a query instead of meta command as there are catalog changes in pg17 SELECT n.nspname AS schema, - c.collname AS name, - c.collcollate AS collate, - c.collctype AS ctype + c.collname AS name FROM pg_collation c JOIN pg_namespace n ON n.oid = c.collnamespace WHERE c.collname = 'obj_collation'; - schema | name | collate | ctype ---------+---------------+---------+------- - s1 | obj_collation | C | C + schema | name +--------+--------------- + s1 | obj_collation (1 row) -- Validate conversion diff --git a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql index 6fd3eca..6ff7a7b 100644 --- a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql +++ b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql @@ -6,9 +6,7 @@ SELECT pg_sleep(2);--to ensure all objects are replicated SELECT datname AS name, pg_catalog.pg_get_userbyid(datdba) AS owner, - pg_catalog.pg_encoding_to_char(encoding) AS encoding, - datcollate AS collate, - datctype AS ctype + pg_catalog.pg_encoding_to_char(encoding) AS encoding FROM pg_database WHERE @@ -51,9 +49,7 @@ SELECT count(*) FROM pg_tablespace WHERE spcname = 'obj_tablespace'; -- Validate collation , using a query instead of meta command as there are catalog changes in pg17 SELECT n.nspname AS schema, - c.collname AS name, - c.collcollate AS collate, - c.collctype AS ctype + c.collname AS name FROM pg_collation c JOIN diff --git a/t/auto_ddl/6666c_all_objects_validate_n1.out b/t/auto_ddl/6666c_all_objects_validate_n1.out index b1d772c..4759ba4 100644 --- a/t/auto_ddl/6666c_all_objects_validate_n1.out +++ b/t/auto_ddl/6666c_all_objects_validate_n1.out @@ -22,15 +22,13 @@ DROP SUBSCRIPTION SELECT datname AS name, pg_catalog.pg_get_userbyid(datdba) AS owner, - pg_catalog.pg_encoding_to_char(encoding) AS encoding, - datcollate AS collate, - datctype AS ctype + pg_catalog.pg_encoding_to_char(encoding) AS encoding FROM pg_database WHERE datname = 'obj_database'; - name | owner | encoding | collate | ctype -------+-------+----------+---------+------- + name | owner | encoding +------+-------+---------- (0 rows) -- Validate extension @@ -112,17 +110,15 @@ List of user mappings -- Validate collation , using a query instead of meta command as there are catalog changes in pg17 SELECT n.nspname AS schema, - c.collname AS name, - c.collcollate AS collate, - c.collctype AS ctype + c.collname AS name FROM pg_collation c JOIN pg_namespace n ON n.oid = c.collnamespace WHERE c.collname = 'obj_collation'; - schema | name | collate | ctype ---------+------+---------+------- + schema | name +--------+------ (0 rows) -- Validate conversion diff --git a/t/auto_ddl/6666c_all_objects_validate_n1.sql b/t/auto_ddl/6666c_all_objects_validate_n1.sql index 6552ae0..8a5ba9a 100644 --- a/t/auto_ddl/6666c_all_objects_validate_n1.sql +++ b/t/auto_ddl/6666c_all_objects_validate_n1.sql @@ -14,9 +14,7 @@ DROP SUBSCRIPTION obj_subscription; SELECT datname AS name, pg_catalog.pg_get_userbyid(datdba) AS owner, - pg_catalog.pg_encoding_to_char(encoding) AS encoding, - datcollate AS collate, - datctype AS ctype + pg_catalog.pg_encoding_to_char(encoding) AS encoding FROM pg_database WHERE @@ -58,9 +56,7 @@ SELECT count(*) FROM pg_tablespace WHERE spcname = 'obj_tablespace'; -- Validate collation , using a query instead of meta command as there are catalog changes in pg17 SELECT n.nspname AS schema, - 
c.collname AS name, - c.collcollate AS collate, - c.collctype AS ctype + c.collname AS name FROM pg_collation c JOIN From f13af8cb0bf5ebac8febb99d86c6e8cf5ad3b3be Mon Sep 17 00:00:00 2001 From: Cloud User Date: Thu, 31 Oct 2024 19:38:14 +0000 Subject: [PATCH 37/48] Updated t/spock_create_sub_specify_repsets.py to include the command arguments in order; if specified out of order, the command fails --- t/spock_create_sub_specify_repsets.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/t/spock_create_sub_specify_repsets.py b/t/spock_create_sub_specify_repsets.py index 75bcf7f..49f98b3 100644 --- a/t/spock_create_sub_specify_repsets.py +++ b/t/spock_create_sub_specify_repsets.py @@ -21,7 +21,9 @@ # Create a subscription with an array of repsets; this is the 'happy path' testcase. print("*"*100) -command = f"spock sub-create my_test_sub 'host={host} user={repuser} dbname={dbname} port={port}' {dbname} -r 'this_repset,that_repset,the_other_repset'" +## The arguments for the following command have to be in the same order as documented: + +command = f"spock sub-create my_test_sub 'host={host} port={port} user={repuser} dbname={dbname}' {dbname} -r 'this_repset,that_repset,the_other_repset'" res=util_test.run_cmd("Run spock sub-create -r.", command, f"{cluster_dir}/n1") print(f"Print our command here: {command}") print(f"Print res.stdout here: - {res.stdout}") From 2090136a69bcc89afa16394f14adb5b0a85297de Mon Sep 17 00:00:00 2001 From: Cloud User Date: Tue, 5 Nov 2024 17:45:37 +0000 Subject: [PATCH 38/48] Added checks to exception table cases to confirm that replication was still working; tweaked psql_read function in t/util_test.py to return content from query to spock sub-show-status in string format --- schedule_files/spock_4.0 | 4 +- t/cluster-init-2-node-cluster.py | 70 +++++++++++++++++++++++++++ t/spock_exception_table_case1.py | 83 ++++++++++++++++++++++++++++++-- t/spock_exception_table_case2.py | 60 +++++++++++++++++++++++ t/spock_exception_table_case3.py | 61 ++++++++++++++++++++++- t/spock_exception_table_case4.py | 60 +++++++++++++++++++++++ t/util_test.py | 2 +- 7 files changed, 332 insertions(+), 8 deletions(-) create mode 100644 t/cluster-init-2-node-cluster.py diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index 12ea421..d866370 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -1,7 +1,7 @@ ## Spock repair mode functionality t/setup_01_install.py -t/cluster-init.py +t/cluster-init-2-node-cluster.py ## Spock 4.0 Scripts t/spock_repair_function.py @@ -14,4 +14,4 @@ t/spock_exception_table_case4.py ## Remove components, Clean environment and free ports t/cleanup_01_node_remove.py # Delete the nc directory and pgpass file -#t/cleanup_03_remove_nc.py +t/cleanup_03_remove_nc.py diff --git a/t/cluster-init-2-node-cluster.py b/t/cluster-init-2-node-cluster.py new file mode 100644 index 0000000..bd75c1a --- /dev/null +++ b/t/cluster-init-2-node-cluster.py @@ -0,0 +1,70 @@ +import sys, os, util_test, subprocess, json + +# Print Script +print(f"Starting - {os.path.basename(__file__)}") + +# Get Test Settings +util_test.set_env() +repo=os.getenv("EDGE_REPO") +pgv=os.getenv("EDGE_INST_VERSION") +num_nodes=int(os.getenv("EDGE_NODES",2)) +home_dir=os.getenv("EDGE_HOME_DIR") +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +cluster_name=os.getenv("EDGE_CLUSTER","demo") +port=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","lcusr") +pw=os.getenv("EDGE_PASSWORD","password") +host=os.getenv("EDGE_HOST","localhost") 
+repuser=os.getenv("EDGE_REPUSER","susan") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +spockver=os.getenv("EDGE_SPOCK_VER","4.0.1") +dbname=os.getenv("EDGE_DB","lcdb") + +cwd=os.getcwd() +num_nodes=2 + + +#print("*"*100) + +print(f"home_dir = {home_dir}\n") +command = (f"cluster json-template {cluster_name} {dbname} {num_nodes} {usr} {pw} {pgv} {port}") +res=util_test.run_nc_cmd("This command should create a json file that defines a cluster", command, f"{home_dir}") +print(f"res = {res}\n") + +new_ver = (f"{spockver}") +print(f"Spock new version is: {new_ver}") +new_path_0 = (f"{cwd}/{cluster_dir}/n1") +new_path_1 = (f"{cwd}/{cluster_dir}/n2") + + +with open(f"{cluster_dir}/{cluster_name}.json", 'r') as file: + data = json.load(file) + #print(data) + data["pgedge"]["spock"]["spock_version"] = new_ver + data["node_groups"][0]["path"] = new_path_0 + data["node_groups"][1]["path"] = new_path_1 + +newdata = json.dumps(data, indent=4) +with open(f"{cluster_dir}/{cluster_name}.json", 'w') as file: + file.write(newdata) + +print(newdata) + +command = (f"cluster init {cluster_name}") +init=util_test.run_nc_cmd("This command should initialize a cluster based on the json file", command, f"{home_dir}") +print(f"init = {init.stdout}\n") +print("*"*100) + + +# Needle and Haystack +# Confirm the command worked by looking for: + +if "\nSyntaxError" not in str(init.stdout) or init.returncode == 1: + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + + + diff --git a/t/spock_exception_table_case1.py b/t/spock_exception_table_case1.py index a4ba27b..60b9ef7 100644 --- a/t/spock_exception_table_case1.py +++ b/t/spock_exception_table_case1.py @@ -70,15 +70,46 @@ print("*"*100) ## Confirm with SELECT * FROM spock.tables. -row7 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port1,pw,usr) -print(f"The n1 select * from spock.tables returns: {row7}") +res7 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port1,pw,usr) +print(f"The n1 select * from spock.tables returns: {res7}") +print("*"*100) + +## Confirm with SELECT * FROM spock.subscription. +(res198) = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port1,pw,usr) +print(f"The n1 select * from spock.subscriptions returns: {res198}") print("*"*100) ## Confirm with SELECT * FROM spock.tables on n2. -row8 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port2,pw,usr) -print(f"The n2 select * from spock.tables returns: {row8}") +res8 = util_test.read_psql("SELECT * FROM spock.tables;",host,dbname,port2,pw,usr) +print(f"The n2 select * from spock.tables returns: {res8}") +print("*"*100) + +## Confirm with SELECT * FROM spock.subscription. 
+res298 = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port2,pw,usr) +print(f"The n1 select * from spock.subscriptions returns: {res298}") +print("*"*100) + +## Check spock sub_show_status on both nodes: + +## Checking spock sub_show_status on n1: +sub_name ="sub_n1n2" +command199 = f"spock sub-show-status {sub_name} {dbname}" +res199=util_test.run_cmd("Checking spock sub-show-status", command199, f"{cluster_dir}/n1") +print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res199}") print("*"*100) +if "replicating" not in res199.stdout: + util_test.EXIT_FAIL() + +## Checking spock sub_show_status on n2: +sub_name = "sub_n2n1" +command299 = f"spock sub-show-status {sub_name} {dbname}" +res299=util_test.run_cmd("Checking spock sub-show-status", command299, f"{cluster_dir}/n2") +print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res299}") +print("*"*100) + +if "replicating" not in res299.stdout: + util_test.EXIT_FAIL() ## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will ## add a row to n1 that will not be replicated to n2 @@ -103,6 +134,28 @@ row2 = util_test.read_psql("SELECT * FROM case1",host,dbname,port2,pw,usr) print(row2) +## Check spock sub_show_status on both nodes: + +## Checking spock sub_show_status on n1: +sub_name ="sub_n1n2" +command399 = f"spock sub-show-status {sub_name} {dbname}" +res399=util_test.run_cmd("Checking spock sub-show-status", command399, f"{cluster_dir}/n1") +print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res399}") +print("*"*100) + +if "replicating" not in res399.stdout: + util_test.EXIT_FAIL() + +## Checking spock sub_show_status on n2: +sub_name = "sub_n2n1" +command499 = f"spock sub-show-status {sub_name} {dbname}" +res499=util_test.run_cmd("Checking spock sub-show-status", command499, f"{cluster_dir}/n2") +print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res499}") +print("*"*100) + +if "replicating" not in res499.stdout: + util_test.EXIT_FAIL() + print("*"*100) ## Update the record that is out of sync, forcing a record into the exception table... @@ -125,6 +178,28 @@ print(f"On n2, our table contains: {row}") print("*"*100) +## Check spock sub_show_status on both nodes: + +## Checking spock sub_show_status on n1: +sub_name ="sub_n1n2" +command599 = f"spock sub-show-status {sub_name} {dbname}" +res599=util_test.run_cmd("Checking spock sub-show-status", command599, f"{cluster_dir}/n1") +print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res599}") +print("*"*100) + +if "replicating" not in res599.stdout: + util_test.EXIT_FAIL() + +## Checking spock sub_show_status on n2: +sub_name = "sub_n2n1" +command699 = f"spock sub-show-status {sub_name} {dbname}" +res699=util_test.run_cmd("Checking spock sub-show-status", command699, f"{cluster_dir}/n2") +print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res699}") +print("*"*100) + +if "replicating" not in res699.stdout: + util_test.EXIT_FAIL() + ## Query the spock.exception_log; adding this command to cover error in 4.0.4 where a query on the wrong node caused a server crash. row1 = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case1';",host,dbname,port1,pw,usr) print(f"This command is the query that used to cause a server crash! 
The result s/b []: {row1}") diff --git a/t/spock_exception_table_case2.py b/t/spock_exception_table_case2.py index 9c00003..f515b10 100644 --- a/t/spock_exception_table_case2.py +++ b/t/spock_exception_table_case2.py @@ -69,6 +69,26 @@ print("*"*100) +## Checking spock sub_show_status on n1: +sub_name ="sub_n1n2" +command199 = f"spock sub-show-status {sub_name} {dbname}" +res199=util_test.run_cmd("Checking spock sub-show-status", command199, f"{cluster_dir}/n1") +print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res199}") +print("*"*100) + +if "replicating" not in res199.stdout: + util_test.EXIT_FAIL() + +## Checking spock sub_show_status on n2: +sub_name = "sub_n2n1" +command299 = f"spock sub-show-status {sub_name} {dbname}" +res299=util_test.run_cmd("Checking spock sub-show-status", command299, f"{cluster_dir}/n2") +print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res299}") +print("*"*100) + +if "replicating" not in res299.stdout: + util_test.EXIT_FAIL() + ## Confirm with SELECT * FROM spock.tables. row7 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port1,pw,usr) print(f"The n1 select * from spock.tables returns: {row7}") @@ -123,6 +143,26 @@ row2 = util_test.read_psql("SELECT * FROM case2",host,dbname,port2,pw,usr) print(f"We're in repair mode - n2 now contains 1/11/22/33: {row2}") +## Checking spock sub_show_status on n1: +sub_name ="sub_n1n2" +command399 = f"spock sub-show-status {sub_name} {dbname}" +res399=util_test.run_cmd("Checking spock sub-show-status", command399, f"{cluster_dir}/n1") +print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res399}") +print("*"*100) + +if "replicating" not in res399.stdout: + util_test.EXIT_FAIL() + +## Checking spock sub_show_status on n2: +sub_name = "sub_n2n1" +command499 = f"spock sub-show-status {sub_name} {dbname}" +res499=util_test.run_cmd("Checking spock sub-show-status", command499, f"{cluster_dir}/n2") +print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res499}") +print("*"*100) + +if "replicating" not in res499.stdout: + util_test.EXIT_FAIL() + print("*"*100) ## Node n2 has three rows; bid 33 is not replicated to n1, so an update should end up in the exception log table: @@ -145,6 +185,26 @@ print(f"bid 11 should be updated on n2, case2 contains: {row}") print("*"*100) +## Checking spock sub_show_status on n1: +sub_name ="sub_n1n2" +command599 = f"spock sub-show-status {sub_name} {dbname}" +res599=util_test.run_cmd("Checking spock sub-show-status", command599, f"{cluster_dir}/n1") +print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res599}") +print("*"*100) + +if "replicating" not in res599.stdout: + util_test.EXIT_FAIL() + +## Checking spock sub_show_status on n2: +sub_name = "sub_n2n1" +command699 = f"spock sub-show-status {sub_name} {dbname}" +res699=util_test.run_cmd("Checking spock sub-show-status", command699, f"{cluster_dir}/n2") +print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res699}") +print("*"*100) + +if "replicating" not in res699.stdout: + util_test.EXIT_FAIL() + ## Read from the spock.exception_log on n1 (the update of bid3 should be here); row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case2';",host,dbname,port1,pw,usr) print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") diff --git a/t/spock_exception_table_case3.py b/t/spock_exception_table_case3.py index 22d6c26..d566912 
100644 --- a/t/spock_exception_table_case3.py +++ b/t/spock_exception_table_case3.py @@ -71,6 +71,27 @@ res6=util_test.run_cmd("Adding our table to the default repset", command6, f"{cluster_dir}/n2") print(f"The repset-add-table command on n2 returns: {res6}") +## Check to make sure the cluster is replicating: +## Checking spock sub_show_status on n1: +sub_name ="sub_n1n2" +command199 = f"spock sub-show-status {sub_name} {dbname}" +res199=util_test.run_cmd("Checking spock sub-show-status", command199, f"{cluster_dir}/n1") +print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res199}") +print("*"*100) + +if "replicating" not in res199.stdout: + util_test.EXIT_FAIL() + +## Checking spock sub_show_status on n2: +sub_name = "sub_n2n1" +command299 = f"spock sub-show-status {sub_name} {dbname}" +res299=util_test.run_cmd("Checking spock sub-show-status", command299, f"{cluster_dir}/n2") +print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res299}") +print("*"*100) + +if "replicating" not in res299.stdout: + util_test.EXIT_FAIL() + print("*"*100) ## Confirm with SELECT relname FROM spock.tables. @@ -138,11 +159,29 @@ row2 = util_test.read_psql("SELECT * FROM case3",host,dbname,port2,pw,usr) print(f"Node n2 should contain bid 1/11/22: {row2}") +## Checking spock sub_show_status on n1: +sub_name ="sub_n1n2" +command399 = f"spock sub-show-status {sub_name} {dbname}" +res399=util_test.run_cmd("Checking spock sub-show-status", command399, f"{cluster_dir}/n1") +print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res399}") +print("*"*100) + +if "replicating" not in res399.stdout: + util_test.EXIT_FAIL() + +## Checking spock sub_show_status on n2: +sub_name = "sub_n2n1" +command499 = f"spock sub-show-status {sub_name} {dbname}" +res499=util_test.run_cmd("Checking spock sub-show-status", command499, f"{cluster_dir}/n2") +print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res499}") +print("*"*100) + +if "replicating" not in res499.stdout: + util_test.EXIT_FAIL() ## Check the results from the statement above, and you can see the duplicate primary key error ## is not being caught. Fix this when the patch is in. 
- ## Read from the spock.exception_log on n1; row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr).strip("[]") print(f"SELECT remote_new_tup FROM spock.exception_log on n1 returns an empty result set: {row}") @@ -163,6 +202,26 @@ print(f" n2 pgbench branches contains: {row}") print("*"*100) +## Checking spock sub_show_status on n1: +sub_name ="sub_n1n2" +command599 = f"spock sub-show-status {sub_name} {dbname}" +res599=util_test.run_cmd("Checking spock sub-show-status", command599, f"{cluster_dir}/n1") +print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res599}") +print("*"*100) + +if "replicating" not in res599.stdout: + util_test.EXIT_FAIL() + +## Checking spock sub_show_status on n2: +sub_name = "sub_n2n1" +command699 = f"spock sub-show-status {sub_name} {dbname}" +res699=util_test.run_cmd("Checking spock sub-show-status", command699, f"{cluster_dir}/n2") +print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res699}") +print("*"*100) + +if "replicating" not in res699.stdout: + util_test.EXIT_FAIL() + ## Read from the spock.exception_log on n2 for our needle/haystack step: row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case3';",host,dbname,port2,pw,usr) print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns: {row}") diff --git a/t/spock_exception_table_case4.py b/t/spock_exception_table_case4.py index 9c8ae79..8a41601 100644 --- a/t/spock_exception_table_case4.py +++ b/t/spock_exception_table_case4.py @@ -67,6 +67,26 @@ print(f"The repset-add-table command on n2 returns: {res6}") print("*"*100) +## Checking spock sub_show_status on n1: +sub_name ="sub_n1n2" +command199 = f"spock sub-show-status {sub_name} {dbname}" +res199=util_test.run_cmd("Checking spock sub-show-status", command199, f"{cluster_dir}/n1") +print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res199}") +print("*"*100) + +if "replicating" not in res199.stdout: + util_test.EXIT_FAIL() + +## Checking spock sub_show_status on n2: +sub_name = "sub_n2n1" +command299 = f"spock sub-show-status {sub_name} {dbname}" +res299=util_test.run_cmd("Checking spock sub-show-status", command299, f"{cluster_dir}/n2") +print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res299}") +print("*"*100) + +if "replicating" not in res299.stdout: + util_test.EXIT_FAIL() + ## Confirm with SELECT * FROM spock.tables. 
row7 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port1,pw,usr) print(f"The n1 select * from spock.tables returns: {row7}") @@ -93,6 +113,26 @@ n1enable = util_test.enable_autoddl(host, dbname, port1, pw, usr) n2enable = util_test.enable_autoddl(host, dbname, port2, pw, usr) +## Checking spock sub_show_status on n1: +sub_name ="sub_n1n2" +command399 = f"spock sub-show-status {sub_name} {dbname}" +res399=util_test.run_cmd("Checking spock sub-show-status", command399, f"{cluster_dir}/n1") +print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res399}") +print("*"*100) + +if "replicating" not in res399.stdout: + util_test.EXIT_FAIL() + +## Checking spock sub_show_status on n2: +sub_name = "sub_n2n1" +command499 = f"spock sub-show-status {sub_name} {dbname}" +res499=util_test.run_cmd("Checking spock sub-show-status", command499, f"{cluster_dir}/n2") +print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res499}") +print("*"*100) + +if "replicating" not in res499.stdout: + util_test.EXIT_FAIL() + ## Check our variable values; row = util_test.read_psql("SELECT name, setting FROM pg_settings WHERE NAME LIKE 'spock.%'",host,dbname,port1,pw,usr) print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") @@ -119,6 +159,26 @@ print(f"SELECT * FROM spock.exception_log on n2 returns: {row}") print("*"*100) +## Checking spock sub_show_status on n1: +sub_name ="sub_n1n2" +command599 = f"spock sub-show-status {sub_name} {dbname}" +res599=util_test.run_cmd("Checking spock sub-show-status", command599, f"{cluster_dir}/n1") +print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res599}") +print("*"*100) + +if "replicating" not in res599.stdout: + util_test.EXIT_FAIL() + +## Checking spock sub_show_status on n2: +sub_name = "sub_n2n1" +command699 = f"spock sub-show-status {sub_name} {dbname}" +res699=util_test.run_cmd("Checking spock sub-show-status", command699, f"{cluster_dir}/n2") +print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res699}") +print("*"*100) + +if "replicating" not in res699.stdout: + util_test.EXIT_FAIL() + if 'ALTER TABLE case4 DROP COLUMN filler' in str(row): diff --git a/t/util_test.py b/t/util_test.py index 7664bc2..be1d10c 100644 --- a/t/util_test.py +++ b/t/util_test.py @@ -316,7 +316,7 @@ def read_psql(cmd,host,dbname,port,pw,usr,indent=None): cur = con.cursor() cur.execute(cmd) print(cmd) - ret = json.dumps(cur.fetchall(), indent=indent) + ret = json.dumps(cur.fetchall(), indent=indent, default=str) cur.close() except Exception as e: exit_message(e) From 39bda541d96db3a8367319dcbd85ab3fa1233e91 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Wed, 6 Nov 2024 20:42:21 +0000 Subject: [PATCH 39/48] Added notes to spock_exception_table test/updated schedule to update when feature is implemented --- schedule_files/spock_4.0 | 2 +- t/spock_exception_table_case4.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index d866370..a7db7ee 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -9,7 +9,7 @@ t/spock_repair_function.py t/spock_exception_table_case1.py t/spock_exception_table_case2.py t/spock_exception_table_case3.py -t/spock_exception_table_case4.py +#t/spock_exception_table_case4.py ## Remove components, Clean environment and free ports t/cleanup_01_node_remove.py diff --git a/t/spock_exception_table_case4.py b/t/spock_exception_table_case4.py index 
8a41601..472465e 100644 --- a/t/spock_exception_table_case4.py +++ b/t/spock_exception_table_case4.py @@ -179,12 +179,12 @@ if "replicating" not in res699.stdout: util_test.EXIT_FAIL() - -if 'ALTER TABLE case4 DROP COLUMN filler' in str(row): +print(f"This case is failing by design") +if 'This test case needs to be updated when functionality is added to Spock' in str(row): - util_test.EXIT_PASS() -else: util_test.EXIT_FAIL() +else: + util_test.EXIT_PASS() util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) From dc7fd50ce919dd0b443fd54bbb702401753806d5 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Wed, 13 Nov 2024 16:46:36 +0000 Subject: [PATCH 40/48] Implement hack from Hayee to setup two parallel subscriptions in spock_3_sub_create_parallel.py - this isn't really a long-term solution, but we can use it for testing for now --- t/spock_3_sub_create_parallel.py | 70 ++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 t/spock_3_sub_create_parallel.py diff --git a/t/spock_3_sub_create_parallel.py b/t/spock_3_sub_create_parallel.py new file mode 100644 index 0000000..f191c16 --- /dev/null +++ b/t/spock_3_sub_create_parallel.py @@ -0,0 +1,70 @@ +# This test case (and the other spock_# tests) expect to be run against a two node cluster. +# If it fails with an error: pg_reload_conf \n----------------\n t\n(1 row)\n\nSet GUC snowflake.node to 1\n[\n {\n ... +# you are probably running against a 3 node cluster. +# Per conversation with Cady, we may want to use a new setup script written in .py that uses the same +# logic as 8000a/8000b, but that uses the environment variable values. + +import os, util_test, subprocess + +## Get Test Settings +util_test.set_env() + +def run(): + # Get environment variables + num_nodes = int(os.getenv("EDGE_NODES", 2)) + cluster_dir = os.getenv("EDGE_CLUSTER_DIR") + port=int(os.getenv("EDGE_START_PORT",6432)) + repuser=os.getenv("EDGE_REPUSER","pgedge") + pw=os.getenv("EDGE_PASSWORD","lcpasswd") + db=os.getenv("EDGE_DB","lcdb") + host=os.getenv("EDGE_HOST","localhost") + spock_delay=os.getenv("SPOCK_DELAY", None) + + parallel_array = [1,2] + + port_array = [] + for n in range(1,num_nodes+1): + port_array.append(port) + port = port + 1 + + for n in range(1,num_nodes+1): + for z in range(1,num_nodes+1): + for p in parallel_array: + if n!=z: + ## Create Subs + cmd_node = f"spock sub-create sub_n{n}n{z}_{p} 'host=127.0.0.1 port={port_array[z-1]} user={repuser} dbname={db}' {db}" + + if spock_delay is not None: + try: + spock_delay = int(spock_delay) + cmd_node += f" -a={spock_delay}" + + except Exception as e: + print(f"Error in getting spock_delay: {e}") + + res=util_test.run_cmd("Sub Create", cmd_node, f"{cluster_dir}/n{n}") + print(res) + if res.returncode == 1 or "sub_create" not in res.stdout: + util_test.exit_message(f"Fail - {os.path.basename(__file__)} - Sub Create", 1) + + print(f"Line 49, print db: {db}") + + ## Sub Show Status Test + cmd_node = (f"spock sub-show-status sub_n1n2_1 {db}") + res=util_test.run_cmd("Sub Show Status", cmd_node, f"{cluster_dir}/n1") + print(res) + if res.returncode == 1 or "replicating" not in res.stdout: + util_test.exit_message(f"Fail - {os.path.basename(__file__)} - Sub Show Status", 1) + + ## Node List Test + cmd_node = f"spock node-list {db}" + res=util_test.run_cmd("Node List", cmd_node, f"{cluster_dir}/n1") + print(res) + if res.returncode == 1 or "n2" not in res.stdout: + util_test.exit_message(f"Fail - {os.path.basename(__file__)} - Node List", 1) + +if __name__ == "__main__": 
+ ## Print Script + print(f"Starting - {os.path.basename(__file__)}") + run() + util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) From 138b8349dd5f56cf27bc16e172b9c8662630122e Mon Sep 17 00:00:00 2001 From: Cloud User Date: Fri, 15 Nov 2024 15:32:15 +0000 Subject: [PATCH 41/48] Updated test cases to add flexibility for new GUCS --- t/spock_exception_table_case1.py | 267 +++++++++++++------------------ t/spock_exception_table_case2.py | 245 ++++++++++++++-------------- 2 files changed, 227 insertions(+), 285 deletions(-) diff --git a/t/spock_exception_table_case1.py b/t/spock_exception_table_case1.py index 60b9ef7..7be8ebc 100644 --- a/t/spock_exception_table_case1.py +++ b/t/spock_exception_table_case1.py @@ -1,4 +1,4 @@ -import sys, os, util_test,subprocess +import sys, os, util_test, subprocess, time ## Print Script print(f"Starting - {os.path.basename(__file__)}") @@ -18,99 +18,82 @@ repset=os.getenv("EDGE_REPSET","demo-repset") spockpath=os.getenv("EDGE_SPOCK_PATH") dbname=os.getenv("EDGE_DB","lcdb") - +pgv=os.getenv("EDGE_INST_VERSION") +home_dir=os.getenv("EDGE_HOME_DIR") port2=port1+1 -print(port2) - -print("*"*100) nc_dir=os.getenv("NC_DIR","nc") -print(nc_dir) home_dir = os.getenv("EDGE_HOME_DIR") -print(home_dir) - -# Check the information from cluster list-nodes. -# -command = (f"cluster list-nodes demo") -res=util_test.run_nc_cmd("Exercise the list-nodes command", command, f"{home_dir}") -print(f"Command: {command}") -print(f"The list-nodes command returns = {res}\n") -print("*"*100) - -## Setup on n1: -## Create a table: -command1 = "CREATE TABLE case1 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" -row1 = util_test.write_psql(command1,host,dbname,port1,pw,usr) - -## Add a row: -command2 = "INSERT INTO case1 VALUES (1, 11111, 'filler')" -print(f"{command2}") -row2 = util_test.write_psql(command2,host,dbname,port1,pw,usr) - -## Add it to the default repset: -command3 = f"spock repset-add-table default case1 {dbname}" -res3=util_test.run_cmd("Adding our table to the default repset", command3, f"{cluster_dir}/n1") -print(f"The repset-add-table command on n1 returns: {res3}") - -print("*"*100) - -## Setup on n2: -## Create a table: -command4 = "CREATE TABLE case1 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" -row4 = util_test.write_psql(command4,host,dbname,port2,pw,usr) - -## Add a row: -command5 = "INSERT INTO case1 VALUES (1, 11111, 'filler')" -row5 = util_test.write_psql(command5,host,dbname,port2,pw,usr) - -## Add it to the default repset: -command6 = f"spock repset-add-table default case1 {dbname}" -res6=util_test.run_cmd("Adding our table to the default repset", command6, f"{cluster_dir}/n2") -print(f"The repset-add-table command on n2 returns: {res6}") -print("*"*100) - -## Confirm with SELECT * FROM spock.tables. -res7 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port1,pw,usr) -print(f"The n1 select * from spock.tables returns: {res7}") -print("*"*100) - -## Confirm with SELECT * FROM spock.subscription. -(res198) = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port1,pw,usr) -print(f"The n1 select * from spock.subscriptions returns: {res198}") -print("*"*100) - -## Confirm with SELECT * FROM spock.tables on n2. -res8 = util_test.read_psql("SELECT * FROM spock.tables;",host,dbname,port2,pw,usr) -print(f"The n2 select * from spock.tables returns: {res8}") -print("*"*100) - -## Confirm with SELECT * FROM spock.subscription. 
-res298 = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port2,pw,usr) -print(f"The n1 select * from spock.subscriptions returns: {res298}") -print("*"*100) - -## Check spock sub_show_status on both nodes: - -## Checking spock sub_show_status on n1: -sub_name ="sub_n1n2" -command199 = f"spock sub-show-status {sub_name} {dbname}" -res199=util_test.run_cmd("Checking spock sub-show-status", command199, f"{cluster_dir}/n1") -print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res199}") -print("*"*100) - -if "replicating" not in res199.stdout: - util_test.EXIT_FAIL() -## Checking spock sub_show_status on n2: -sub_name = "sub_n2n1" -command299 = f"spock sub-show-status {sub_name} {dbname}" -res299=util_test.run_cmd("Checking spock sub-show-status", command299, f"{cluster_dir}/n2") -print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res299}") +## Check the information from cluster list-nodes. +res=util_test.run_nc_cmd("Check the cluster with the list-nodes command", (f"cluster list-nodes demo"), f"{home_dir}") +print(f"The list-nodes command returns = {res}\n") print("*"*100) -if "replicating" not in res299.stdout: - util_test.EXIT_FAIL() - +## Set the exception logging behaviors for the test: +for n in range(num_nodes): + n=n+1 + ## Set spock.exception_behaviour to transdiscard: + res_set_seb=util_test.set_guc('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + + ## Set spock.exception_logging to all: + res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case1 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case1 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case1 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Check replication + print(f"We're on node n{n} now:") + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print("Confirming the configuration") +## Confirm the configuration: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Confirm with SELECT * FROM spock.subscription. 
+    result = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port,pw,usr) +    print(f"SELECT * from spock.subscriptions returns: {result}") +    print("*"*100) +    ## Confirm with spock sub-show-status +    status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") +    print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") +    port = port + 1 + +    if "replicating" not in status.stdout: +        util_test.EXIT_FAIL() + +## Test Steps ## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will ## add a row to n1 that will not be replicated to n2 @@ -122,83 +105,53 @@ END $$; """ -print(anon_block) +print(f"Executing the anonymous block: {anon_block}") row = util_test.write_psql(f"{anon_block}",host,dbname,port1,pw,usr) print(row) ## Look for our row on n1 and n2: - -row1 = util_test.read_psql("SELECT * FROM case1",host,dbname,port1,pw,usr) -print(row1) - -row2 = util_test.read_psql("SELECT * FROM case1",host,dbname,port2,pw,usr) -print(row2) - -## Check spock sub_show_status on both nodes: - -## Checking spock sub_show_status on n1: -sub_name ="sub_n1n2" -command399 = f"spock sub-show-status {sub_name} {dbname}" -res399=util_test.run_cmd("Checking spock sub-show-status", command399, f"{cluster_dir}/n1") -print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res399}") -print("*"*100) - -if "replicating" not in res399.stdout: - util_test.EXIT_FAIL() - -## Checking spock sub_show_status on n2: -sub_name = "sub_n2n1" -command499 = f"spock sub-show-status {sub_name} {dbname}" -res499=util_test.run_cmd("Checking spock sub-show-status", command499, f"{cluster_dir}/n2") -print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res499}") -print("*"*100) - -if "replicating" not in res499.stdout: - util_test.EXIT_FAIL() - -print("*"*100) - +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: +    n = n + 1 +    ## Confirm with SELECT * FROM case1. +    result = util_test.read_psql("SELECT * FROM case1;",host,dbname,port,pw,usr) +    print(f"SELECT * FROM case1 on node n{n} returns: {result}") +    port = port+1 +    print("*"*100) + +    if "replicating" not in status.stdout: +        util_test.EXIT_FAIL() + + +## More test steps: ## Update the record that is out of sync, forcing a record into the exception table...
row = util_test.write_psql("UPDATE case1 SET filler = 'hi' WHERE bid = 2",host,dbname,port1,pw,usr) -#print(f"The update to bid 2 returns: {row}") +print(f"TEST STEP: The update to bid 2 returns: {row}") print("*"*100) -## Demonstrate that replication continues on n1 +## Demonstrate that replication continues on n1: row = util_test.write_psql("UPDATE case1 SET filler = 'bye' WHERE bid = 1",host,dbname,port1,pw,usr) -#print(f"The update to bid 1 on n1 returns: {row}") -print("*"*100) - -## Show that the row update made it to n1 without causing a death spiral: -row = util_test.read_psql("SELECT * FROM case1",host,dbname,port1,pw,usr) -print(f"On n1, our table contains: {row}") -print("*"*100) - -## Show that the row update made it to n2 without a death spiral: -row = util_test.read_psql("SELECT * FROM case1",host,dbname,port2,pw,usr) -print(f"On n2, our table contains: {row}") -print("*"*100) - -## Check spock sub_show_status on both nodes: - -## Checking spock sub_show_status on n1: -sub_name ="sub_n1n2" -command599 = f"spock sub-show-status {sub_name} {dbname}" -res599=util_test.run_cmd("Checking spock sub-show-status", command599, f"{cluster_dir}/n1") -print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res599}") -print("*"*100) - -if "replicating" not in res599.stdout: - util_test.EXIT_FAIL() - -## Checking spock sub_show_status on n2: -sub_name = "sub_n2n1" -command699 = f"spock sub-show-status {sub_name} {dbname}" -res699=util_test.run_cmd("Checking spock sub-show-status", command699, f"{cluster_dir}/n2") -print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res699}") -print("*"*100) - -if "replicating" not in res699.stdout: - util_test.EXIT_FAIL() +print(f"TEST STEP: The update to bid 1 on n1 returns: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Check our table contents: + result = util_test.read_psql("SELECT * FROM case1;",host,dbname,port,pw,usr) + print(f"SELECT * from case1 on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() ## Query the spock.exception_log; adding this command to cover error in 4.0.4 where a query on the wrong node caused a server crash. 
row1 = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case1';",host,dbname,port1,pw,usr) @@ -208,9 +161,9 @@ if '[]' not in str(row1): util_test.EXIT_FAIL() -## Read from the spock.exception_log; +## Confirm the test results from the spock.exception_log: row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case1';",host,dbname,port2,pw,usr) -print(f"SELECT * FROM spock.exception_log on n2 returns: {row}") +print(f"TEST CONFIRMATION: SELECT * FROM spock.exception_log on n2 returns: {row}") print("*"*100) if '"value": 2, "attname": "bid", "atttype": "int4"' in str(row): diff --git a/t/spock_exception_table_case2.py b/t/spock_exception_table_case2.py index f515b10..65282a9 100644 --- a/t/spock_exception_table_case2.py +++ b/t/spock_exception_table_case2.py @@ -18,6 +18,7 @@ repset=os.getenv("EDGE_REPSET","demo-repset") spockpath=os.getenv("EDGE_SPOCK_PATH") dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") port2=port1+1 print(port2) @@ -36,89 +37,96 @@ print(f"The list-nodes command returns = {res}\n") print("*"*100) -## Setup on n1: -## Create a table: -command1 = "CREATE TABLE case2 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" -row1 = util_test.write_psql(command1,host,dbname,port1,pw,usr) - -## Add a row: -command2 = "INSERT INTO case2 VALUES (1, 11111, 'filler')" -print(f"{command2}") -row2 = util_test.write_psql(command2,host,dbname,port1,pw,usr) - -## Add it to the default repset: -command3 = f"spock repset-add-table default case2 {dbname}" -res3=util_test.run_cmd("Adding our table to the default repset", command3, f"{cluster_dir}/n1") -print(f"The repset-add-table command on n1 returns: {res3}") - -print("*"*100) - -## Setup on n2: -## Create a table: -command4 = "CREATE TABLE case2 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" -row4 = util_test.write_psql(command4,host,dbname,port2,pw,usr) - -## Add a row: -command5 = "INSERT INTO case2 VALUES (1, 11111, 'filler')" -row5 = util_test.write_psql(command5,host,dbname,port2,pw,usr) - -## Add it to the default repset: -command6 = f"spock repset-add-table default case2 {dbname}" -res6=util_test.run_cmd("Adding our table to the default repset", command6, f"{cluster_dir}/n2") -print(f"The repset-add-table command on n2 returns: {res6}") - -print("*"*100) - -## Checking spock sub_show_status on n1: -sub_name ="sub_n1n2" -command199 = f"spock sub-show-status {sub_name} {dbname}" -res199=util_test.run_cmd("Checking spock sub-show-status", command199, f"{cluster_dir}/n1") -print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res199}") -print("*"*100) - -if "replicating" not in res199.stdout: - util_test.EXIT_FAIL() - -## Checking spock sub_show_status on n2: -sub_name = "sub_n2n1" -command299 = f"spock sub-show-status {sub_name} {dbname}" -res299=util_test.run_cmd("Checking spock sub-show-status", command299, f"{cluster_dir}/n2") -print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res299}") -print("*"*100) - -if "replicating" not in res299.stdout: - util_test.EXIT_FAIL() - -## Confirm with SELECT * FROM spock.tables. -row7 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port1,pw,usr) -print(f"The n1 select * from spock.tables returns: {row7}") -print("*"*100) - -## Confirm with SELECT * FROM spock.tables on n2. 
-row8 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port2,pw,usr) -print(f"The n2 select * from spock.tables returns: {row8}") -print("*"*100) +for n in range(num_nodes): + n=n+1 + ## Set spock.exception_behaviour to transdiscard: + res_set_seb=util_test.set_guc('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + + ## Set spock.exception_logging to all: + res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case2 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case2 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case2 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Check replication + print(f"{n} is the value in n") + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print("Confirming the configuration") +## Confirm the configuration: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Confirm with SELECT * FROM spock.subscription. 
+ result = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.subscriptions returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n1 returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() ## Add two rows that should be replicated from n1 to n2: row = util_test.write_psql("INSERT INTO case2 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) -print(f"We inserted bid 11 on n1: {row}") +print(f"TEST STEP: We inserted bid 11 on n1: {row}") print("*"*100) row = util_test.write_psql("INSERT INTO case2 VALUES(22, 22000, null)",host,dbname,port1,pw,usr) -print(f"We inserted bid 22 on n1: {row}") -print("*"*100) - -## Look for our rows on n1 and n2: - -row1 = util_test.read_psql("SELECT * FROM case2",host,dbname,port1,pw,usr) -print(f"Node n1 should contain bid 1/11/22: {row1}") - -row2 = util_test.read_psql("SELECT * FROM case2",host,dbname,port2,pw,usr) -print(f"Node n2 should contain bid 1/11/22: {row2}") - -print("*"*100) +print(f"TEST STEP: We inserted bid 22 on n1: {row}") +print("*"*100) + + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case2;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() ## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will ## add a row to n2 that will not be replicated to n1: @@ -137,80 +145,61 @@ ## Check the rows on n1 and n2: -row1 = util_test.read_psql("SELECT * FROM case2",host,dbname,port1,pw,usr) -print(f"We're in repair mode - n1 now contains 1/11/22: {row1}") - -row2 = util_test.read_psql("SELECT * FROM case2",host,dbname,port2,pw,usr) -print(f"We're in repair mode - n2 now contains 1/11/22/33: {row2}") - -## Checking spock sub_show_status on n1: -sub_name ="sub_n1n2" -command399 = f"spock sub-show-status {sub_name} {dbname}" -res399=util_test.run_cmd("Checking spock sub-show-status", command399, f"{cluster_dir}/n1") -print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res399}") -print("*"*100) - -if "replicating" not in res399.stdout: - util_test.EXIT_FAIL() - -## Checking spock sub_show_status on n2: -sub_name = "sub_n2n1" -command499 = f"spock sub-show-status {sub_name} {dbname}" -res499=util_test.run_cmd("Checking spock sub-show-status", command499, f"{cluster_dir}/n2") -print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res499}") -print("*"*100) - -if "replicating" not in res499.stdout: - util_test.EXIT_FAIL() +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT 
* FROM case2;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() -print("*"*100) +print(f"TEST STEP: We're in repair mode - the table on n1 should contain 1/11/22, and n2 should contain 1/11/22/33") ## Node n2 has three rows; bid 33 is not replicated to n1, so an update should end up in the exception log table: row = util_test.write_psql("UPDATE case2 SET filler = 'trouble' WHERE bid = 33",host,dbname,port2,pw,usr) -print(f"We're in repair mode - the update to bid 33 on n2 returns: {row}") +print(f"TEST STEP: We're in repair mode - the update to bid 33 on n2 returns: {row}") print("*"*100) -## Read from the spock.exception_log on n1; -#row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr).strip("[]") -#print(f"SELECT * FROM spock.exception_log returns: {row}") -#print("*"*100) - ## Demonstrate that replication continues row = util_test.write_psql("UPDATE case2 SET filler = 'replication check' WHERE bid = 11",host,dbname,port2,pw,usr) -print(f"The update to bid 11 on n1 returns: {row}") +print(f"TEST STEP: The update to bid 11 on n1 returns: {row}") print("*"*100) ## Show that the row update made it to n2 without causing a death spiral: row = util_test.read_psql("SELECT * FROM case2",host,dbname,port2,pw,usr).strip("[]") -print(f"bid 11 should be updated on n2, case2 contains: {row}") -print("*"*100) - -## Checking spock sub_show_status on n1: -sub_name ="sub_n1n2" -command599 = f"spock sub-show-status {sub_name} {dbname}" -res599=util_test.run_cmd("Checking spock sub-show-status", command599, f"{cluster_dir}/n1") -print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res599}") +print(f"TEST STEP: bid 11 should be updated on n2, case2 contains: {row}") print("*"*100) -if "replicating" not in res599.stdout: - util_test.EXIT_FAIL() - -## Checking spock sub_show_status on n2: -sub_name = "sub_n2n1" -command699 = f"spock sub-show-status {sub_name} {dbname}" -res699=util_test.run_cmd("Checking spock sub-show-status", command699, f"{cluster_dir}/n2") -print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res699}") -print("*"*100) +## Check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + print("*"*100) + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() -if "replicating" not in res699.stdout: - util_test.EXIT_FAIL() ## Read from the spock.exception_log on n1 (the update of bid3 should be here); row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case2';",host,dbname,port1,pw,usr) print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") print("*"*100) - if '"value": 33, "attname": "bid", "atttype": "int4"' in str(row): util_test.EXIT_PASS() From ab00036d33cf4395588884fefd166c5405632abc Mon Sep 
17 00:00:00 2001 From: Cloud User Date: Fri, 15 Nov 2024 16:07:37 +0000 Subject: [PATCH 42/48] Updated exception table test cases to add flexibility for new GUCS --- schedule_files/spock_4.0 | 4 +- t/spock_exception_table_case3.py | 250 +++++++++++++------------------ t/spock_exception_table_case4.py | 191 ----------------------- 3 files changed, 101 insertions(+), 344 deletions(-) delete mode 100644 t/spock_exception_table_case4.py diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index a7db7ee..73403e1 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -4,12 +4,10 @@ t/setup_01_install.py t/cluster-init-2-node-cluster.py ## Spock 4.0 Scripts -t/spock_repair_function.py -#t/spock_exception_table_case99.py +#t/spock_repair_function.py t/spock_exception_table_case1.py t/spock_exception_table_case2.py t/spock_exception_table_case3.py -#t/spock_exception_table_case4.py ## Remove components, Clean environment and free ports t/cleanup_01_node_remove.py diff --git a/t/spock_exception_table_case3.py b/t/spock_exception_table_case3.py index d566912..99f4eeb 100644 --- a/t/spock_exception_table_case3.py +++ b/t/spock_exception_table_case3.py @@ -18,6 +18,7 @@ repset=os.getenv("EDGE_REPSET","demo-repset") spockpath=os.getenv("EDGE_SPOCK_PATH") dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") port2=port1+1 print(port2) @@ -30,96 +31,77 @@ # Check the information from cluster list-nodes. # -command = (f"cluster list-nodes demo") -res=util_test.run_nc_cmd("Exercise the list-nodes command", command, f"{home_dir}") -print(f"Command: {command}") +res=util_test.run_nc_cmd("Exercise the list-nodes command", (f"cluster list-nodes demo"), f"{home_dir}") print(f"The list-nodes command returns = {res}\n") print("*"*100) -## Setup on n1: -## Create a table: -command1 = "CREATE TABLE case3 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" -row1 = util_test.write_psql(command1,host,dbname,port1,pw,usr) -#print(f"The create table statement on n1 returns: {row1}") - -## Add a row: -command2 = "INSERT INTO case3 VALUES (1, 11111, 'filler')" -print(f"{command2}") -row2 = util_test.write_psql(command2,host,dbname,port1,pw,usr) -#print(f"The insert statement on n1 returns: {row2}") - -## Add it to the default repset: -command3 = f"spock repset-add-table default case3 {dbname}" -res3=util_test.run_cmd("Adding our table to the default repset", command3, f"{cluster_dir}/n1") -print(f"The repset-add-table command on n1 returns: {res3}") - -print("*"*100) - -## Setup on n2: -## Create a table: -command4 = "CREATE TABLE case3 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" -row4 = util_test.write_psql(command4,host,dbname,port2,pw,usr) -#print(f"The create table statement on n2 returns: {row4}") - -## Add a row: -command5 = "INSERT INTO case3 VALUES (1, 11111, 'filler')" -row5 = util_test.write_psql(command5,host,dbname,port2,pw,usr) -#print(f"The insert statement on n2 returns: {row5}") - -## Add it to the default repset: -command6 = f"spock repset-add-table default case3 {dbname}" -res6=util_test.run_cmd("Adding our table to the default repset", command6, f"{cluster_dir}/n2") -print(f"The repset-add-table command on n2 returns: {res6}") - -## Check to make sure the cluster is replicating: -## Checking spock sub_show_status on n1: -sub_name ="sub_n1n2" -command199 = f"spock sub-show-status {sub_name} {dbname}" -res199=util_test.run_cmd("Checking spock sub-show-status", command199, f"{cluster_dir}/n1") -print(f"The spock 
sub-show-status {sub_name} {dbname} command on n1 returns: {res199}") -print("*"*100) - -if "replicating" not in res199.stdout: - util_test.EXIT_FAIL() - -## Checking spock sub_show_status on n2: -sub_name = "sub_n2n1" -command299 = f"spock sub-show-status {sub_name} {dbname}" -res299=util_test.run_cmd("Checking spock sub-show-status", command299, f"{cluster_dir}/n2") -print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res299}") -print("*"*100) - -if "replicating" not in res299.stdout: - util_test.EXIT_FAIL() - -print("*"*100) - -## Confirm with SELECT relname FROM spock.tables. -row7 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port1,pw,usr) -print(f"The n1 select * from spock.tables returns: {row7}") -print("*"*100) - -## Confirm with SELECT relname FROM spock.tables on n2. -row8 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port2,pw,usr) -print(f"The n2 select * from spock.tables returns: {row8}") -print("*"*100) +for n in range(num_nodes): + n=n+1 + ## Set spock.exception_behaviour to transdiscard: + res_set_seb=util_test.set_guc('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + + ## Set spock.exception_logging to all: + res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case3 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case3 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case3 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + print("*"*100) + ## Confirm with SELECT * FROM spock.tables. 
+ result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + print("*"*100) + ## Check replication + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() ## Add one row that should be replicated from n1 to n2: row = util_test.write_psql("INSERT INTO case3 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) -print(f"We inserted bid 11 on n1: {row}") -print("*"*100) - -## Look for our rows on n1 and n2: - -row1 = util_test.read_psql("SELECT * FROM case3",host,dbname,port1,pw,usr) -print(f"Node n1 should contain bid 1/11: {row1}") - -row2 = util_test.read_psql("SELECT * FROM case3",host,dbname,port2,pw,usr) -print(f"Node n2 should contain bid 1/11: {row2}") +print(f"TEST STEP: We inserted bid 11 on n1: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case3;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() +print(f"Node n1 and n2 should both contain bid 1/11") print("*"*100) + ## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will ## add a row to n2 that will not be replicated to n1: @@ -135,49 +117,31 @@ row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) print(row) -## Check the rows on n1 and n2: - -row1 = util_test.read_psql("SELECT * FROM case3",host,dbname,port1,pw,usr) -print(f"We're in repair mode - n1 now contains 1/11: {row1}") - -row2 = util_test.read_psql("SELECT * FROM case3",host,dbname,port2,pw,usr) -print(f"We're in repair mode - n2 now contains 1/11/22: {row2}") - -print("*"*100) - ## Add a row to n1 that has the same bid as the row we added on n2; we're still in repair mode: row = util_test.write_psql("INSERT INTO case3 VALUES(22, 99000, null)",host,dbname,port1,pw,usr) -print(f"We just tried to insert bid 22 on n1 - this should fail, but it doesn't: {row}") -print("*"*100) - -## Look for our rows on n1 and n2: - -row1 = util_test.read_psql("SELECT * FROM case3",host,dbname,port1,pw,usr) -print(f"Node n1 should contain bid 1/11: {row1}") - -row2 = util_test.read_psql("SELECT * FROM case3",host,dbname,port2,pw,usr) -print(f"Node n2 should contain bid 1/11/22: {row2}") - -## Checking spock sub_show_status on n1: -sub_name ="sub_n1n2" -command399 = f"spock sub-show-status {sub_name} {dbname}" -res399=util_test.run_cmd("Checking spock sub-show-status", command399, f"{cluster_dir}/n1") -print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res399}") -print("*"*100) - -if "replicating" not in res399.stdout: - util_test.EXIT_FAIL() - -## Checking spock sub_show_status on n2: -sub_name = "sub_n2n1" -command499 = f"spock sub-show-status {sub_name} 
{dbname}" -res499=util_test.run_cmd("Checking spock sub-show-status", command499, f"{cluster_dir}/n2") -print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res499}") -print("*"*100) +print(f"TEST STEP: We just tried to insert bid 22 on n1 - this should fail, but it doesn't: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case3;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() -if "replicating" not in res499.stdout: - util_test.EXIT_FAIL() +print(f"Node n1 should contain bid 1/11") +print(f"Node n2 should contain bid 1/11/22") ## Check the results from the statement above, and you can see the duplicate primary key error ## is not being caught. Fix this when the patch is in. @@ -192,42 +156,28 @@ print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns the replication error: {row}") print("*"*100) -## Show that the row update hasn't caused a death spiral: -row = util_test.read_psql("SELECT * FROM case3",host,dbname,port1,pw,usr).strip("[]") -print(f" n1 pgbench branches contains: {row}") -print("*"*100) - -## Show that the row update hasn't caused a death spiral: -row = util_test.read_psql("SELECT * FROM case3",host,dbname,port2,pw,usr).strip("[]") -print(f" n2 pgbench branches contains: {row}") -print("*"*100) - -## Checking spock sub_show_status on n1: -sub_name ="sub_n1n2" -command599 = f"spock sub-show-status {sub_name} {dbname}" -res599=util_test.run_cmd("Checking spock sub-show-status", command599, f"{cluster_dir}/n1") -print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res599}") -print("*"*100) - -if "replicating" not in res599.stdout: - util_test.EXIT_FAIL() - -## Checking spock sub_show_status on n2: -sub_name = "sub_n2n1" -command699 = f"spock sub-show-status {sub_name} {dbname}" -res699=util_test.run_cmd("Checking spock sub-show-status", command699, f"{cluster_dir}/n2") -print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res699}") -print("*"*100) - -if "replicating" not in res699.stdout: - util_test.EXIT_FAIL() +## Look for our row on n1 and n2 and check the replication state - specifically we don't want a death spiral here: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case3;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() ## Read from the spock.exception_log on n2 for our needle/haystack step: row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE 
table_name = 'case3';",host,dbname,port2,pw,usr) -print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns: {row}") +print(f"TEST STEP: SELECT remote_new_tup FROM spock.exception_log on n2 returns: {row}") print("*"*100) - if '"value": 22, "attname": "bid", "atttype": "int4"' in str(row): util_test.EXIT_PASS() diff --git a/t/spock_exception_table_case4.py b/t/spock_exception_table_case4.py deleted file mode 100644 index 472465e..0000000 --- a/t/spock_exception_table_case4.py +++ /dev/null @@ -1,191 +0,0 @@ -import sys, os, util_test, subprocess, time - -## Print Script -print(f"Starting - {os.path.basename(__file__)}") - -## Get Test Settings -util_test.set_env() -# -repo=os.getenv("EDGE_REPO") -num_nodes=int(os.getenv("EDGE_NODES",2)) -cluster_dir=os.getenv("EDGE_CLUSTER_DIR") -port1=int(os.getenv("EDGE_START_PORT",6432)) -usr=os.getenv("EDGE_USERNAME","admin") -pw=os.getenv("EDGE_PASSWORD","password1") -db=os.getenv("EDGE_DB","demo") -host=os.getenv("EDGE_HOST","localhost") -repuser=os.getenv("EDGE_REPUSER","pgedge") -repset=os.getenv("EDGE_REPSET","demo-repset") -spockpath=os.getenv("EDGE_SPOCK_PATH") -dbname=os.getenv("EDGE_DB","lcdb") -seconds=int(os.getenv("EDGE_SLEEP")) - -port2 = port1+1 -print(port2) -nc_dir=os.getenv("NC_DIR","nc") -home_dir = os.getenv("EDGE_HOME_DIR") - -## Check the information from cluster list-nodes. -command = (f"cluster list-nodes demo") -res=util_test.run_nc_cmd("Exercise the list-nodes command", command, f"{home_dir}") -print(f"Command: {command}") -print(f"The list-nodes command returns = {res}\n") -print("*"*100) - -## Setup on n1 -## Create a table with three columns: -command1 = "CREATE TABLE case4 (bid integer PRIMARY KEY, bbalance integer, filler character(88))" -row1 = util_test.write_psql(command1,host,dbname,port1,pw,usr) -#print(f"The create table statement on n1 returns: {row1}") - -## Add a row: -command2 = "INSERT INTO case4 VALUES (1, 11111, 'filler')" -print(f"{command2}") -row2 = util_test.write_psql(command2,host,dbname,port1,pw,usr) -#print(f"The insert statement on n1 returns: {row2}") - -## Add it to the default repset: -command3 = f"spock repset-add-table default case4 {dbname}" -res3=util_test.run_cmd("Adding our table to the default repset", command3, f"{cluster_dir}/n1") -print(f"The repset-add-table command on n1 returns: {res3}") -print("*"*100) - -## Setup on n2 -## Create a table with the same name as the table on n1, but with two columns: -command4 = "CREATE TABLE case4 (bid integer PRIMARY KEY, bbalance integer)" -row4 = util_test.write_psql(command4,host,dbname,port2,pw,usr) -#print(f"The create table statement on n2 returns: {row4}") - -## Add a row: -command5 = "INSERT INTO case4 VALUES (1, 11111)" -row5 = util_test.write_psql(command5,host,dbname,port2,pw,usr) -#print(f"The insert statement on n2 returns: {row5}") - -## Add it to the default repset: -command6 = f"spock repset-add-table default case4 {dbname}" -res6=util_test.run_cmd("Adding our table to the default repset", command6, f"{cluster_dir}/n2") -print(f"The repset-add-table command on n2 returns: {res6}") -print("*"*100) - -## Checking spock sub_show_status on n1: -sub_name ="sub_n1n2" -command199 = f"spock sub-show-status {sub_name} {dbname}" -res199=util_test.run_cmd("Checking spock sub-show-status", command199, f"{cluster_dir}/n1") -print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res199}") -print("*"*100) - -if "replicating" not in res199.stdout: - util_test.EXIT_FAIL() - -## Checking spock sub_show_status on n2: 
-sub_name = "sub_n2n1" -command299 = f"spock sub-show-status {sub_name} {dbname}" -res299=util_test.run_cmd("Checking spock sub-show-status", command299, f"{cluster_dir}/n2") -print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res299}") -print("*"*100) - -if "replicating" not in res299.stdout: - util_test.EXIT_FAIL() - -## Confirm with SELECT * FROM spock.tables. -row7 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port1,pw,usr) -print(f"The n1 select * from spock.tables returns: {row7}") -print("*"*100) - -## Check the values in case4 on n1. -row7 = util_test.read_psql("SELECT * FROM case4;",host,dbname,port1,pw,usr) -print(f"The n1 select * from case4 returns: {row7}") -print("*"*100) - - -## Confirm with SELECT * FROM spock.tables on n2. -row8 = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port2,pw,usr) -print(f"The n2 select * from spock.tables returns: {row8}") -print("*"*100) - -## Check the values in case4 on n2. -row7 = util_test.read_psql("SELECT * FROM case4;",host,dbname,port2,pw,usr) -print(f"The n2 select * from case4 returns: {row7}") -print("*"*100) - - -## Enable AutoDDL (uses connection that allows ALTER SYSTEM SET) and reload configuration: -n1enable = util_test.enable_autoddl(host, dbname, port1, pw, usr) -n2enable = util_test.enable_autoddl(host, dbname, port2, pw, usr) - -## Checking spock sub_show_status on n1: -sub_name ="sub_n1n2" -command399 = f"spock sub-show-status {sub_name} {dbname}" -res399=util_test.run_cmd("Checking spock sub-show-status", command399, f"{cluster_dir}/n1") -print(f"The spock sub-show-status {sub_name} {dbname} command on n1 returns: {res399}") -print("*"*100) - -if "replicating" not in res399.stdout: - util_test.EXIT_FAIL() - -## Checking spock sub_show_status on n2: -sub_name = "sub_n2n1" -command499 = f"spock sub-show-status {sub_name} {dbname}" -res499=util_test.run_cmd("Checking spock sub-show-status", command499, f"{cluster_dir}/n2") -print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res499}") -print("*"*100) - -if "replicating" not in res499.stdout: - util_test.EXIT_FAIL() - -## Check our variable values; -row = util_test.read_psql("SELECT name, setting FROM pg_settings WHERE NAME LIKE 'spock.%'",host,dbname,port1,pw,usr) -print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") -print("*"*100) - -## Check our variable values; -row = util_test.read_psql("SELECT name, setting FROM pg_settings WHERE NAME LIKE 'spock.%'",host,dbname,port2,pw,usr) -print(f"SELECT * FROM spock.exception_log on n2 returns: {row}") -print("*"*100) - -## Drop the filler column from n1: -command1 = "ALTER TABLE case4 DROP COLUMN filler" -row1 = util_test.write_psql(command1,host,dbname,port1,pw,usr) -#print(f"We just dropped the filler column from n1: {row1}") -print("*"*100) - -## Read from the spock.exception_log; -row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr) -print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") -print("*"*100) - -## Read from the spock.exception_log; -row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr) -print(f"SELECT * FROM spock.exception_log on n2 returns: {row}") -print("*"*100) - -## Checking spock sub_show_status on n1: -sub_name ="sub_n1n2" -command599 = f"spock sub-show-status {sub_name} {dbname}" -res599=util_test.run_cmd("Checking spock sub-show-status", command599, f"{cluster_dir}/n1") -print(f"The spock 
sub-show-status {sub_name} {dbname} command on n1 returns: {res599}") -print("*"*100) - -if "replicating" not in res599.stdout: - util_test.EXIT_FAIL() - -## Checking spock sub_show_status on n2: -sub_name = "sub_n2n1" -command699 = f"spock sub-show-status {sub_name} {dbname}" -res699=util_test.run_cmd("Checking spock sub-show-status", command699, f"{cluster_dir}/n2") -print(f"The spock sub-show-status {sub_name} {dbname} command on n2 returns: {res699}") -print("*"*100) - -if "replicating" not in res699.stdout: - util_test.EXIT_FAIL() - -print(f"This case is failing by design") -if 'This test case needs to be updated when functionality is added to Spock' in str(row): - - util_test.EXIT_FAIL() -else: - util_test.EXIT_PASS() - -util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) - - From d7bd820aaa375cc5443b79ad8ec25bbdc2f9524f Mon Sep 17 00:00:00 2001 From: Cloud User Date: Mon, 18 Nov 2024 15:32:29 +0000 Subject: [PATCH 43/48] Updated spock_exception_table test cases to accommodate testing multiple setting for exception handling GUCs --- ...=> spock_exception_table_case_discard1.py} | 4 +- ...=> spock_exception_table_case_discard2.py} | 4 +- ...=> spock_exception_table_case_discard3.py} | 4 +- t/spock_exception_table_case_sub-disable1.py | 177 +++++++++++++++ t/spock_exception_table_case_sub-disable2.py | 210 ++++++++++++++++++ t/spock_exception_table_case_sub-disable3.py | 189 ++++++++++++++++ t/spock_exception_table_case_transdiscard1.py | 176 +++++++++++++++ t/spock_exception_table_case_transdiscard2.py | 210 ++++++++++++++++++ t/spock_exception_table_case_transdiscard3.py | 189 ++++++++++++++++ 9 files changed, 1157 insertions(+), 6 deletions(-) rename t/{spock_exception_table_case1.py => spock_exception_table_case_discard1.py} (98%) rename t/{spock_exception_table_case2.py => spock_exception_table_case_discard2.py} (98%) rename t/{spock_exception_table_case3.py => spock_exception_table_case_discard3.py} (98%) create mode 100644 t/spock_exception_table_case_sub-disable1.py create mode 100644 t/spock_exception_table_case_sub-disable2.py create mode 100644 t/spock_exception_table_case_sub-disable3.py create mode 100644 t/spock_exception_table_case_transdiscard1.py create mode 100644 t/spock_exception_table_case_transdiscard2.py create mode 100644 t/spock_exception_table_case_transdiscard3.py diff --git a/t/spock_exception_table_case1.py b/t/spock_exception_table_case_discard1.py similarity index 98% rename from t/spock_exception_table_case1.py rename to t/spock_exception_table_case_discard1.py index 7be8ebc..36463fa 100644 --- a/t/spock_exception_table_case1.py +++ b/t/spock_exception_table_case_discard1.py @@ -33,8 +33,8 @@ ## Set the exception logging behaviors for the test: for n in range(num_nodes): n=n+1 - ## Set spock.exception_behaviour to transdiscard: - res_set_seb=util_test.set_guc('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + ## Set spock.exception_behaviour to discard: + res_set_seb=util_test.set_guc('spock.exception_behaviour', 'discard', f"{cluster_dir}/n{n}") ## Check server status after the reload status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) diff --git a/t/spock_exception_table_case2.py b/t/spock_exception_table_case_discard2.py similarity index 98% rename from t/spock_exception_table_case2.py rename to t/spock_exception_table_case_discard2.py index 65282a9..a5ca198 100644 --- a/t/spock_exception_table_case2.py +++ b/t/spock_exception_table_case_discard2.py @@ -39,8 +39,8 @@ for n in range(num_nodes): n=n+1 - ## Set 
spock.exception_behaviour to transdiscard: - res_set_seb=util_test.set_guc('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + ## Set spock.exception_behaviour to discard: + res_set_seb=util_test.set_guc('spock.exception_behaviour', 'discard', f"{cluster_dir}/n{n}") ## Check server status after the reload status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) diff --git a/t/spock_exception_table_case3.py b/t/spock_exception_table_case_discard3.py similarity index 98% rename from t/spock_exception_table_case3.py rename to t/spock_exception_table_case_discard3.py index 99f4eeb..94b0087 100644 --- a/t/spock_exception_table_case3.py +++ b/t/spock_exception_table_case_discard3.py @@ -37,8 +37,8 @@ for n in range(num_nodes): n=n+1 - ## Set spock.exception_behaviour to transdiscard: - res_set_seb=util_test.set_guc('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + ## Set spock.exception_behaviour to discard: + res_set_seb=util_test.set_guc('spock.exception_behaviour', 'discard', f"{cluster_dir}/n{n}") ## Check server status after the reload status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) diff --git a/t/spock_exception_table_case_sub-disable1.py b/t/spock_exception_table_case_sub-disable1.py new file mode 100644 index 0000000..5e77476 --- /dev/null +++ b/t/spock_exception_table_case_sub-disable1.py @@ -0,0 +1,177 @@ +import sys, os, util_test, subprocess, time + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") +home_dir=os.getenv("EDGE_HOME_DIR") +port2=port1+1 +nc_dir=os.getenv("NC_DIR","nc") +home_dir = os.getenv("EDGE_HOME_DIR") + + +## Check the information from cluster list-nodes. 
+res=util_test.run_nc_cmd("Check the cluster with the list-nodes command", (f"cluster list-nodes demo"), f"{home_dir}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Set the exception logging behaviors for the test: +for n in range(num_nodes): + n=n+1 + ## Set spock.exception_behaviour to sub-disable: + res_set_seb=util_test.set_guc('spock.exception_behaviour', 'sub-disable', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + + ## Set spock.exception_logging to all: + res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case11 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case11 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case11 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Check replication + print(f"We're on node n{n} now:") + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print("Confirming the configuration") +## Confirm the configuration: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Confirm with SELECT * FROM spock.subscription. 
+    result = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port,pw,usr) +    print(f"SELECT * from spock.subscriptions returns: {result}") +    print("*"*100) +    ## Confirm with spock sub-show-status +    status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") +    print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") +    port = port + 1 + +    if "replicating" not in status.stdout: +        util_test.EXIT_FAIL() + +## Test Steps +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n1 that will not be replicated to n2 + +anon_block = """ +DO $$ +BEGIN +    PERFORM spock.repair_mode('True'); +    INSERT INTO case11 VALUES (2, 70000, null); +END $$; +""" + +print(f"Executing the anonymous block: {anon_block}") +row = util_test.write_psql(f"{anon_block}",host,dbname,port1,pw,usr) +print(row) + +## Look for our row on n1 and n2: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: +    n = n + 1 +    ## Confirm with SELECT * FROM case11. +    result = util_test.read_psql("SELECT * FROM case11;",host,dbname,port,pw,usr) +    print(f"Line 120 - SELECT * FROM case11 on node n{n} returns: {result}") +    port = port+1 +    print("*"*100) + +    if "replicating" not in status.stdout: +        util_test.EXIT_FAIL() + + +## More test steps: +## Update the record that is out of sync, forcing a record into the exception table... +row = util_test.write_psql("UPDATE case11 SET filler = 'hi' WHERE bid = 2",host,dbname,port1,pw,usr) +print(f"TEST STEP: The update to bid 2 returns: {row}") +print("*"*100) + +## Demonstrate that replication continues on n1: +row = util_test.write_psql("UPDATE case11 SET filler = 'bye' WHERE bid = 1",host,dbname,port1,pw,usr) +print(f"TEST STEP: The update to bid 1 on n1 returns: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: +    n = n + 1 +    ## Check our table contents: +    result = util_test.read_psql("SELECT * FROM case11;",host,dbname,port,pw,usr) +    print(f"SELECT * from case11 on node n{n} returns: {result}") +    ## Confirm with spock sub-show-status +    status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") +    print(f"Line 150 - The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") +    port = port + 1 + +    if "replicating" not in status.stdout: +        util_test.EXIT_FAIL() + +## Query the spock.exception_log; adding this command to cover error in 4.0.4 where a query on the wrong node caused a server crash. +row1 = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case11';",host,dbname,port1,pw,usr) +print(f"This command is the query that used to cause a server crash! 
The result s/b []: {row1}") +print("*"*100) + +if '[]' not in str(row1): + util_test.EXIT_FAIL() + +## Confirm the test results from the spock.exception_log: +result = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case11';",host,dbname,port2,pw,usr) +print(f"Line 166 - TEST CONFIRMATION: SELECT * FROM spock.exception_log on n2 returns: {result}") +print("*"*100) + + +if '"value": 2, "attname": "bid", "atttype": "int4"' in str(result): + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + diff --git a/t/spock_exception_table_case_sub-disable2.py b/t/spock_exception_table_case_sub-disable2.py new file mode 100644 index 0000000..52c3aa5 --- /dev/null +++ b/t/spock_exception_table_case_sub-disable2.py @@ -0,0 +1,210 @@ +import sys, os, util_test,subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") + +port2=port1+1 +print(port2) + +print("*"*100) +nc_dir=os.getenv("NC_DIR","nc") +print(nc_dir) +home_dir = os.getenv("EDGE_HOME_DIR") +print(home_dir) + +# Check the information from cluster list-nodes. +# +command = (f"cluster list-nodes demo") +res=util_test.run_nc_cmd("Exercise the list-nodes command", command, f"{home_dir}") +print(f"Command: {command}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +for n in range(num_nodes): + n=n+1 + ## Set spock.exception_behaviour to sub-disable: + res_set_seb=util_test.set_guc('spock.exception_behaviour', 'sub-disable', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + + ## Set spock.exception_logging to all: + res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case22 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case22 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case22 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + ## Confirm with SELECT * FROM spock.tables. 
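For reference, a minimal sketch of verifying the exception-log entry by parsing remote_new_tup as JSON instead of matching the literal substring used above; it assumes util_test.read_psql hands back the column as JSON text shaped like the fragment the test looks for, and the helper name is illustrative, not part of util_test:

    import json

    def exception_log_has_bid(remote_new_tup, expected_bid):
        # remote_new_tup is expected to look like:
        # [{"value": 2, "attname": "bid", "atttype": "int4"}, ...]
        try:
            attrs = json.loads(str(remote_new_tup))
        except ValueError:
            return False
        return any(isinstance(a, dict)
                   and a.get("attname") == "bid"
                   and a.get("value") == expected_bid
                   for a in attrs)

Parsing the JSON keeps the check valid even if key order or whitespace in the logged tuple changes between Spock versions.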
+ result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Check replication + print(f"{n} is the value in n") + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"Line 72 - The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print("Confirming the configuration") +## Confirm the configuration: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Confirm with SELECT * FROM spock.subscription. + result = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.subscriptions returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n1 returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## Add two rows that should be replicated from n1 to n2: + +row = util_test.write_psql("INSERT INTO case22 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 11 on n1: {row}") +print("*"*100) + +row = util_test.write_psql("INSERT INTO case22 VALUES(22, 22000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 22 on n1: {row}") +print("*"*100) + + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case22;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"Line 124 - The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n2 that will not be replicated to n1: + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case22 VALUES (33, 33000, null); +END $$; +""" + +print(anon_block) +row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) +print(row) + +## Check the rows on n1 and n2: + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case22;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + 
print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"TEST STEP: We're in repair mode - the table on n1 should contain 1/11/22, and n2 should contain 1/11/22/33") + +## Node n2 has three rows; bid 33 is not replicated to n1, so an update should end up in the exception log table: +row = util_test.write_psql("UPDATE case22 SET filler = 'trouble' WHERE bid = 33",host,dbname,port2,pw,usr) +print(f"TEST STEP: We're in repair mode - the update to bid 33 on n2 returns: {row}") +print("*"*100) + +## Demonstrate that replication continues +row = util_test.write_psql("UPDATE case22 SET filler = 'replication check' WHERE bid = 11",host,dbname,port2,pw,usr) +print(f"TEST STEP: The update to bid 11 on n1 returns: {row}") +print("*"*100) + +## Show that the row update made it to n2 without causing a death spiral: +row = util_test.read_psql("SELECT * FROM case22",host,dbname,port2,pw,usr).strip("[]") +print(f"TEST STEP: bid 11 should be updated on n2, case22 contains: {row}") +print("*"*100) + +## Check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"Line 191 - The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + print("*"*100) + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## Read from the spock.exception_log on n1 (the update of bid3 should be here); +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case22';",host,dbname,port1,pw,usr) +print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") +print("*"*100) + +if '"value": 33, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + diff --git a/t/spock_exception_table_case_sub-disable3.py b/t/spock_exception_table_case_sub-disable3.py new file mode 100644 index 0000000..45e659e --- /dev/null +++ b/t/spock_exception_table_case_sub-disable3.py @@ -0,0 +1,189 @@ +import sys, os, util_test,subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") + +port2=port1+1 +print(port2) + +print("*"*100) +nc_dir=os.getenv("NC_DIR","nc") +print(nc_dir) +home_dir = os.getenv("EDGE_HOME_DIR") +print(home_dir) + +# Check the information from cluster list-nodes. 
+# +res=util_test.run_nc_cmd("Exercise the list-nodes command", (f"cluster list-nodes demo"), f"{home_dir}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +for n in range(num_nodes): + n=n+1 + ## Set spock.exception_behaviour to sub-disable: + res_set_seb=util_test.set_guc('spock.exception_behaviour', 'sub-disable', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + + ## Set spock.exception_logging to all: + res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case33 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case33 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case33 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + print("*"*100) + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + print("*"*100) + ## Check replication + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Add one row that should be replicated from n1 to n2: + +row = util_test.write_psql("INSERT INTO case33 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 11 on n1: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case33;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"Node n1 and n2 should both contain bid 1/11") +print("*"*100) + + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n2 that will not be replicated to n1: + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case33 VALUES (22, 22000, null); +END $$; +""" + +print(anon_block) +row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) +print(row) + +## Add a row to n1 that has the same bid as the row we added on n2; we're still in repair mode: + +row = util_test.write_psql("INSERT INTO case33 VALUES(22, 99000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We 
just tried to insert bid 22 on n1 - this should fail, but it doesn't: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case33;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"Line 137 - The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"Node n1 should contain bid 1/11") +print(f"Node n2 should contain bid 1/11/22") + +## Check the results from the statement above, and you can see the duplicate primary key error +## is not being caught. Fix this when the patch is in. + +## Read from the spock.exception_log on n1; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n1 returns an empty result set: {row}") +print("*"*100) + +## Read from the spock.exception_log on n2; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns the replication error: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state - specifically we don't want a death spiral here: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case33;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"Line 170 - The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Read from the spock.exception_log on n2 for our needle/haystack step: +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case33';",host,dbname,port2,pw,usr) +print(f"TEST STEP: SELECT remote_new_tup FROM spock.exception_log on n2 returns: {row}") +print("*"*100) + +if '"value": 22, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + diff --git a/t/spock_exception_table_case_transdiscard1.py b/t/spock_exception_table_case_transdiscard1.py new file mode 100644 index 0000000..dbddfa8 --- /dev/null +++ b/t/spock_exception_table_case_transdiscard1.py @@ -0,0 +1,176 @@ +import sys, os, util_test, subprocess, time + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") 
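As an aside, the block of os.getenv calls that opens each of these scripts is repeated verbatim throughout the series; a minimal sketch of gathering the same EDGE_* settings once is below. The dataclass and function names are illustrative and not part of util_test, and the defaults mirror the ones used in these scripts.

    import os
    from dataclasses import dataclass

    @dataclass
    class EdgeSettings:
        num_nodes: int
        cluster_dir: str
        host: str
        dbname: str
        usr: str
        pw: str
        port1: int

        @property
        def port2(self) -> int:
            # Convention used throughout these tests: n2 listens on port1 + 1.
            return self.port1 + 1

    def load_settings() -> EdgeSettings:
        return EdgeSettings(
            num_nodes=int(os.getenv("EDGE_NODES", 2)),
            cluster_dir=os.getenv("EDGE_CLUSTER_DIR", ""),
            host=os.getenv("EDGE_HOST", "localhost"),
            dbname=os.getenv("EDGE_DB", "lcdb"),
            usr=os.getenv("EDGE_USERNAME", "admin"),
            pw=os.getenv("EDGE_PASSWORD", "password1"),
            port1=int(os.getenv("EDGE_START_PORT", 6432)),
        )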
+host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") +home_dir=os.getenv("EDGE_HOME_DIR") +port2=port1+1 +nc_dir=os.getenv("NC_DIR","nc") +home_dir = os.getenv("EDGE_HOME_DIR") + + +## Check the information from cluster list-nodes. +res=util_test.run_nc_cmd("Check the cluster with the list-nodes command", (f"cluster list-nodes demo"), f"{home_dir}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Set the exception logging behaviors for the test: +for n in range(num_nodes): + n=n+1 + ## Set spock.exception_behaviour to transdiscard: + res_set_seb=util_test.set_guc('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + + ## Set spock.exception_logging to all: + res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case111 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case111 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case111 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Check replication + print(f"We're on node n{n} now:") + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print("Confirming the configuration") +## Confirm the configuration: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Confirm with SELECT * FROM spock.subscription. 
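For reference, a minimal sketch of waiting for a row to replicate instead of reading the subscriber immediately; replication is asynchronous, so a row written on n1 can take a moment to reach n2. It assumes util_test.read_psql keeps the signature used above and that an empty result renders as "[]"; the helper name and timeout are illustrative.

    import time
    import util_test

    def wait_for_row(sql, host, db, port, pw, usr, timeout=30, interval=1):
        # Poll the target node until the query returns something, or give up.
        deadline = time.time() + timeout
        while time.time() < deadline:
            out = str(util_test.read_psql(sql, host, db, port, pw, usr)).strip()
            if out and out != "[]":
                return out
            time.sleep(interval)
        return None

For example, wait_for_row("SELECT * FROM case111 WHERE bid = 1;", host, dbname, port2, pw, usr) before asserting on the n2 contents.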
+ result = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.subscriptions returns: {result}") + print("*"*100) + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Test Steps +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n1 that will not be replicated to n2 + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case111 VALUES (2, 70000, null); +END $$; +""" + +print(f"Executing the anonymous block: anon_block") +row = util_test.write_psql(f"{anon_block}",host,dbname,port1,pw,usr) +print(row) + +## Look for our row on n1 and n2: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT * FROM case111;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + port = port+1 + print("*"*100) + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## More test steps: +## Update the record that is out of sync, forcing a record into the exception table... +row = util_test.write_psql("UPDATE case111 SET filler = 'hi' WHERE bid = 2",host,dbname,port1,pw,usr) +print(f"TEST STEP: The update to bid 2 returns: {row}") +print("*"*100) + +## Demonstrate that replication continues on n1: +row = util_test.write_psql("UPDATE case111 SET filler = 'bye' WHERE bid = 1",host,dbname,port1,pw,usr) +print(f"TEST STEP: The update to bid 1 on n1 returns: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Check our table contents: + result = util_test.read_psql("SELECT * FROM case111;",host,dbname,port,pw,usr) + print(f"SELECT * from case111 on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Query the spock.exception_log; adding this command to cover error in 4.0.4 where a query on the wrong node caused a server crash. +row1 = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case111';",host,dbname,port1,pw,usr) +print(f"This command is the query that used to cause a server crash! 
The result s/b []: {row1}") +print("*"*100) + +if '[]' not in str(row1): + util_test.EXIT_FAIL() + +## Confirm the test results from the spock.exception_log: +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case111';",host,dbname,port2,pw,usr) +print(f"TEST CONFIRMATION: SELECT * FROM spock.exception_log on n2 returns: {row}") +print("*"*100) + +if '"value": 2, "attname": "bid", "atttype": "int4"' in str(row): + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + diff --git a/t/spock_exception_table_case_transdiscard2.py b/t/spock_exception_table_case_transdiscard2.py new file mode 100644 index 0000000..067f1f9 --- /dev/null +++ b/t/spock_exception_table_case_transdiscard2.py @@ -0,0 +1,210 @@ +import sys, os, util_test,subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") + +port2=port1+1 +print(port2) + +print("*"*100) +nc_dir=os.getenv("NC_DIR","nc") +print(nc_dir) +home_dir = os.getenv("EDGE_HOME_DIR") +print(home_dir) + +# Check the information from cluster list-nodes. +# +command = (f"cluster list-nodes demo") +res=util_test.run_nc_cmd("Exercise the list-nodes command", command, f"{home_dir}") +print(f"Command: {command}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +for n in range(num_nodes): + n=n+1 + ## Set spock.exception_behaviour to transdiscard: + res_set_seb=util_test.set_guc('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + + ## Set spock.exception_logging to all: + res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case222 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case222 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case222 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + ## Confirm with SELECT * FROM spock.tables. 
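For reference, a minimal sketch of the two-sided check this test performs: the origin node (n1) should report nothing for the table, which is the same query that crashed the server in 4.0.4 when run on the wrong node, while the subscriber (n2) should hold the rejected tuple. It assumes util_test.read_psql keeps the signature used above; the helper name is illustrative.

    import util_test

    def exception_log_entries(table, host, db, port, pw, usr):
        # Fetch the logged tuple(s) for one table on one node.
        sql = (f"SELECT remote_new_tup FROM spock.exception_log "
               f"WHERE table_name = '{table}';")
        return str(util_test.read_psql(sql, host, db, port, pw, usr)).strip()

    # Expected shape of the check:
    #   exception_log_entries("case111", host, dbname, port1, pw, usr) == "[]"
    #   '"attname": "bid"' in exception_log_entries("case111", host, dbname, port2, pw, usr)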
+ result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Check replication + print(f"{n} is the value in n") + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print("Confirming the configuration") +## Confirm the configuration: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Confirm with SELECT * FROM spock.subscription. + result = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.subscriptions returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n1 returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## Add two rows that should be replicated from n1 to n2: + +row = util_test.write_psql("INSERT INTO case222 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 11 on n1: {row}") +print("*"*100) + +row = util_test.write_psql("INSERT INTO case222 VALUES(22, 22000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 22 on n1: {row}") +print("*"*100) + + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case222;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n2 that will not be replicated to n1: + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case222 VALUES (33, 33000, null); +END $$; +""" + +print(anon_block) +row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) +print(row) + +## Check the rows on n1 and n2: + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case222;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock 
sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"TEST STEP: We're in repair mode - the table on n1 should contain 1/11/22, and n2 should contain 1/11/22/33") + +## Node n2 has three rows; bid 33 is not replicated to n1, so an update should end up in the exception log table: +row = util_test.write_psql("UPDATE case222 SET filler = 'trouble' WHERE bid = 33",host,dbname,port2,pw,usr) +print(f"TEST STEP: We're in repair mode - the update to bid 33 on n2 returns: {row}") +print("*"*100) + +## Demonstrate that replication continues +row = util_test.write_psql("UPDATE case222 SET filler = 'replication check' WHERE bid = 11",host,dbname,port2,pw,usr) +print(f"TEST STEP: The update to bid 11 on n1 returns: {row}") +print("*"*100) + +## Show that the row update made it to n2 without causing a death spiral: +row = util_test.read_psql("SELECT * FROM case222",host,dbname,port2,pw,usr).strip("[]") +print(f"TEST STEP: bid 11 should be updated on n2, case222 contains: {row}") +print("*"*100) + +## Check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + print("*"*100) + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## Read from the spock.exception_log on n1 (the update of bid3 should be here); +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case222';",host,dbname,port1,pw,usr) +print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") +print("*"*100) + +if '"value": 33, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + diff --git a/t/spock_exception_table_case_transdiscard3.py b/t/spock_exception_table_case_transdiscard3.py new file mode 100644 index 0000000..95887ac --- /dev/null +++ b/t/spock_exception_table_case_transdiscard3.py @@ -0,0 +1,189 @@ +import sys, os, util_test,subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") + +port2=port1+1 +print(port2) + +print("*"*100) +nc_dir=os.getenv("NC_DIR","nc") +print(nc_dir) +home_dir = os.getenv("EDGE_HOME_DIR") +print(home_dir) + +# Check the information from cluster list-nodes. 
+# +res=util_test.run_nc_cmd("Exercise the list-nodes command", (f"cluster list-nodes demo"), f"{home_dir}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +for n in range(num_nodes): + n=n+1 + ## Set spock.exception_behaviour to transdiscard: + res_set_seb=util_test.set_guc('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + + ## Set spock.exception_logging to all: + res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + ## Check server status after the reload + status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case333 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case333 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case333 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + print("*"*100) + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + print("*"*100) + ## Check replication + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Add one row that should be replicated from n1 to n2: + +row = util_test.write_psql("INSERT INTO case333 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 11 on n1: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case333;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"Node n1 and n2 should both contain bid 1/11") +print("*"*100) + + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n2 that will not be replicated to n1: + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case333 VALUES (22, 22000, null); +END $$; +""" + +print(anon_block) +row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) +print(row) + +## Add a row to n1 that has the same bid as the row we added on n2; we're still in repair mode: + +row = util_test.write_psql("INSERT INTO case333 VALUES(22, 99000, null)",host,dbname,port1,pw,usr) +print(f"TEST 
STEP: We just tried to insert bid 22 on n1 - this should fail, but it doesn't: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case333;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"Node n1 should contain bid 1/11") +print(f"Node n2 should contain bid 1/11/22") + +## Check the results from the statement above, and you can see the duplicate primary key error +## is not being caught. Fix this when the patch is in. + +## Read from the spock.exception_log on n1; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n1 returns an empty result set: {row}") +print("*"*100) + +## Read from the spock.exception_log on n2; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns the replication error: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state - specifically we don't want a death spiral here: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case333;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Read from the spock.exception_log on n2 for our needle/haystack step: +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case333';",host,dbname,port2,pw,usr) +print(f"TEST STEP: SELECT remote_new_tup FROM spock.exception_log on n2 returns: {row}") +print("*"*100) + +if '"value": 22, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + From 87add35b3a7fc0776917da0fb8633bc00995e4db Mon Sep 17 00:00:00 2001 From: Cloud User Date: Fri, 22 Nov 2024 20:43:26 +0000 Subject: [PATCH 44/48] Commit contains update to cluster schedule (cluster schedule works) as well as backup of spock_4.0 schedule and files (still WIP) --- schedule_files/cluster_schedule | 4 +- schedule_files/spock_4.0 | 17 +++++--- t/cluster-init-bad-json.py | 34 +++++++++++----- t/spock_exception_table_case_discard1.py | 28 ++++++++----- t/spock_exception_table_case_discard2.py | 27 +++++++++---- t/spock_exception_table_case_discard3.py | 27 +++++++++---- t/spock_exception_table_case_sub-disable1.py | 29 +++++++++----- t/spock_exception_table_case_sub-disable2.py | 
27 +++++++++---- t/spock_exception_table_case_sub-disable3.py | 27 +++++++++---- t/spock_exception_table_case_transdiscard1.py | 39 ++++++++++--------- t/spock_exception_table_case_transdiscard2.py | 36 +++++++++-------- t/spock_exception_table_case_transdiscard3.py | 35 +++++++++-------- 12 files changed, 213 insertions(+), 117 deletions(-) diff --git a/schedule_files/cluster_schedule b/schedule_files/cluster_schedule index 9b39380..cceb476 100644 --- a/schedule_files/cluster_schedule +++ b/schedule_files/cluster_schedule @@ -22,9 +22,9 @@ t/cleanup_03_remove_nc.py ## Multi-node cluster tests t/setup_01_install.py -t/multi-db_cluster_setup.py +#t/multi-db_cluster_setup.py t/multi-db_cluster_exercise_ace.py ## Remove components, Clean environment and free ports -t/cleanup_01_node_remove.py +#t/cleanup_01_node_remove.py t/cleanup_03_remove_nc.py diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index 73403e1..5461722 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -5,11 +5,18 @@ t/cluster-init-2-node-cluster.py ## Spock 4.0 Scripts #t/spock_repair_function.py -t/spock_exception_table_case1.py -t/spock_exception_table_case2.py -t/spock_exception_table_case3.py + +#t/spock_exception_table_case_discard1.py +#t/spock_exception_table_case_discard2.py +#t/spock_exception_table_case_discard3.py +#t/spock_exception_table_case_sub-disable1.py +#t/spock_exception_table_case_sub-disable2.py +#t/spock_exception_table_case_sub-disable3.py +t/spock_exception_table_case_transdiscard1.py +t/spock_exception_table_case_transdiscard2.py +t/spock_exception_table_case_transdiscard3.py ## Remove components, Clean environment and free ports -t/cleanup_01_node_remove.py +#t/cleanup_01_node_remove.py # Delete the nc directory and pgpass file -t/cleanup_03_remove_nc.py +#t/cleanup_03_remove_nc.py diff --git a/t/cluster-init-bad-json.py b/t/cluster-init-bad-json.py index c238150..7b37d18 100644 --- a/t/cluster-init-bad-json.py +++ b/t/cluster-init-bad-json.py @@ -1,4 +1,4 @@ -import sys, os, util_test,subprocess +import sys, os, util_test, subprocess, json # Print Script print(f"Starting - {os.path.basename(__file__)}") @@ -22,6 +22,7 @@ tmpcluster = "holdings" file_name = (f"{tmpcluster}.json") +cwd=os.getcwd() # # Use cluster json-template to create a template file: @@ -32,22 +33,35 @@ print(f"res = {res}\n") print("*"*100) -# + +# We're going to modify the path in our .json file so the new cluster lands in our nc directory: +new_path_0 = (f"{cwd}/{home_dir}/cluster/{tmpcluster}/{tmpcluster}/n1") +new_path_1 = (f"{cwd}/{home_dir}/cluster/{tmpcluster}/{tmpcluster}/n2") + +print(f"new_path_0: {new_path_0}") +print(f"new_path_1: {new_path_1}") + +with open(f"{cwd}/{home_dir}/cluster/{tmpcluster}/{tmpcluster}.json", 'r') as file: + data = json.load(file) + print(data) + data["node_groups"][0]["path"] = new_path_0 + data["node_groups"][1]["path"] = new_path_1 + +newdata = json.dumps(data, indent=4) +with open(f"{cwd}/{home_dir}/cluster/{tmpcluster}/{tmpcluster}.json", 'w') as file: + file.write(newdata) + # Use cluster init to initialize the cluster defined in the template file. # This will throw an error because both ports in the json file are the same. 
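For reference, a minimal sketch of re-reading holdings.json after the rewrite above to confirm the node paths landed where expected; it assumes the same node_groups[i]["path"] layout the patch edits, and the helper name is illustrative.

    import json

    def cluster_paths(json_path):
        # Return the per-node install paths recorded in the cluster file.
        with open(json_path) as fh:
            data = json.load(fh)
        return [group["path"] for group in data["node_groups"]]

    # Expected shape of the check after the rewrite:
    #   cluster_paths(f"{cwd}/{home_dir}/cluster/{tmpcluster}/{tmpcluster}.json") == [new_path_0, new_path_1]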
# command = (f"cluster init {tmpcluster}") -res1=util_test.run_nc_cmd("This command attempts to initialize the cluster", command, f"{home_dir}") -print(f"The attempt to initialize returns = {res1.returncode}\n") -print(f"The attempt to initialize the cluster should fail = {res1.stdout}\n") +res1=util_test.run_nc_cmd("This command initializes the cluster", command, f"{home_dir}") +print(f"res1.returncode contains = {res1.returncode}\n") +print(f"res1.stdout contains = {res1.stdout}\n") print("*"*100) -# Per Cady, the way the functionality is coded, it returns a 0 until we account for the errors. -# This seems a bit backwards, but we'll check for 0 and FAILED: -if res1.returncode == 0 and "FAILED" in str(res1.stdout): - print("This case should return: ERROR: Cannot install over a non-empty 'pgedge' directory.") - print("The JSON file is unmodified, so it installs twice into the same port") +if res1.returncode == 0 and "FAILED" not in str(res1.stdout): util_test.EXIT_PASS() else: diff --git a/t/spock_exception_table_case_discard1.py b/t/spock_exception_table_case_discard1.py index 36463fa..53bcf93 100644 --- a/t/spock_exception_table_case_discard1.py +++ b/t/spock_exception_table_case_discard1.py @@ -33,15 +33,25 @@ ## Set the exception logging behaviors for the test: for n in range(num_nodes): n=n+1 - ## Set spock.exception_behaviour to discard: - res_set_seb=util_test.set_guc('spock.exception_behaviour', 'discard', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) - - ## Set spock.exception_logging to all: - res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'discard', f"{cluster_dir}/n{n}") + print(f"Line 39 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + print(f"Line 42 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = "service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 46 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 49 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 51 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 54 - res: {res.stdout}") print("Setup starts") ## Setup - on each node: diff --git a/t/spock_exception_table_case_discard2.py b/t/spock_exception_table_case_discard2.py index a5ca198..caa9f2c 100644 --- a/t/spock_exception_table_case_discard2.py +++ b/t/spock_exception_table_case_discard2.py @@ -37,17 +37,28 @@ print(f"The list-nodes command returns = {res}\n") print("*"*100) +## Set the exception logging behaviors for the test: for n in range(num_nodes): n=n+1 - ## Set spock.exception_behaviour to discard: - res_set_seb=util_test.set_guc('spock.exception_behaviour', 'discard', f"{cluster_dir}/n{n}") - ## Check server status after the reload - 
status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) - ## Set spock.exception_logging to all: - res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'discard', f"{cluster_dir}/n{n}") + print(f"Line 46 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + print(f"Line 49 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = "service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 53 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 56 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 58 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 61 - res: {res.stdout}") print("Setup starts") ## Setup - on each node: diff --git a/t/spock_exception_table_case_discard3.py b/t/spock_exception_table_case_discard3.py index 94b0087..54cbd03 100644 --- a/t/spock_exception_table_case_discard3.py +++ b/t/spock_exception_table_case_discard3.py @@ -35,17 +35,28 @@ print(f"The list-nodes command returns = {res}\n") print("*"*100) +## Set the exception logging behaviors for the test: for n in range(num_nodes): n=n+1 - ## Set spock.exception_behaviour to discard: - res_set_seb=util_test.set_guc('spock.exception_behaviour', 'discard', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) - ## Set spock.exception_logging to all: - res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'discard', f"{cluster_dir}/n{n}") + print(f"Line 44 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + print(f"Line 47 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = "service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 51 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 54 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 56 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 59 - res: {res.stdout}") print("Setup starts") ## Setup - on each node: diff --git a/t/spock_exception_table_case_sub-disable1.py 
b/t/spock_exception_table_case_sub-disable1.py index 5e77476..bfcdcd1 100644 --- a/t/spock_exception_table_case_sub-disable1.py +++ b/t/spock_exception_table_case_sub-disable1.py @@ -33,15 +33,26 @@ ## Set the exception logging behaviors for the test: for n in range(num_nodes): n=n+1 - ## Set spock.exception_behaviour to sub-disable: - res_set_seb=util_test.set_guc('spock.exception_behaviour', 'sub-disable', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) - - ## Set spock.exception_logging to all: - res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'sub_disable', f"{cluster_dir}/n{n}") + print(f"Line 39 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + print(f"Line 42 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = "service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 46 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 49 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 51 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 54 - res: {res.stdout}") + print("Setup starts") ## Setup - on each node: diff --git a/t/spock_exception_table_case_sub-disable2.py b/t/spock_exception_table_case_sub-disable2.py index 52c3aa5..65ba2fc 100644 --- a/t/spock_exception_table_case_sub-disable2.py +++ b/t/spock_exception_table_case_sub-disable2.py @@ -37,17 +37,28 @@ print(f"The list-nodes command returns = {res}\n") print("*"*100) +## Set the exception logging behaviors for the test: for n in range(num_nodes): n=n+1 - ## Set spock.exception_behaviour to sub-disable: - res_set_seb=util_test.set_guc('spock.exception_behaviour', 'sub-disable', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) - ## Set spock.exception_logging to all: - res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'sub_disable', f"{cluster_dir}/n{n}") + print(f"Line 47 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + print(f"Line 50 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = "service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 54 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 57 
- SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 59 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 62 - res: {res.stdout}") print("Setup starts") ## Setup - on each node: diff --git a/t/spock_exception_table_case_sub-disable3.py b/t/spock_exception_table_case_sub-disable3.py index 45e659e..5f2d0d4 100644 --- a/t/spock_exception_table_case_sub-disable3.py +++ b/t/spock_exception_table_case_sub-disable3.py @@ -35,17 +35,28 @@ print(f"The list-nodes command returns = {res}\n") print("*"*100) +## Set the exception logging behaviors for the test: for n in range(num_nodes): n=n+1 - ## Set spock.exception_behaviour to sub-disable: - res_set_seb=util_test.set_guc('spock.exception_behaviour', 'sub-disable', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) - ## Set spock.exception_logging to all: - res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'sub_disable', f"{cluster_dir}/n{n}") + print(f"Line 44 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + print(f"Line 47 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = "service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 51 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 54 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 56 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 59 - res: {res.stdout}") print("Setup starts") ## Setup - on each node: diff --git a/t/spock_exception_table_case_transdiscard1.py b/t/spock_exception_table_case_transdiscard1.py index dbddfa8..eb6d9a3 100644 --- a/t/spock_exception_table_case_transdiscard1.py +++ b/t/spock_exception_table_case_transdiscard1.py @@ -33,15 +33,25 @@ ## Set the exception logging behaviors for the test: for n in range(num_nodes): n=n+1 - ## Set spock.exception_behaviour to transdiscard: - res_set_seb=util_test.set_guc('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) - - ## Set spock.exception_logging to all: - res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + print(f"Line 39 - res: {res.stdout}") + ## Set spock.exception_logging: + 
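For reference, a minimal sketch of cross-checking the two GUCs straight from SQL after the restart, as an alternative to the db guc-show calls in this hunk; it assumes util_test.read_psql keeps the signature used in these scripts and that the spock extension defining the parameters is loaded, and the helper name is illustrative.

    import util_test

    def show_guc(name, host, db, port, pw, usr):
        # SHOW reports the value the running server is actually using.
        return util_test.read_psql(f"SHOW {name};", host, db, port, pw, usr)

    # Expected shape of the check on node n1 for this hunk's settings:
    #   show_guc("spock.exception_behaviour", host, dbname, port1, pw, usr)  -> transdiscard
    #   show_guc("spock.exception_logging", host, dbname, port1, pw, usr)    -> all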
res=util_test.guc_set('spock.exception_logging', 'all', f"{cluster_dir}/n{n}") + print(f"Line 42 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = "service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 46 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 49 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 51 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 54 - res: {res.stdout}") print("Setup starts") ## Setup - on each node: @@ -66,10 +76,9 @@ status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") print("*"*100) + print(f"The port before adding 1 is: {port}") port = port + 1 - - if "replicating" not in status.stdout: - util_test.EXIT_FAIL() + print(f"The port after adding 1 is: {port}") print("Confirming the configuration") ## Confirm the configuration: @@ -85,14 +94,8 @@ result = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port,pw,usr) print(f"SELECT * from spock.subscriptions returns: {result}") print("*"*100) - ## Confirm with spock sub-show-status - status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") - print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") port = port + 1 - if "replicating" not in status.stdout: - util_test.EXIT_FAIL() - ## Test Steps ## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will ## add a row to n1 that will not be replicated to n2 diff --git a/t/spock_exception_table_case_transdiscard2.py b/t/spock_exception_table_case_transdiscard2.py index 067f1f9..5aa805d 100644 --- a/t/spock_exception_table_case_transdiscard2.py +++ b/t/spock_exception_table_case_transdiscard2.py @@ -37,17 +37,28 @@ print(f"The list-nodes command returns = {res}\n") print("*"*100) +## Set the exception logging behaviors for the test: for n in range(num_nodes): n=n+1 - ## Set spock.exception_behaviour to transdiscard: - res_set_seb=util_test.set_guc('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) - ## Set spock.exception_logging to all: - res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + print(f"Line 46 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'all', f"{cluster_dir}/n{n}") + print(f"Line 49 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = "service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), 
(f"{cluster_dir}/n{n}")) + print(f"Line 53 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 56 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 58 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 61 - res: {res.stdout}") print("Setup starts") ## Setup - on each node: @@ -63,19 +74,12 @@ ## Add it to the default repset: result=util_test.run_cmd("comment", f"spock repset-add-table default case222 {dbname}", f"{cluster_dir}/n{n}") print(f"The repset-add-table command on n{n} returns: {result.stdout}") - ## Confirm with SELECT * FROM spock.tables. + ## Confirm with SELECT relname FROM spock.tables. result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) print(f"SELECT * from spock.tables returns: {result}") - ## Check replication - print(f"{n} is the value in n") - status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") - print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") print("*"*100) port = port + 1 - if "replicating" not in status.stdout: - util_test.EXIT_FAIL() - print("Confirming the configuration") ## Confirm the configuration: port = port1 diff --git a/t/spock_exception_table_case_transdiscard3.py b/t/spock_exception_table_case_transdiscard3.py index 95887ac..7b4c4d1 100644 --- a/t/spock_exception_table_case_transdiscard3.py +++ b/t/spock_exception_table_case_transdiscard3.py @@ -35,17 +35,28 @@ print(f"The list-nodes command returns = {res}\n") print("*"*100) +## Set the exception logging behaviors for the test: for n in range(num_nodes): n=n+1 - ## Set spock.exception_behaviour to transdiscard: - res_set_seb=util_test.set_guc('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) - ## Set spock.exception_logging to all: - res_set_sel=util_test.set_guc('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") - ## Check server status after the reload - status=util_test.check_status(f"{cluster_dir}/n{n}", pgv) + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + print(f"Line 44 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'all', f"{cluster_dir}/n{n}") + print(f"Line 47 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = "service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 51 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 54 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 56 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status 
pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 59 - res: {res.stdout}") print("Setup starts") ## Setup - on each node: @@ -66,14 +77,6 @@ result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) print(f"SELECT * from spock.tables returns: {result}") print("*"*100) - ## Check replication - status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") - print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") - print("*"*100) - port = port + 1 - - if "replicating" not in status.stdout: - util_test.EXIT_FAIL() ## Add one row that should be replicated from n1 to n2: From 66dbfb0dbe01ea83a284cc4b8b010dc8886faef2 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Tue, 26 Nov 2024 12:46:02 +0000 Subject: [PATCH 45/48] Updated filtering and long-test schedules to use cluster-init.py setup (for dependability) --- schedule_files/filtering_schedule | 9 ++------- schedule_files/long-test | 5 +---- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/schedule_files/filtering_schedule b/schedule_files/filtering_schedule index 4ee866e..ce55cda 100644 --- a/schedule_files/filtering_schedule +++ b/schedule_files/filtering_schedule @@ -1,13 +1,8 @@ ## Set up a two node cluster t/setup_01_install.py -#t/300_setup_script.pl - -## Setup scripts for lower level directory -t/setup_03_node_install.py -t/setup_04_node_setup.py -t/spock_2_node_create.py -t/spock_3_sub_create.py +## Setup scripts +t/cluster-init.py #Filtering scripts t/column_filtering.pl diff --git a/schedule_files/long-test b/schedule_files/long-test index 1d25342..b0a5fda 100644 --- a/schedule_files/long-test +++ b/schedule_files/long-test @@ -202,10 +202,7 @@ t/cleanup_03_remove_nc.py ## FILTERING t/setup_01_install.py -t/setup_03_node_install.py -t/setup_04_node_setup.py -t/spock_2_node_create.py -t/spock_3_sub_create.py +t/cluster-init.py t/column_filtering.pl t/row_filtering.pl t/partition_filtering.pl From f67a1611e56d5ad5d34b512020c866e921285b2a Mon Sep 17 00:00:00 2001 From: Cloud User Date: Wed, 4 Dec 2024 16:30:38 +0000 Subject: [PATCH 46/48] Updated t/cluster-init.py for changes to .json file mgmt for UltraHA; cluster schedule broken in several places still --- t/cluster-init.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/t/cluster-init.py b/t/cluster-init.py index ec8d47f..6043207 100644 --- a/t/cluster-init.py +++ b/t/cluster-init.py @@ -23,17 +23,22 @@ cwd=os.getcwd() num_nodes=3 - +port=6432 +port1=6433 +port2=6434 #print("*"*100) print(f"home_dir = {home_dir}\n") -command = (f"cluster json-template {cluster_name} {dbname} {num_nodes} {usr} {pw} {pgv} {port}") +command = (f"cluster json-create {cluster_name} {num_nodes} {dbname} {usr} {pw} --port={port} --pg={pgv} --force") res=util_test.run_nc_cmd("This command should create a json file that defines a cluster", command, f"{home_dir}") print(f"res = {res}\n") new_ver = (f"{spockver}") -print(f"Spock new version is: {new_ver}") +port_0 = (f"{port}") +port_1 = (f"{port1}") +port_2 = (f"{port2}") +print(f"Spock new version, port, port_01, port_02 is: {new_ver}, {port_0}, {port_1}, {port_2}") new_path_0 = (f"{cwd}/{cluster_dir}/n1") new_path_1 = (f"{cwd}/{cluster_dir}/n2") new_path_2 = (f"{cwd}/{cluster_dir}/n3") @@ -43,6 +48,9 @@ data = json.load(file) #print(data) data["pgedge"]["spock"]["spock_version"] = new_ver + data["node_groups"][0]["port"] = port_0 + data["node_groups"][1]["port"] = port_1 + 
data["node_groups"][2]["port"] = port_2 data["node_groups"][0]["path"] = new_path_0 data["node_groups"][1]["path"] = new_path_1 data["node_groups"][2]["path"] = new_path_2 From 0ae205d72879738f373c5d03fd1eabd681e15558 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Wed, 4 Dec 2024 20:11:16 +0000 Subject: [PATCH 47/48] Updating cluster files for changes to .json-create format --- t/cluster-add-node.py | 5 ++--- t/cluster-init.py | 8 +++++--- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/t/cluster-add-node.py b/t/cluster-add-node.py index 9258079..498021c 100644 --- a/t/cluster-add-node.py +++ b/t/cluster-add-node.py @@ -58,8 +58,7 @@ ## Create the json file for n4: -data = {'json_version': 1.0, 'node_groups': [{'ssh': {'os_user': 'ec2-user', 'private_key': ''}, 'name': 'n4', 'is_active': 'on', 'public_ip': '127.0.0.1', 'private_ip': '127.0.0.1', -'port': '6435', 'path': '/home/ec2-user/work/platform_test/nc/pgedge/cluster/demo/n4'}]} +data = {'json_version': 1.1, 'node_groups': [{'ssh': {'os_user': 'ec2-user', 'private_key': ''}, 'name': 'n4', 'is_active': 'on', 'public_ip': '127.0.0.1', 'private_ip': '127.0.0.1', 'port': '6435', 'path': '/home/ec2-user/work/platform_test/nc/pgedge/cluster/demo/n4', 'replicas': '0'}]} file_name = 'n4.json' @@ -71,7 +70,7 @@ source = (f"n4.json") target = (f"{home_dir}/n4.json") -#print(f"home_dir = {home_dir}\n") +print(f"home_dir = {home_dir}\n") print(f"We need to copy that file to: {home_dir}") shutil.move(source, target) print("*"*100) diff --git a/t/cluster-init.py b/t/cluster-init.py index 6043207..60a156b 100644 --- a/t/cluster-init.py +++ b/t/cluster-init.py @@ -30,15 +30,16 @@ #print("*"*100) print(f"home_dir = {home_dir}\n") -command = (f"cluster json-create {cluster_name} {num_nodes} {dbname} {usr} {pw} --port={port} --pg={pgv} --force") +command = (f"cluster json-create {cluster_name} {num_nodes} {dbname} {usr} {pw} --port={port} --pg_ver={pgv} --force") res=util_test.run_nc_cmd("This command should create a json file that defines a cluster", command, f"{home_dir}") print(f"res = {res}\n") +pg_ver = (f"{pgv}") new_ver = (f"{spockver}") port_0 = (f"{port}") port_1 = (f"{port1}") port_2 = (f"{port2}") -print(f"Spock new version, port, port_01, port_02 is: {new_ver}, {port_0}, {port_1}, {port_2}") +print(f"Spock new version, pg version, port, port_01, port_02 is: {new_ver}, {pg_ver}, {port_0}, {port_1}, {port_2}") new_path_0 = (f"{cwd}/{cluster_dir}/n1") new_path_1 = (f"{cwd}/{cluster_dir}/n2") new_path_2 = (f"{cwd}/{cluster_dir}/n3") @@ -46,7 +47,8 @@ with open(f"{cluster_dir}/{cluster_name}.json", 'r') as file: data = json.load(file) - #print(data) + #print(f"Line 49 - {data}") + data["pgedge"]["pg_version"] = pg_ver data["pgedge"]["spock"]["spock_version"] = new_ver data["node_groups"][0]["port"] = port_0 data["node_groups"][1]["port"] = port_1 From 5b8eea77a4f7cbec78ba7c42769a9fdfeaa33784 Mon Sep 17 00:00:00 2001 From: Cloud User Date: Wed, 4 Dec 2024 20:20:31 +0000 Subject: [PATCH 48/48] Updated cluster module 2 node deployment script --- t/cluster-init-2-node-cluster.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/t/cluster-init-2-node-cluster.py b/t/cluster-init-2-node-cluster.py index bd75c1a..28a2acc 100644 --- a/t/cluster-init-2-node-cluster.py +++ b/t/cluster-init-2-node-cluster.py @@ -23,15 +23,20 @@ cwd=os.getcwd() num_nodes=2 - +port=6432 +port1=6433 #print("*"*100) print(f"home_dir = {home_dir}\n") -command = (f"cluster json-template {cluster_name} {dbname} {num_nodes} {usr} {pw} 
{pgv} {port}") +command = (f"cluster json-create {cluster_name} {num_nodes} {dbname} {usr} {pw} --pg_ver={pgv} --port={port} --force") res=util_test.run_nc_cmd("This command should create a json file that defines a cluster", command, f"{home_dir}") print(f"res = {res}\n") + +port_0 = (f"{port}") +port_1 = (f"{port1}") +pg_ver = (f"{pgv}") new_ver = (f"{spockver}") print(f"Spock new version is: {new_ver}") new_path_0 = (f"{cwd}/{cluster_dir}/n1") @@ -41,7 +46,10 @@ with open(f"{cluster_dir}/{cluster_name}.json", 'r') as file: data = json.load(file) #print(data) + data["pgedge"]["pg_version"] = pg_ver data["pgedge"]["spock"]["spock_version"] = new_ver + data["node_groups"][0]["port"] = port_0 + data["node_groups"][1]["port"] = port_1 data["node_groups"][0]["path"] = new_path_0 data["node_groups"][1]["path"] = new_path_1