diff --git a/schedule_files/ace_basic b/schedule_files/ace_basic index cf080e21..9fa9b135 100644 --- a/schedule_files/ace_basic +++ b/schedule_files/ace_basic @@ -1,6 +1,6 @@ t/setup_01_install.py -t/setup_02_nodecreate.py -t/setup_03_noderun.py +t/setup_03_node_install.py +t/setup_04_node_setup.py t/cluster_1_gen_json.py t/ace_01_setup.py @@ -19,5 +19,5 @@ t/ace_60_table_repair.py t/ace_61_table_repair_errors.py t/ace_99_cleanup.py -t/cleanup_01_noderemove.py -t/cleanup_02_pgremove.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/ace_functionality b/schedule_files/ace_functionality index 53adc704..f774bfc6 100644 --- a/schedule_files/ace_functionality +++ b/schedule_files/ace_functionality @@ -1,6 +1,6 @@ t/setup_01_install.py -t/setup_02_nodecreate.py -t/setup_03_noderun.py +t/setup_03_node_install.py +t/setup_04_node_setup.py t/cluster_1_gen_json.py t/ace_70_functionality.py @@ -9,5 +9,5 @@ t/ace_72_edge_cases.py t/ace_73_bigloop.py t/ace_74_diff_files.py -t/cleanup_01_noderemove.py -t/cleanup_02_pgremove.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/ace_long b/schedule_files/ace_long index b8d42605..4d1e36fa 100644 --- a/schedule_files/ace_long +++ b/schedule_files/ace_long @@ -1,6 +1,6 @@ t/setup_01_install.py -t/setup_02_nodecreate.py -t/setup_03_noderun.py +t/setup_03_node_install.py +t/setup_04_node_setup.py t/cluster_1_gen_json.py t/spock_1_setup.py @@ -31,5 +31,5 @@ t/ace_73_bigloop.py t/ace_74_diff_files.py t/ace_99_cleanup.py -t/cleanup_01_noderemove.py -t/cleanup_02_pgremove.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/auto_ddl_schedule b/schedule_files/auto_ddl_schedule index 4940ce4c..4ecae503 100644 --- a/schedule_files/auto_ddl_schedule +++ b/schedule_files/auto_ddl_schedule @@ -1,21 +1,18 @@ ## # setup scripts ## -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py ## # node creation ## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +t/spock_2_node_create.py ## # sub-create ## -t/6000_setup_sub_create_n1n2_n1.pl -t/6001_setup_sub_create_n2n1_n2.pl +t/spock_3_sub_create.py ## # enable autoDDL GUCS ## @@ -24,6 +21,7 @@ t/6011_setup_autoddl_gucs_on_n2.pl ## # autoDDL scripts ## +t/auto_ddl/6001_env_prereq_autoddl_setup_n1.sql t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql t/auto_ddl/6100b_table_validate_and_drop_n2.sql t/auto_ddl/6100c_table_validate_n1.sql @@ -51,18 +49,15 @@ t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.sql t/auto_ddl/6666a_all_objects_create_n1.sql t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql t/auto_ddl/6666c_all_objects_validate_n1.sql +t/auto_ddl/6901_env_cleanup_autoddl_n1.sql ## # cleanup scripts ## t/6910_teardown_autoddl_gucs_off_n1.pl t/6911_teardown_autoddl_gucs_off_n2.pl -t/8082_env_sub_drop_n1.pl -t/8083_env_sub_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl +t/spock_6_drop.py ## # uninstall pgedge ## -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl +t/cleanup_01_node_remove.py diff --git a/schedule_files/cluster_schedule b/schedule_files/cluster_schedule index 869783b4..cceb476d 100644 --- a/schedule_files/cluster_schedule +++ b/schedule_files/cluster_schedule @@ -1,27 +1,30 @@ ## Set up tests for a two node cluster -t/020_nodectl_install_pgedge.pl -t/cluster-create-json.py 
-t/cluster-json-validate.py -t/cluster-json-invalid-file.py +t/setup_01_install.py t/cluster-init.py -t/get_info.py t/cluster-remove-node.py t/cluster-add-node.py t/cluster-list-nodes.py -t/cluster-init-bad-json.py t/cluster-replication-check.py ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py + +t/setup_01_install.py +t/cluster-create-json.py +t/cluster-json-validate.py +t/cluster-json-invalid-file.py +t/cluster-init-bad-version.py +t/cluster-init-bad-json.py + +## Remove components, Clean environment and free ports +t/cleanup_03_remove_nc.py ## Multi-node cluster tests -t/020_nodectl_install_pgedge.pl -t/multi-db_cluster_setup.py +t/setup_01_install.py +#t/multi-db_cluster_setup.py t/multi-db_cluster_exercise_ace.py ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py +#t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/db_schedule b/schedule_files/db_schedule index c0acf643..6222d09f 100644 --- a/schedule_files/db_schedule +++ b/schedule_files/db_schedule @@ -1,12 +1,9 @@ ## # -t/020_nodectl_install_pgedge.pl -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py +t/spock_2_node_create.py t/db-guc-show.py t/db-guc-show-no-guc.py @@ -19,8 +16,5 @@ t/db-guc-set-no-reload.py t/db-guc-set-invalid-value.py ##Teardown Scripts -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py - - +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/filtering_schedule b/schedule_files/filtering_schedule index 7b84a8cf..ce55cdaa 100644 --- a/schedule_files/filtering_schedule +++ b/schedule_files/filtering_schedule @@ -1,18 +1,8 @@ ## Set up a two node cluster -t/020_nodectl_install_pgedge.pl -#t/300_setup_script.pl - -## Setup scripts for lower level directory -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl -t/6000_setup_sub_create_n1n2_n1.pl -t/6001_setup_sub_create_n2n1_n2.pl +t/setup_01_install.py +## Setup scripts +t/cluster-init.py #Filtering scripts t/column_filtering.pl @@ -20,8 +10,7 @@ t/row_filtering.pl t/partition_filtering.pl ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl +t/cleanup_01_node_remove.py # Delete the nc directory and pgpass file -t/9998_remove_nc_and_pgpass_dirs.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/long-test b/schedule_files/long-test index 0e687aee..b0a5fda2 100644 --- a/schedule_files/long-test +++ b/schedule_files/long-test @@ -1,45 +1,46 @@ -## Set up a two node cluster -t/020_nodectl_install_pgedge.pl -t/300_setup_script.pl -#t/get_info.py +## SERVICE MODULE: -## Test Service Module +t/setup_01_install.py +t/install_PGs_and_exercise_service.py -t/service_reload_component.pl -t/service_restart_component.pl -t/service_start_component.pl -t/service_stop_component.pl -t/service_status_without_flag.pl -t/service_enable_component.pl 
-t/service_disable_component.pl -t/service_enable_error.pl -#t/get_info.py +## UM MODULE -## Test UM Module t/um_install_available_components.py t/um_update_available_components.py t/um_remove_available_components.py -#t/get_info.py +t/cleanup_03_remove_nc.py -## At this point, we do not have a cluster; we have created nc, and installed pgedge on the lower level. There is a -## data directory remaining from the above removal process. +## CLUSTER MODULE -## Stand up rest of cluster -t/cluster-create-json.py -t/cluster-init-bad-json.py -t/cluster-json-invalid-file.py -t/cluster-json-validate.py +t/setup_01_install.py t/cluster-init.py -#t/get_info.py t/cluster-remove-node.py t/cluster-add-node.py t/cluster-list-nodes.py -t/cluster-init-bad-json.py t/cluster-replication-check.py -## Test the DB module -## We have a two node cluster in: /home/ec2-user/work/platform_test/nc/pgedge/cluster/demo/n1 and n2 +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py +t/setup_01_install.py +t/cluster-create-json.py +t/cluster-json-validate.py +t/cluster-json-invalid-file.py +t/cluster-init-bad-version.py +t/cluster-init-bad-json.py +## QubeRT specific tests +t/cleanup_03_remove_nc.py +t/setup_01_install.py +t/multi-db_cluster_setup.py +t/multi-db_cluster_exercise_ace.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py + +## DB MODULE +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py +t/spock_2_node_create.py t/db-guc-show.py t/db-guc-show-no-guc.py t/db-guc-show-wildcard.py @@ -48,69 +49,48 @@ t/db-guc-set.py t/db-guc-set-invalid-type.py t/db-guc-set-no-reload.py t/db-guc-set-invalid-value.py -#t/get_info.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py -## At this point, we still have a two node cluster: /home/ec2-user/work/platform_test/nc/pgedge/cluster/demo/n1 and n2 +## SPOCK MODULE -## Test sub_tests -# error tests +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py t/spock_node_create_no_node_name.py t/spock_node_create_no_repset_user.py t/spock_node_create_no_dbname.py t/spock_node_create_no_dns.py -#t/get_info.py - -## -# sub --synchronize_structure tests -## t/spock_sub_create_synch_struct_n1.py t/spock_sub_create_synch_struct_n2.py -#t/get_info.py - -# cleanup scripts t/spock_sub_create_synch_cleanup.py -t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8086_env_node_drop_n1.pl t/8087_env_node_drop_n2.pl -#t/get_info.py - -## -# sub --synchronize_data tests -## t/spock_sub_create_synch_data_n1.py t/spock_sub_create_synch_data_n2.py -#t/get_info.py - -# cleanup scripts t/spock_sub_create_synch_cleanup.py t/8083_env_sub_drop_n2.pl t/8086_env_node_drop_n1.pl t/8087_env_node_drop_n2.pl -#t/get_info.py - -## -# sub --synchronize_structure and --synchronize_data tests -## t/spock_sub_create_synch_all_n1.py t/spock_sub_create_synch_all_n2.py -#t/get_info.py - -# cleanup scripts -t/spock_sub_create_synch_cleanup.py -t/8083_env_sub_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -#t/get_info.py +t/spock_node_add_interface.py +t/spock_node_drop_interface.py +t/spock_node_add_interface_no_db.py +t/spock_node_drop_interface_no_interface.py +t/spock_node_drop_interface_no_db.py +t/spock_7_negative_list.py +t/spock_8_negative_create.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py -## -#t/8000a_env_setup_pgedge_node1.pl -#t/8001a_env_setup_pgedge_node2.pl -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl -#t/get_info.py +## PGBENCH -## pgbench +t/setup_01_install.py +t/setup_03_node_install.py 
+t/setup_04_node_setup.py +t/spock_2_node_create.py t/pgbench-install.py t/pgbench-remove_leaves_my_table.py t/pgbench-install_with_repset.py @@ -118,11 +98,6 @@ t/pgbench-run.py t/pgbench-validate.py t/pgbench-install_skip.py t/pgbench-remove.py -#t/get_info.py - -## -#pgBench Negative-Tests -# t/pgbench-install_no_dbname.py t/pgbench-install_invalid_dbname.py t/pgbench-install_invalid_dbname_valid_repsetname.py @@ -134,198 +109,115 @@ t/pgbench-run_invalid_rate.py t/pgbench-validate_no_dbname.py t/pgbench-validate_invalid_dbname.py t/pgbench-remove_no_dbname.py -#t/get_info.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py + +## NORTHWIND + +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py +t/spock_2_node_create.py +t/northwind-install.py +t/northwind-install_with_repset.py +t/northwind-run.py +t/northwind-validate.py +t/northwind-remove.py +t/northwind-install_no_dbname.py +t/northwind-install_invalid_dbname.py +t/northwind-run_dependency_on_northwind-install.py +t/northwind-install_valid_dbname_invalid_repsetname.py +t/northwind-run_without_dbname.py +t/northwind-run_without_offset.py +t/northwind-validate_no_dbname.py +t/northwind-validate_invalid_dbname.py +t/northwind-remove_no_dbname.py +t/northwind-remove_invalid_dbname.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py ##REPSET TESTS -# -# repset replicateDelete=False test cases -## + +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py +t/spock_2_node_create.py +## repset replicateDelete=False test cases t/8060_env_delete_false_n1.pl t/8061_env_sub_n1n2_delete_false.pl t/8062_env_delete_false_n2.pl t/8063_env_sub_n2n1_delete_false.pl t/8064_env_delete_replication_check.pl -#t/get_info.py - -## -# cleanup scripts -## t/8080_env_repset_drop_n1.pl t/8081_env_repset_drop_n2.pl t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8084_env_table_drop_n1.pl t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -#t/get_info.py - -## -# node creation -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl -#t/get_info.py - -## -# repset replicateTruncate=False test cases -## +## repset replicateInsert=False test cases +t/8065_env_insert_false_n1.pl +t/8066_env_sub_n1n2_insert_false.pl +t/8067_env_insert_false_n2.pl +t/8068_env_sub_n2n1_insert_false.pl +t/8069_env_insert_replication_check.pl +t/8080_env_repset_drop_n1.pl +t/8081_env_repset_drop_n2.pl +t/8082_env_sub_drop_n1.pl +t/8083_env_sub_drop_n2.pl +t/8084_env_table_drop_n1.pl +t/8085_env_table_drop_n2.pl +## repset replicateTruncate=False test cases t/8075_env_truncate_false_n1.pl t/8076_env_sub_n1n2_truncate_false.pl t/8077_env_truncate_false_n2.pl t/8078_env_sub_n2n1_truncate_false.pl t/8079_env_truncate_replication_check.pl -#t/get_info.py - -## -# cleanup scripts -## t/8080_env_repset_drop_n1.pl t/8081_env_repset_drop_n2.pl t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8084_env_table_drop_n1.pl t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -## -# node creation -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl -## -# repset replicateUpdate=False test cases -## +## repset replicateUpdate=False test cases t/8070_env_update_false_n1.pl t/8071_env_sub_n1n2_update_false.pl t/8072_env_update_false_n2.pl t/8073_env_sub_n2n1_update_false.pl t/8074_env_update_replication_check.pl -## -# cleanup scripts -## t/8080_env_repset_drop_n1.pl t/8081_env_repset_drop_n2.pl t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl 
t/8084_env_table_drop_n1.pl t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -# node creation -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl -## -# spock node-add and node-drop test cases -## -t/spock_node_add_interface.py -t/spock_node_drop_interface.py -t/spock_node_add_interface_no_db.py -t/spock_node_drop_interface_no_interface.py -t/spock_node_drop_interface_no_db.py - -## -# spock sub-create -r 'repset_array' -## - t/spock_create_sub_specify_repsets.py t/spock_create_sub_specify_repsets_nonrepset_user.py t/spock_sub_remove_repset.py t/spock_sub_remove_repset_error.py - -## -# spock repset-create errors -## - t/spock_repset_create_error_1.py t/spock_repset_create_error_2.py t/spock_repset_create_error_3.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py -# cleanup scripts -## - -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl - -# Set up for the next round of tests - -## Test Spock Module -t/spock_1_setup.py -t/spock_2_node_create.py -t/spock_3_sub_create.py -t/spock_4_repset_add_table.py -t/spock_5_cofirm_replication.py - -## Test Ace Module (currently tested in ace-test) -# t/cluster_1_gen_json.py -# t/ace_1_setup.py -# t/ace_2_diff_table.py -# t/ace_3_diff_table_args.py -# t/ace_4_diff_additional.py -# t/ace_99_cleanup.py - -## Test Drop and Negative Spock Module -t/spock_6_drop.py -t/spock_7_negative_list.py -t/spock_8_negative_create.py - -## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py - -## Run the multi-db cluster tests -t/020_nodectl_install_pgedge.pl -t/multi-db_cluster_setup.py -t/multi-db_cluster_exercise_ace.py - -## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py - -## Setup for Filtering Tests - -t/020_nodectl_install_pgedge.pl -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl -t/6000_setup_sub_create_n1n2_n1.pl -t/6001_setup_sub_create_n2n1_n2.pl - -#Filtering Scripts - +## FILTERING +t/setup_01_install.py +t/cluster-init.py t/column_filtering.pl t/row_filtering.pl t/partition_filtering.pl +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py -## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl - -## Setup for Snowflake Tests - -t/020_nodectl_install_pgedge.pl -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +## SNOWFLAKE +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py t/8051_env_create_node1.pl t/8052_env_create_node2.pl - -# Snowflake Scripts - t/snowflake.py t/snowflake_script.py t/snowflake_spock_cmds.py - -## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py - +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/northwind_schedule b/schedule_files/northwind_schedule index e72cc0ea..4f29fadd 100644 --- a/schedule_files/northwind_schedule +++ b/schedule_files/northwind_schedule @@ -1,12 +1,8 @@ ## -t/020_nodectl_install_pgedge.pl -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl 
-t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl - +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py +t/spock_2_node_create.py #northwind-install commands # @@ -29,6 +25,5 @@ t/northwind-remove_no_dbname.py t/northwind-remove_invalid_dbname.py ##Teardown Scripts -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/pgbench_schedule b/schedule_files/pgbench_schedule index 358bdf5b..c9119237 100644 --- a/schedule_files/pgbench_schedule +++ b/schedule_files/pgbench_schedule @@ -1,11 +1,8 @@ ## -t/020_nodectl_install_pgedge.pl -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py +t/spock_2_node_create.py ## pgbench t/pgbench-install.py @@ -33,6 +30,5 @@ t/pgbench-validate_invalid_dbname.py t/pgbench-remove_no_dbname.py ##Teardown Scripts -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl -t/9998_remove_nc_and_pgpass_dirs.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/repset_tests b/schedule_files/repset_tests index 53604227..09b93021 100644 --- a/schedule_files/repset_tests +++ b/schedule_files/repset_tests @@ -1,15 +1,13 @@ ## # setup scripts ## -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py ## # node creation ## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +t/spock_2_node_create.py ## # repset replicateDelete=False test cases ## @@ -27,13 +25,6 @@ t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8084_env_table_drop_n1.pl t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -## -# node creation -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl ## # repset replicateInsert=False test cases ## @@ -51,13 +42,7 @@ t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8084_env_table_drop_n1.pl t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -## -# node creation ## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl ## # repset replicateTruncate=False test cases ## @@ -75,13 +60,7 @@ t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8084_env_table_drop_n1.pl t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl ## -# node creation -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl ## # repset replicateUpdate=False test cases ## @@ -99,12 +78,6 @@ t/8082_env_sub_drop_n1.pl t/8083_env_sub_drop_n2.pl t/8084_env_table_drop_n1.pl t/8085_env_table_drop_n2.pl -t/8086_env_node_drop_n1.pl -t/8087_env_node_drop_n2.pl -# node creation -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl ## # spock node-add and node-drop test cases ## @@ -134,5 +107,5 @@ t/spock_repset_create_error_3.py ## # uninstall pgedge ## -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/service_schedule b/schedule_files/service_schedule index 595d5fcc..195d30f2 100644 --- a/schedule_files/service_schedule +++ 
b/schedule_files/service_schedule @@ -1,6 +1,6 @@ ## Set up a two node cluster -t/020_nodectl_install_pgedge.pl -t/300_setup_script.pl +t/setup_01_install.py +t/setup_02_setup.py ## Test Service Module t/service_reload_component.pl @@ -14,5 +14,4 @@ t/service_enable_error.pl t/399_um_breakdown_script.pl -# 9998X.py to deletes the nc and pgpass directory, etc. -t/9998_remove_nc_and_pgpass_dirs.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/short-test b/schedule_files/short-test index 83dd9e73..4239e9d3 100644 --- a/schedule_files/short-test +++ b/schedule_files/short-test @@ -1,8 +1,7 @@ -t/020_nodectl_install_pgedge.pl -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py +t/cluster_1_gen_json.py t/spock_1_setup.py t/spock_2_node_create.py @@ -10,4 +9,5 @@ t/spock_3_sub_create.py t/spock_4_repset_add_table.py t/spock_5_cofirm_replication.py -t/spock_99_cleanup.py +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/snowflake b/schedule_files/snowflake index 2b432c9f..88042e12 100644 --- a/schedule_files/snowflake +++ b/schedule_files/snowflake @@ -1,23 +1,16 @@ ## Set up a two node cluster -t/020_nodectl_install_pgedge.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py - -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +t/spock_2_node_create.py t/snowflake.py t/snowflake_script.py t/snowflake_spock_cmds.py ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl +t/cleanup_01_node_remove.py # Delete the nc directory and pgpass file -t/9998_remove_nc_and_pgpass_dirs.py - - +t/cleanup_03_remove_nc.py diff --git a/schedule_files/spock_4.0 b/schedule_files/spock_4.0 index 467df5b4..54617221 100644 --- a/schedule_files/spock_4.0 +++ b/schedule_files/spock_4.0 @@ -1,23 +1,22 @@ -## Set up a two node cluster -t/020_nodectl_install_pgedge.pl - -## Setup scripts for lower level directory -t/8000a_env_setup_pgedge_node1.pl -t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl -## -t/8051_env_create_node1.pl -t/8052_env_create_node2.pl +## Spock repair mode functionality +t/setup_01_install.py +t/cluster-init-2-node-cluster.py ## Spock 4.0 Scripts -t/spock_repair_function.py +#t/spock_repair_function.py +#t/spock_exception_table_case_discard1.py +#t/spock_exception_table_case_discard2.py +#t/spock_exception_table_case_discard3.py +#t/spock_exception_table_case_sub-disable1.py +#t/spock_exception_table_case_sub-disable2.py +#t/spock_exception_table_case_sub-disable3.py +t/spock_exception_table_case_transdiscard1.py +t/spock_exception_table_case_transdiscard2.py +t/spock_exception_table_case_transdiscard3.py ## Remove components, Clean environment and free ports -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl - +#t/cleanup_01_node_remove.py # Delete the nc directory and pgpass file -t/9998_remove_nc_and_pgpass_dirs.py +#t/cleanup_03_remove_nc.py diff --git a/schedule_files/sub_tests b/schedule_files/sub_tests index 9414fde7..6e7b76e1 100644 --- a/schedule_files/sub_tests +++ b/schedule_files/sub_tests @@ -1,10 +1,9 @@ ## # setup scripts ## -t/8000a_env_setup_pgedge_node1.pl 
-t/8001a_env_setup_pgedge_node2.pl -t/8000b_install_pgedge_node1.pl -t/8001b_install_pgedge_node2.pl +t/setup_01_install.py +t/setup_03_node_install.py +t/setup_04_node_setup.py ## # error tests @@ -48,5 +47,5 @@ t/spock_sub_create_synch_all_n2.py ## # uninstall pgedge ## -t/8998_env_remove_pgedge_node1.pl -t/8999_env_remove_pgedge_node2.pl +t/cleanup_01_node_remove.py +t/cleanup_03_remove_nc.py diff --git a/schedule_files/um_schedule b/schedule_files/um_schedule index 9b7e0049..d7de7e38 100644 --- a/schedule_files/um_schedule +++ b/schedule_files/um_schedule @@ -1,9 +1,10 @@ -t/020_nodectl_install_pgedge.pl -t/300_setup_script.pl +t/setup_01_install.py +#t/setup_02_setup.py -t/um1_install_available_components.py -#t/um_install_available_components.py -#t/um_update_available_components.py -#t/um_remove_available_components.py +t/install_PGs_and_exercise_service.py +t/um_install_available_components.py +t/um_update_available_components.py +t/um_remove_available_components.py -t/9998_remove_nc_and_pgpass_dirs.py +#t/cleanup_02_remove.py +t/cleanup_03_remove_nc.py diff --git a/t/600_cluster_setup_script_v15.pl b/t/600_cluster_setup_script_v15.pl deleted file mode 100644 index 4534998a..00000000 --- a/t/600_cluster_setup_script_v15.pl +++ /dev/null @@ -1,56 +0,0 @@ -# This test case runs the command: -# ./nodectl cluster create-local demo 2 --pg 15 -# - -use strict; -use warnings; -use lib './t/lib'; -use contains; -use File::Which; -use IPC::Cmd qw(run); -use Try::Tiny; -use JSON; - -# -# Move into the pgedge directory. -# - chdir("./pgedge"); - -# -# First, we use nodectl to create a two-node cluster named demo; the nodes are named n1/n2 (default names), -# the database is named lcdb (default), and it is owned by lcdb (default). At this point, lcdb is not added -# to the .pgpass file. -# - -my $cmd = qq(./nodectl cluster local-create demo 2 --pg 15); -print("cmd = $cmd\n"); -my ($success, $error_message, $full_buf, $stdout_buf, $stderr_buf)= IPC::Cmd::run(command => $cmd, verbose => 0); - -# -# Print statements -# - -print("full_buf = @$full_buf\n"); -print("stderr_buf = @$stderr_buf\n"); - -print("This s/b a 2 node cluster named demo, owned by lcdb, with a db named lcdb. The nodes are named n1/n2.\n"); -print("Right now, they're running on 6432 and 6433\n"); - -# -# Then, we retrieve the Postgres version (the component) number from nodectl in json form... -# this is to catch cases where more than one copy of Postgres is running. 
#
-my $json = `./nc --json info pg15`;
-my $out = decode_json($json);
-my $component = $out->[0]->{"component"};
-print("The cluster is running = {$component}\n");
-
-if(contains($component, "pg15"))
-{
-exit(0);
-}
-else
-{
-exit(1);
-}
-
diff --git a/t/8064_env_delete_replication_check.pl b/t/8064_env_delete_replication_check.pl
index 4824ba49..1a0308db 100644
--- a/t/8064_env_delete_replication_check.pl
+++ b/t/8064_env_delete_replication_check.pl
@@ -130,7 +130,7 @@
 # Listing table contents of Port2 6433
 #
 print("Adding call to sleep function")
- my $cmd999 = qq(sleep($seconds));
+ my $cmd999 = qq(sleep $seconds);
 my($success999, $error_message999, $full_buf999, $stdout_buf999, $stderr_buf999)= IPC::Cmd::run(command => $cmd999, verbose => 0);
 print("cmd999 = $cmd999\n");
diff --git a/t/8069_env_insert_replication_check.pl b/t/8069_env_insert_replication_check.pl
index 9b73c135..ef036463 100644
--- a/t/8069_env_insert_replication_check.pl
+++ b/t/8069_env_insert_replication_check.pl
@@ -225,7 +225,7 @@
 # Listing table contents of Port2 6433
 #
 # print("Adding call to sleep function")
- my $cmd999 = qq(sleep($seconds));
+ my $cmd999 = qq(sleep $seconds);
 my($success999, $error_message999, $full_buf999, $stdout_buf999, $stderr_buf999)= IPC::Cmd::run(command => $cmd999, verbose => 0);
 print("TRUNCATE FUNCTION REPLICATION CHECK IN NODE n2\n");
diff --git a/t/8074_env_update_replication_check.pl b/t/8074_env_update_replication_check.pl
index 03042bdb..e0a2366e 100644
--- a/t/8074_env_update_replication_check.pl
+++ b/t/8074_env_update_replication_check.pl
@@ -132,7 +132,7 @@
 print("INSERT=TRUE REPLICATION CHECK IN NODE n2\n");
 # print("Adding call to sleep function")
- my $cmd999 = qq(sleep($seconds));
+ my $cmd999 = qq(sleep $seconds);
 my($success999, $error_message999, $full_buf999, $stdout_buf999, $stderr_buf999)= IPC::Cmd::run(command => $cmd999, verbose => 0);
 print ("-"x45,"\n");
diff --git a/t/8079_env_truncate_replication_check.pl b/t/8079_env_truncate_replication_check.pl
index b43dd875..bfbe3216 100644
--- a/t/8079_env_truncate_replication_check.pl
+++ b/t/8079_env_truncate_replication_check.pl
@@ -173,7 +173,7 @@
 print("INSERT=TRUE REPLICATION CHECK IN NODE n2\n");
 # print("Adding call to sleep function")
- my $cmd999 = qq(sleep($seconds));
+ my $cmd999 = qq(sleep $seconds);
 my($success999, $error_message999, $full_buf999, $stdout_buf999, $stderr_buf999)= IPC::Cmd::run(command => $cmd999, verbose => 0);
 print ("-"x45,"\n");
diff --git a/t/auto_ddl/6001_env_prereq_autoddl_setup_n1.out b/t/auto_ddl/6001_env_prereq_autoddl_setup_n1.out
new file mode 100644
index 00000000..b5c71b4a
--- /dev/null
+++ b/t/auto_ddl/6001_env_prereq_autoddl_setup_n1.out
@@ -0,0 +1,33 @@
+-- This is a pre-req file that needs to be executed prior to any of the autoDDL sql tests
+-- This will create the necessary shared objects needed by the autoDDL tests
+--creating a superuser
+CREATE ROLE adminuser SUPERUSER LOGIN;
+INFO: DDL statement replicated.
+CREATE ROLE
+--creating a non-superuser that will have access to the public schema as well as user schemas
+-- the permission on the public schema will be granted here whereas the individual schema privileges
+-- will be assigned in the individual tests.
+CREATE ROLE appuser LOGIN;
+INFO: DDL statement replicated.
+CREATE ROLE
+GRANT ALL PRIVILEGES ON SCHEMA public TO appuser;
+INFO: DDL statement replicated.
+GRANT +-- Creating a function with SECURITY DEFINER privileges so that a nonsuper +-- can query the spock.table catalog to check for tables' repset assignments +CREATE OR REPLACE FUNCTION public.get_table_repset_info(partial_name TEXT) +RETURNS TABLE (nspname TEXT, relname TEXT, set_name TEXT) +LANGUAGE sql +SECURITY DEFINER AS +$$ +SELECT nspname, relname, set_name +FROM spock.tables +WHERE relname LIKE '%' || partial_name || '%' +ORDER BY relid; +$$; +INFO: DDL statement replicated. +CREATE FUNCTION +-- Grant execution rights to the non-superuser +GRANT EXECUTE ON FUNCTION public.get_table_repset_info(TEXT) TO appuser; +INFO: DDL statement replicated. +GRANT diff --git a/t/auto_ddl/6001_env_prereq_autoddl_setup_n1.sql b/t/auto_ddl/6001_env_prereq_autoddl_setup_n1.sql new file mode 100644 index 00000000..83dc93c3 --- /dev/null +++ b/t/auto_ddl/6001_env_prereq_autoddl_setup_n1.sql @@ -0,0 +1,29 @@ +-- This is a pre-req file that needs to executed prior to any of the autoDDL sql tests +-- This will create the necessary shared objects needed by the autoDDL tests + +--creating a superuser +CREATE ROLE adminuser SUPERUSER LOGIN; + +--creating a non superuser that will have access to the public schema as well as user schemas +-- the permission on the public schema will be granted here whereas the individual schema privileges +-- will be assigned in the individual tests. +CREATE ROLE appuser LOGIN; + +GRANT ALL PRIVILEGES ON SCHEMA public TO appuser; + +-- Creating a function with SECURITY DEFINER privileges so that a nonsuper +-- can query the spock.table catalog to check for tables' repset assignments +CREATE OR REPLACE FUNCTION public.get_table_repset_info(partial_name TEXT) +RETURNS TABLE (nspname TEXT, relname TEXT, set_name TEXT) +LANGUAGE sql +SECURITY DEFINER AS +$$ +SELECT nspname, relname, set_name +FROM spock.tables +WHERE relname LIKE '%' || partial_name || '%' +ORDER BY relid; +$$; + +-- Grant execution rights to the non-superuser +GRANT EXECUTE ON FUNCTION public.get_table_repset_info(TEXT) TO appuser; + diff --git a/t/auto_ddl/6100a_table_datatypes_create_alter_n1.out b/t/auto_ddl/6100a_table_datatypes_create_alter_n1.out index 401e4423..e0fc46e0 100644 --- a/t/auto_ddl/6100a_table_datatypes_create_alter_n1.out +++ b/t/auto_ddl/6100a_table_datatypes_create_alter_n1.out @@ -1,10 +1,23 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- 6100a_create_alter_table_n1.sql -- This script creates and alters tables on node n1 to test the autoDDL functionality. -- It includes a wide variety of data types and exercises several CREATE TABLE/ ALTER TABLE DDL constructs. -- Also regularly verifying spock.tables --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; -PREPARE +CREATE SCHEMA IF NOT EXISTS s610; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA s610 TO appuser; +INFO: DDL statement replicated. 
+GRANT +SET ROLE appuser; +SET +SET search_path TO s610, public; +SET -- Create a table for employee details with various data types CREATE TABLE employees ( emp_id INT PRIMARY KEY, @@ -30,7 +43,7 @@ INSERT INTO employees (emp_id, first_name, last_name, email, hire_date, birth_ti INSERT 0 2 -- Validate the structure, spock.tables catalog table and data \d employees - Table "public.employees" + Table "s610.employees" Column | Type | Collation | Nullable | Default -----------------+-----------------------------+-----------+----------+--------- emp_id | integer | | not null | @@ -51,10 +64,10 @@ Indexes: Check constraints: "chk_salary" CHECK (salary > 0::numeric) -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); nspname | relname | set_name ---------+-----------+---------- - public | employees | default + s610 | employees | default (1 row) -- Create a table for department details @@ -75,7 +88,7 @@ INSERT INTO departments (dept_id, dept_name, location, established, budget, acti INSERT 0 2 -- Validate the structure, spock.tables catalog table and data \d departments - Table "public.departments" + Table "s610.departments" Column | Type | Collation | Nullable | Default -------------+------------------------+-----------+----------+--------- dept_id | integer | | not null | @@ -87,10 +100,10 @@ INSERT 0 2 Indexes: "departments_pkey" PRIMARY KEY, btree (dept_id) -EXECUTE spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); nspname | relname | set_name ---------+-------------+---------- - public | departments | default + s610 | departments | default (1 row) -- Alter table employees to add new columns, modify existing columns, and add constraints @@ -111,7 +124,7 @@ INFO: DDL statement replicated. ALTER TABLE -- Validate the structure, spock.tables catalog table and data \d employees - Table "public.employees" + Table "s610.employees" Column | Type | Collation | Nullable | Default -----------------+-----------------------------+-----------+----------+--------- emp_id | integer | | not null | @@ -136,10 +149,10 @@ Check constraints: Foreign-key constraints: "fk_dept" FOREIGN KEY (dept_id) REFERENCES departments(dept_id) -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); nspname | relname | set_name ---------+-----------+---------- - public | employees | default + s610 | employees | default (1 row) -- Insert additional data with new columns @@ -166,7 +179,7 @@ INSERT INTO projects (project_id, project_name, start_date, end_date, budget, ac INSERT 0 2 -- Validate the structure, spock.tables catalog table and data \d projects - Table "public.projects" + Table "s610.projects" Column | Type | Collation | Nullable | Default --------------+------------------------+-----------+----------+--------- project_id | integer | | not null | @@ -181,10 +194,10 @@ Indexes: Check constraints: "projects_budget_check" CHECK (budget > 0::numeric) -EXECUTE spocktab('projects'); +SELECT * FROM get_table_repset_info('projects'); nspname | relname | set_name ---------+----------+---------- - public | projects | default + s610 | projects | default (1 row) -- Create a table for employee projects (many-to-many relationship) @@ -207,7 +220,7 @@ INSERT INTO employee_projects (emp_id, project_id, hours_worked, role) VALUES INSERT 0 3 -- Validate the structure, spock.tables catalog table and data \d employee_projects - Table "public.employee_projects" + Table "s610.employee_projects" Column | Type | Collation | Nullable | Default 
--------------+-----------------------+-----------+----------+--------- emp_id | integer | | not null | @@ -220,10 +233,10 @@ Foreign-key constraints: "employee_projects_emp_id_fkey" FOREIGN KEY (emp_id) REFERENCES employees(emp_id) "employee_projects_project_id_fkey" FOREIGN KEY (project_id) REFERENCES projects(project_id) -EXECUTE spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); nspname | relname | set_name ---------+-------------------+---------- - public | employee_projects | default + s610 | employee_projects | default (1 row) -- Create additional tables to cover more data types and constraints @@ -235,7 +248,7 @@ CREATE TABLE products ( discontinued BOOLEAN, product_description TEXT, added TIMESTAMP WITHOUT TIME ZONE, - updated TIMESTAMPTZ + updated TIMESTAMP WITHOUT TIME ZONE ); INFO: DDL statement replicated. CREATE TABLE @@ -246,7 +259,7 @@ INSERT INTO products (product_id, product_name, price, stock_quantity, discontin INSERT 0 2 -- Validate the structure, spock.tables catalog table and data \d products - Table "public.products" + Table "s610.products" Column | Type | Collation | Nullable | Default ---------------------+-----------------------------+-----------+----------+--------- product_id | integer | | not null | @@ -256,14 +269,14 @@ INSERT 0 2 discontinued | boolean | | | product_description | text | | | added | timestamp without time zone | | | - updated | timestamp with time zone | | | + updated | timestamp without time zone | | | Indexes: "products_pkey" PRIMARY KEY, btree (product_id) -EXECUTE spocktab('products'); +SELECT * FROM get_table_repset_info('products'); nspname | relname | set_name ---------+----------+---------- - public | products | default + s610 | products | default (1 row) -- Alter table products to add and modify columns @@ -278,7 +291,7 @@ INFO: DDL statement replicated. 
ALTER TABLE -- Validate the structure, spock.tables catalog table and data \d products - Table "public.products" + Table "s610.products" Column | Type | Collation | Nullable | Default ---------------------+-----------------------------+-----------+----------+--------- product_id | integer | | not null | @@ -288,17 +301,17 @@ ALTER TABLE discontinued | boolean | | | product_description | text | | | added | timestamp without time zone | | | - updated | timestamp with time zone | | | + updated | timestamp without time zone | | | category | character varying(50) | | | Indexes: "products_pkey" PRIMARY KEY, btree (product_id) Check constraints: "price_check" CHECK (price > 0::numeric) -EXECUTE spocktab('products'); +SELECT * FROM get_table_repset_info('products'); nspname | relname | set_name ---------+----------+---------- - public | products | default + s610 | products | default (1 row) -- Update product data @@ -319,7 +332,7 @@ INSERT INTO "CaseSensitiveTable" ("ID", "Name", "Value") VALUES INSERT 0 2 -- Validate the structure, spock.tables catalog table and data \d "CaseSensitiveTable" - Table "public.CaseSensitiveTable" + Table "s610.CaseSensitiveTable" Column | Type | Collation | Nullable | Default --------+-----------------------+-----------+----------+--------- ID | integer | | not null | @@ -328,10 +341,10 @@ INSERT 0 2 Indexes: "CaseSensitiveTable_pkey" PRIMARY KEY, btree ("ID") -EXECUTE spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); nspname | relname | set_name ---------+--------------------+---------- - public | CaseSensitiveTable | default + s610 | CaseSensitiveTable | default (1 row) -- Create table to test various ALTER TABLE operations @@ -356,7 +369,7 @@ INFO: DDL statement replicated. ALTER TABLE -- Validate the structure, spock.tables catalog table and data \d test_tab1 - Table "public.test_tab1" + Table "s610.test_tab1" Column | Type | Collation | Nullable | Default ----------+------------------------+-----------+----------+--------- id | uuid | | not null | @@ -364,16 +377,16 @@ ALTER TABLE Indexes: "test_tab1_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab1 | default + s610 | test_tab1 | default (1 row) -- Create table to test more data types and constraints CREATE TABLE test_tab2 ( id INT PRIMARY KEY, - timestamp_col TIMESTAMPTZ, + timestamp_col TIMESTAMP WITHOUT TIME ZONE, interval_col INTERVAL, inet_col INET, cidr_col CIDR, @@ -395,30 +408,30 @@ INSERT INTO test_tab2 (id, timestamp_col, interval_col, inet_col, cidr_col, maca INSERT 0 1 -- Validate the structure, spock.tables catalog table and data \d test_tab2 - Table "public.test_tab2" - Column | Type | Collation | Nullable | Default ----------------+--------------------------+-----------+----------+--------- - id | integer | | not null | - timestamp_col | timestamp with time zone | | | - interval_col | interval | | | - inet_col | inet | | | - cidr_col | cidr | | | - macaddr_col | macaddr | | | - bit_col | bit(8) | | | - varbit_col | bit varying(8) | | | - box_col | box | | | - circle_col | circle | | | - line_col | line | | | - lseg_col | lseg | | | - path_col | path | | | - polygon_col | polygon | | | + Table "s610.test_tab2" + Column | Type | Collation | Nullable | Default +---------------+-----------------------------+-----------+----------+--------- + id | integer | | not null | + timestamp_col | timestamp without time zone | 
| | + interval_col | interval | | | + inet_col | inet | | | + cidr_col | cidr | | | + macaddr_col | macaddr | | | + bit_col | bit(8) | | | + varbit_col | bit varying(8) | | | + box_col | box | | | + circle_col | circle | | | + line_col | line | | | + lseg_col | lseg | | | + path_col | path | | | + polygon_col | polygon | | | Indexes: "test_tab2_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab2 | default + s610 | test_tab2 | default (1 row) -- Create table to test composite and array types @@ -437,7 +450,7 @@ INSERT INTO test_tab3 (id, name, int_array, text_array) VALUES INSERT 0 2 -- Validate the structure, spock.tables catalog table and data \d test_tab3 - Table "public.test_tab3" + Table "s610.test_tab3" Column | Type | Collation | Nullable | Default ------------+------------------------+-----------+----------+--------- id | integer | | not null | @@ -447,10 +460,10 @@ INSERT 0 2 Indexes: "test_tab3_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab3 | default + s610 | test_tab3 | default (1 row) -- creating table without primary key to ensure the default repset is default_insert_only @@ -467,10 +480,10 @@ CREATE TABLE INSERT INTO test_tab4 (id, data) VALUES ('m2eebc99', 'Initial data'); INSERT 0 1 -- Execute prepared statement for the table, repset default_insert_only -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+--------------------- - public | test_tab4 | default_insert_only + s610 | test_tab4 | default_insert_only (1 row) -- Alter table to add a primary key on the id column @@ -479,7 +492,7 @@ INFO: DDL statement replicated. ALTER TABLE -- Display the table structure \d test_tab4 - Table "public.test_tab4" + Table "s610.test_tab4" Column | Type | Collation | Nullable | Default --------+------------------------+-----------+----------+--------- id | text | | not null | @@ -488,10 +501,10 @@ Indexes: "test_tab4_pkey" PRIMARY KEY, btree (id) -- Execute prepared statement for the table, repset default -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab4 | default + s610 | test_tab4 | default (1 row) -- Alter table to remove primary key @@ -510,17 +523,17 @@ INFO: DDL statement replicated. ALTER TABLE -- Display the table structure \d test_tab4 - Table "public.test_tab4" + Table "s610.test_tab4" Column | Type | Collation | Nullable | Default ----------+------------------------+-----------+----------+--------- id | text | | not null | old_data | character varying(100) | | | -- Execute prepared statement again for the table -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+--------------------- - public | test_tab4 | default_insert_only + s610 | test_tab4 | default_insert_only (1 row) -- Alter table to add a primary key on multiple columns @@ -529,7 +542,7 @@ INFO: DDL statement replicated. 
ALTER TABLE -- Display the table structure \d test_tab4 - Table "public.test_tab4" + Table "s610.test_tab4" Column | Type | Collation | Nullable | Default ----------+------------------------+-----------+----------+--------- id | text | | not null | @@ -538,10 +551,10 @@ Indexes: "test_tab4_pkey" PRIMARY KEY, btree (id, old_data) -- Execute prepared statement again for the table -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab4 | default + s610 | test_tab4 | default (1 row) -- Alter table to drop the primary key @@ -550,17 +563,17 @@ INFO: DDL statement replicated. ALTER TABLE -- Display the table structure \d test_tab4 - Table "public.test_tab4" + Table "s610.test_tab4" Column | Type | Collation | Nullable | Default ----------+------------------------+-----------+----------+--------- id | text | | not null | old_data | character varying(100) | | not null | -- Execute prepared statement again for the table -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+--------------------- - public | test_tab4 | default_insert_only + s610 | test_tab4 | default_insert_only (1 row) -- Negative test cases to validate constraints and error handling @@ -602,7 +615,7 @@ UPDATE test_tab5 SET character_col = 'upd_char', jsonb_col = '{"updated_key": "u UPDATE 1 -- Validate the structure of the table \d test_tab5 - Table "public.test_tab5" + Table "s610.test_tab5" Column | Type | Collation | Nullable | Default -------------------------+------------------------+-----------+----------+--------- bigint_col | bigint | | not null | @@ -620,15 +633,15 @@ UPDATE 1 Indexes: "test_tab5_pkey" PRIMARY KEY, btree (bigint_col) -EXECUTE spocktab('test_tab5'); -- default repset expected +SELECT * FROM get_table_repset_info('test_tab5'); -- default repset expected nspname | relname | set_name ---------+-----------+---------- - public | test_tab5 | default + s610 | test_tab5 | default (1 row) -- Final validation of all tables along with querying the spock.tables \d+ employees - Table "public.employees" + Table "s610.employees" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------------+-----------------------------+-----------+----------+---------+----------+-------------+--------------+------------- emp_id | integer | | not null | | plain | | | @@ -656,14 +669,14 @@ Referenced by: TABLE "employee_projects" CONSTRAINT "employee_projects_emp_id_fkey" FOREIGN KEY (emp_id) REFERENCES employees(emp_id) Access method: heap -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); nspname | relname | set_name ---------+-----------+---------- - public | employees | default + s610 | employees | default (1 row) \d+ departments - Table "public.departments" + Table "s610.departments" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- dept_id | integer | | not null | | plain | | | @@ -678,14 +691,14 @@ Referenced by: TABLE "employees" CONSTRAINT "fk_dept" FOREIGN KEY (dept_id) REFERENCES departments(dept_id) Access method: heap -execute spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); nspname | relname | set_name ---------+-------------+---------- - public | 
departments | default + s610 | departments | default (1 row) \d+ projects - Table "public.projects" + Table "s610.projects" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- project_id | integer | | not null | | plain | | | @@ -703,14 +716,15 @@ Referenced by: TABLE "employee_projects" CONSTRAINT "employee_projects_project_id_fkey" FOREIGN KEY (project_id) REFERENCES projects(project_id) Access method: heap -execute spocktab('projects'); - nspname | relname | set_name ----------+----------+---------- - public | projects | default -(1 row) +SELECT * FROM get_table_repset_info('projects'); + nspname | relname | set_name +---------+-------------------+---------- + s610 | projects | default + s610 | employee_projects | default +(2 rows) \d+ employee_projects - Table "public.employee_projects" + Table "s610.employee_projects" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- emp_id | integer | | not null | | plain | | | @@ -724,14 +738,14 @@ Foreign-key constraints: "employee_projects_project_id_fkey" FOREIGN KEY (project_id) REFERENCES projects(project_id) Access method: heap -execute spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); nspname | relname | set_name ---------+-------------------+---------- - public | employee_projects | default + s610 | employee_projects | default (1 row) \d+ products - Table "public.products" + Table "s610.products" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ---------------------+-----------------------------+-----------+----------+---------+----------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -741,7 +755,7 @@ execute spocktab('employee_projects'); discontinued | boolean | | | | plain | | | product_description | text | | | | extended | | | added | timestamp without time zone | | | | plain | | | - updated | timestamp with time zone | | | | plain | | | + updated | timestamp without time zone | | | | plain | | | category | character varying(50) | | | | extended | | | Indexes: "products_pkey" PRIMARY KEY, btree (product_id) @@ -749,14 +763,14 @@ Check constraints: "price_check" CHECK (price > 0::numeric) Access method: heap -execute spocktab('products'); +SELECT * FROM get_table_repset_info('products'); nspname | relname | set_name ---------+----------+---------- - public | products | default + s610 | products | default (1 row) \d+ "CaseSensitiveTable" - Table "public.CaseSensitiveTable" + Table "s610.CaseSensitiveTable" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- ID | integer | | not null | | plain | | | @@ -766,14 +780,14 @@ Indexes: "CaseSensitiveTable_pkey" PRIMARY KEY, btree ("ID") Access method: heap -execute spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); nspname | relname | set_name ---------+--------------------+---------- - public | CaseSensitiveTable | default + s610 | CaseSensitiveTable | default (1 row) \d+ test_tab1 - Table "public.test_tab1" 
+ Table "s610.test_tab1" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | uuid | | not null | | plain | | | @@ -782,42 +796,42 @@ Indexes: "test_tab1_pkey" PRIMARY KEY, btree (id) Access method: heap -execute spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab1 | default + s610 | test_tab1 | default (1 row) \d+ test_tab2 - Table "public.test_tab2" - Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------------+--------------------------+-----------+----------+---------+----------+-------------+--------------+------------- - id | integer | | not null | | plain | | | - timestamp_col | timestamp with time zone | | | | plain | | | - interval_col | interval | | | | plain | | | - inet_col | inet | | | | main | | | - cidr_col | cidr | | | | main | | | - macaddr_col | macaddr | | | | plain | | | - bit_col | bit(8) | | | | extended | | | - varbit_col | bit varying(8) | | | | extended | | | - box_col | box | | | | plain | | | - circle_col | circle | | | | plain | | | - line_col | line | | | | plain | | | - lseg_col | lseg | | | | plain | | | - path_col | path | | | | extended | | | - polygon_col | polygon | | | | extended | | | + Table "s610.test_tab2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +---------------+-----------------------------+-----------+----------+---------+----------+-------------+--------------+------------- + id | integer | | not null | | plain | | | + timestamp_col | timestamp without time zone | | | | plain | | | + interval_col | interval | | | | plain | | | + inet_col | inet | | | | main | | | + cidr_col | cidr | | | | main | | | + macaddr_col | macaddr | | | | plain | | | + bit_col | bit(8) | | | | extended | | | + varbit_col | bit varying(8) | | | | extended | | | + box_col | box | | | | plain | | | + circle_col | circle | | | | plain | | | + line_col | line | | | | plain | | | + lseg_col | lseg | | | | plain | | | + path_col | path | | | | extended | | | + polygon_col | polygon | | | | extended | | | Indexes: "test_tab2_pkey" PRIMARY KEY, btree (id) Access method: heap -execute spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab2 | default + s610 | test_tab2 | default (1 row) \d+ test_tab3 - Table "public.test_tab3" + Table "s610.test_tab3" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -828,28 +842,28 @@ Indexes: "test_tab3_pkey" PRIMARY KEY, btree (id) Access method: heap -execute spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab3 | default + s610 | test_tab3 | default (1 row) \d+ test_tab4 - Table "public.test_tab4" + Table "s610.test_tab4" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description 
----------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | text | | not null | | extended | | | old_data | character varying(100) | | not null | | extended | | | Access method: heap -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+--------------------- - public | test_tab4 | default_insert_only + s610 | test_tab4 | default_insert_only (1 row) \d+ test_tab5 - Table "public.test_tab5" + Table "s610.test_tab5" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- bigint_col | bigint | | not null | | plain | | | @@ -868,10 +882,10 @@ Indexes: "test_tab5_pkey" PRIMARY KEY, btree (bigint_col) Access method: heap -EXECUTE spocktab('test_tab5'); +SELECT * FROM get_table_repset_info('test_tab5'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab5 | default + s610 | test_tab5 | default (1 row) -- Validating data in all tables @@ -907,10 +921,10 @@ SELECT * FROM employee_projects ORDER BY emp_id, project_id; (3 rows) SELECT * FROM products ORDER BY product_id; - product_id | product_name | price | stock_quantity | discontinued | product_description | added | updated | category -------------+--------------+-------+----------------+--------------+--------------------------+---------------------+------------------------+---------- - 1 | Product A | 19.99 | 150 | f | Description of Product A | 2023-01-01 12:00:00 | 2023-01-01 17:00:00+05 | - 2 | Product B | 29.99 | 200 | t | Description of Product B | 2023-02-01 15:00:00 | 2023-02-01 20:00:00+05 | + product_id | product_name | price | stock_quantity | discontinued | product_description | added | updated | category +------------+--------------+-------+----------------+--------------+--------------------------+---------------------+---------------------+---------- + 1 | Product A | 19.99 | 150 | f | Description of Product A | 2023-01-01 12:00:00 | 2023-01-01 12:00:00 | + 2 | Product B | 29.99 | 200 | t | Description of Product B | 2023-02-01 15:00:00 | 2023-02-01 15:00:00 | (2 rows) SELECT * FROM "CaseSensitiveTable" ORDER BY "ID"; @@ -927,9 +941,9 @@ SELECT * FROM test_tab1 ORDER BY id; (1 row) SELECT * FROM test_tab2 ORDER BY id; - id | timestamp_col | interval_col | inet_col | cidr_col | macaddr_col | bit_col | varbit_col | box_col | circle_col | line_col | lseg_col | path_col | polygon_col -----+------------------------+---------------+-------------+----------------+-------------------+----------+------------+-------------+------------+----------+---------------+---------------+--------------- - 1 | 2023-01-01 17:00:00+05 | 1 year 2 mons | 192.168.1.1 | 192.168.0.0/24 | 08:00:2b:01:02:03 | 10101010 | 10101010 | (1,1),(0,0) | <(1,1),1> | {1,2,3} | [(0,0),(1,1)] | ((0,0),(1,1)) | ((0,0),(1,1)) + id | timestamp_col | interval_col | inet_col | cidr_col | macaddr_col | bit_col | varbit_col | box_col | circle_col | line_col | lseg_col | path_col | polygon_col +----+---------------------+---------------+-------------+----------------+-------------------+----------+------------+-------------+------------+----------+---------------+---------------+--------------- + 1 | 2023-01-01 12:00:00 | 1 year 2 mons | 192.168.1.1 | 192.168.0.0/24 | 08:00:2b:01:02:03 | 10101010 | 10101010 | (1,1),(0,0) | 
<(1,1),1> | {1,2,3} | [(0,0),(1,1)] | ((0,0),(1,1)) | ((0,0),(1,1)) (1 row) SELECT * FROM test_tab3 ORDER BY id; diff --git a/t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql b/t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql index b9ca681b..28dc15c2 100644 --- a/t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql +++ b/t/auto_ddl/6100a_table_datatypes_create_alter_n1.sql @@ -1,10 +1,19 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + -- 6100a_create_alter_table_n1.sql -- This script creates and alters tables on node n1 to test the autoDDL functionality. -- It includes a wide variety of data types and exercises several CREATE TABLE/ ALTER TABLE DDL constructs. -- Also regularly verifying spock.tables --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; + +CREATE SCHEMA IF NOT EXISTS s610; + +GRANT ALL PRIVILEGES ON SCHEMA s610 TO appuser; + +SET ROLE appuser; + +SET search_path TO s610, public; + -- Create a table for employee details with various data types CREATE TABLE employees ( @@ -30,7 +39,8 @@ INSERT INTO employees (emp_id, first_name, last_name, email, hire_date, birth_ti -- Validate the structure, spock.tables catalog table and data \d employees -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); + -- Create a table for department details CREATE TABLE departments ( @@ -49,7 +59,7 @@ INSERT INTO departments (dept_id, dept_name, location, established, budget, acti -- Validate the structure, spock.tables catalog table and data \d departments -EXECUTE spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); -- Alter table employees to add new columns, modify existing columns, and add constraints ALTER TABLE employees ADD COLUMN middle_name VARCHAR(100); @@ -60,7 +70,7 @@ ALTER TABLE employees RENAME COLUMN street_address TO address; -- Validate the structure, spock.tables catalog table and data \d employees -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); -- Insert additional data with new columns INSERT INTO employees (emp_id, first_name, middle_name, last_name, email, hire_date, birth_time, salary, full_time, address, metadata, start_timestamp, emp_coordinates, dept_id) VALUES @@ -85,7 +95,7 @@ INSERT INTO projects (project_id, project_name, start_date, end_date, budget, ac -- Validate the structure, spock.tables catalog table and data \d projects -EXECUTE spocktab('projects'); +SELECT * FROM get_table_repset_info('projects'); -- Create a table for employee projects (many-to-many relationship) CREATE TABLE employee_projects ( @@ -106,7 +116,7 @@ INSERT INTO employee_projects (emp_id, project_id, hours_worked, role) VALUES -- Validate the structure, spock.tables catalog table and data \d employee_projects -EXECUTE spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); -- Create additional tables to cover more data types and constraints CREATE TABLE products ( @@ -117,7 +127,7 @@ CREATE TABLE products ( discontinued BOOLEAN, product_description TEXT, added TIMESTAMP WITHOUT TIME ZONE, - updated TIMESTAMPTZ + updated TIMESTAMP WITHOUT TIME ZONE ); -- Insert initial data into products table @@ -127,7 +137,7 @@ INSERT INTO products (product_id, product_name, price, stock_quantity, discontin -- Validate the structure, spock.tables catalog table and data \d products -EXECUTE spocktab('products'); 
+SELECT * FROM get_table_repset_info('products'); -- Alter table products to add and modify columns ALTER TABLE products ADD COLUMN category VARCHAR(50); @@ -136,7 +146,7 @@ ALTER TABLE products ADD CONSTRAINT price_check CHECK (price > 0); -- Validate the structure, spock.tables catalog table and data \d products -EXECUTE spocktab('products'); +SELECT * FROM get_table_repset_info('products'); -- Update product data UPDATE products SET stock_quantity = 150 WHERE product_id = 1; @@ -155,7 +165,7 @@ INSERT INTO "CaseSensitiveTable" ("ID", "Name", "Value") VALUES -- Validate the structure, spock.tables catalog table and data \d "CaseSensitiveTable" -EXECUTE spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); -- Create table to test various ALTER TABLE operations CREATE TABLE test_tab1 ( @@ -173,12 +183,12 @@ ALTER TABLE test_tab1 RENAME COLUMN data TO old_data; -- Validate the structure, spock.tables catalog table and data \d test_tab1 -EXECUTE spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); -- Create table to test more data types and constraints CREATE TABLE test_tab2 ( id INT PRIMARY KEY, - timestamp_col TIMESTAMPTZ, + timestamp_col TIMESTAMP WITHOUT TIME ZONE, interval_col INTERVAL, inet_col INET, cidr_col CIDR, @@ -199,7 +209,7 @@ INSERT INTO test_tab2 (id, timestamp_col, interval_col, inet_col, cidr_col, maca -- Validate the structure, spock.tables catalog table and data \d test_tab2 -EXECUTE spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); -- Create table to test composite and array types CREATE TABLE test_tab3 ( @@ -216,7 +226,7 @@ INSERT INTO test_tab3 (id, name, int_array, text_array) VALUES -- Validate the structure, spock.tables catalog table and data \d test_tab3 -EXECUTE spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); -- creating table without primary key to ensure the default repset is default_insert_only -- and then play around with adding primary key and dropping them to see the repset @@ -231,14 +241,14 @@ CREATE TABLE test_tab4 ( -- Insert initial data into test_tab4 INSERT INTO test_tab4 (id, data) VALUES ('m2eebc99', 'Initial data'); -- Execute prepared statement for the table, repset default_insert_only -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); -- Alter table to add a primary key on the id column ALTER TABLE test_tab4 ADD PRIMARY KEY (id); -- Display the table structure \d test_tab4 -- Execute prepared statement for the table, repset default -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); -- Alter table to remove primary key ALTER TABLE test_tab4 DROP CONSTRAINT test_tab4_pkey; @@ -251,7 +261,7 @@ ALTER TABLE test_tab4 RENAME COLUMN data TO old_data; -- Display the table structure \d test_tab4 -- Execute prepared statement again for the table -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); -- Alter table to add a primary key on multiple columns ALTER TABLE test_tab4 ADD PRIMARY KEY (id, old_data); @@ -259,7 +269,7 @@ ALTER TABLE test_tab4 ADD PRIMARY KEY (id, old_data); -- Display the table structure \d test_tab4 -- Execute prepared statement again for the table -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); -- Alter table to drop the primary key ALTER TABLE test_tab4 DROP CONSTRAINT test_tab4_pkey; @@ -267,7 +277,7 @@ ALTER TABLE test_tab4 DROP CONSTRAINT test_tab4_pkey; -- Display the table structure \d 
test_tab4 -- Execute prepared statement again for the table -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); -- Negative test cases to validate constraints and error handling -- Attempt to insert a record with a duplicate primary key (should fail) @@ -305,41 +315,41 @@ UPDATE test_tab5 SET character_col = 'upd_char', jsonb_col = '{"updated_key": "u -- Validate the structure of the table \d test_tab5 -EXECUTE spocktab('test_tab5'); -- default repset expected +SELECT * FROM get_table_repset_info('test_tab5'); -- default repset expected -- Final validation of all tables along with querying the spock.tables \d+ employees -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); \d+ departments -execute spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); \d+ projects -execute spocktab('projects'); +SELECT * FROM get_table_repset_info('projects'); \d+ employee_projects -execute spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); \d+ products -execute spocktab('products'); +SELECT * FROM get_table_repset_info('products'); \d+ "CaseSensitiveTable" -execute spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); \d+ test_tab1 -execute spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); \d+ test_tab2 -execute spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); \d+ test_tab3 -execute spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); \d+ test_tab4 -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); \d+ test_tab5 -EXECUTE spocktab('test_tab5'); +SELECT * FROM get_table_repset_info('test_tab5'); -- Validating data in all tables SELECT * FROM employees ORDER BY emp_id; diff --git a/t/auto_ddl/6100b_table_validate_and_drop_n2.out b/t/auto_ddl/6100b_table_validate_and_drop_n2.out index 4f1aa622..678b58f6 100644 --- a/t/auto_ddl/6100b_table_validate_and_drop_n2.out +++ b/t/auto_ddl/6100b_table_validate_and_drop_n2.out @@ -1,12 +1,19 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- AutoDDL validation on n2 to ensure all the DDL/DML performed in the 6100a files on n1 -- was auto replicated to n2. -- In the end, the same objects are dropped. 
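
-- Editor's note: the validation hunks in this series replace the old per-script
-- "spocktab" prepared statement with a shared get_table_repset_info() helper whose
-- definition is not part of this diff (it ships with the new setup prerequisites).
-- A minimal sketch of what such a helper could look like, assuming it wraps the
-- same spock.tables query the prepared statement used; the substring match is an
-- inference from the expected outputs in this series, where a single call can
-- return related tables (e.g. projects together with employee_projects):
CREATE OR REPLACE FUNCTION get_table_repset_info(tbl text)
RETURNS TABLE (nspname name, relname name, set_name name)
LANGUAGE sql STABLE AS $$
    SELECT t.nspname, t.relname, t.set_name
    FROM spock.tables t
    WHERE t.relname LIKE '%' || tbl || '%'
    ORDER BY t.relid;
$$;
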
--- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; -PREPARE +SET ROLE appuser; +SET +SET search_path TO s610, public; +SET -- Final validation of all tables along with querying the spock.tables \d+ employees - Table "public.employees" + Table "s610.employees" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------------+-----------------------------+-----------+----------+---------+----------+-------------+--------------+------------- emp_id | integer | | not null | | plain | | | @@ -34,14 +41,14 @@ Referenced by: TABLE "employee_projects" CONSTRAINT "employee_projects_emp_id_fkey" FOREIGN KEY (emp_id) REFERENCES employees(emp_id) Access method: heap -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); nspname | relname | set_name ---------+-----------+---------- - public | employees | default + s610 | employees | default (1 row) \d+ departments - Table "public.departments" + Table "s610.departments" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- dept_id | integer | | not null | | plain | | | @@ -56,14 +63,14 @@ Referenced by: TABLE "employees" CONSTRAINT "fk_dept" FOREIGN KEY (dept_id) REFERENCES departments(dept_id) Access method: heap -execute spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); nspname | relname | set_name ---------+-------------+---------- - public | departments | default + s610 | departments | default (1 row) \d+ projects - Table "public.projects" + Table "s610.projects" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- project_id | integer | | not null | | plain | | | @@ -81,14 +88,15 @@ Referenced by: TABLE "employee_projects" CONSTRAINT "employee_projects_project_id_fkey" FOREIGN KEY (project_id) REFERENCES projects(project_id) Access method: heap -execute spocktab('projects'); - nspname | relname | set_name ----------+----------+---------- - public | projects | default -(1 row) +SELECT * FROM get_table_repset_info('projects'); + nspname | relname | set_name +---------+-------------------+---------- + s610 | projects | default + s610 | employee_projects | default +(2 rows) \d+ employee_projects - Table "public.employee_projects" + Table "s610.employee_projects" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- emp_id | integer | | not null | | plain | | | @@ -102,14 +110,14 @@ Foreign-key constraints: "employee_projects_project_id_fkey" FOREIGN KEY (project_id) REFERENCES projects(project_id) Access method: heap -execute spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); nspname | relname | set_name ---------+-------------------+---------- - public | employee_projects | default + s610 | employee_projects | default (1 row) \d+ products - Table "public.products" + Table "s610.products" Column | Type | Collation | Nullable | Default | Storage | 
Compression | Stats target | Description ---------------------+-----------------------------+-----------+----------+---------+----------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -119,7 +127,7 @@ execute spocktab('employee_projects'); discontinued | boolean | | | | plain | | | product_description | text | | | | extended | | | added | timestamp without time zone | | | | plain | | | - updated | timestamp with time zone | | | | plain | | | + updated | timestamp without time zone | | | | plain | | | category | character varying(50) | | | | extended | | | Indexes: "products_pkey" PRIMARY KEY, btree (product_id) @@ -127,14 +135,14 @@ Check constraints: "price_check" CHECK (price > 0::numeric) Access method: heap -execute spocktab('products'); +SELECT * FROM get_table_repset_info('products'); nspname | relname | set_name ---------+----------+---------- - public | products | default + s610 | products | default (1 row) \d+ "CaseSensitiveTable" - Table "public.CaseSensitiveTable" + Table "s610.CaseSensitiveTable" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- ID | integer | | not null | | plain | | | @@ -144,14 +152,14 @@ Indexes: "CaseSensitiveTable_pkey" PRIMARY KEY, btree ("ID") Access method: heap -execute spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); nspname | relname | set_name ---------+--------------------+---------- - public | CaseSensitiveTable | default + s610 | CaseSensitiveTable | default (1 row) \d+ test_tab1 - Table "public.test_tab1" + Table "s610.test_tab1" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | uuid | | not null | | plain | | | @@ -160,42 +168,42 @@ Indexes: "test_tab1_pkey" PRIMARY KEY, btree (id) Access method: heap -execute spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab1 | default + s610 | test_tab1 | default (1 row) \d+ test_tab2 - Table "public.test_tab2" - Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------------+--------------------------+-----------+----------+---------+----------+-------------+--------------+------------- - id | integer | | not null | | plain | | | - timestamp_col | timestamp with time zone | | | | plain | | | - interval_col | interval | | | | plain | | | - inet_col | inet | | | | main | | | - cidr_col | cidr | | | | main | | | - macaddr_col | macaddr | | | | plain | | | - bit_col | bit(8) | | | | extended | | | - varbit_col | bit varying(8) | | | | extended | | | - box_col | box | | | | plain | | | - circle_col | circle | | | | plain | | | - line_col | line | | | | plain | | | - lseg_col | lseg | | | | plain | | | - path_col | path | | | | extended | | | - polygon_col | polygon | | | | extended | | | + Table "s610.test_tab2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +---------------+-----------------------------+-----------+----------+---------+----------+-------------+--------------+------------- + id | integer | | not null | | plain | | | + timestamp_col | timestamp without 
time zone | | | | plain | | | + interval_col | interval | | | | plain | | | + inet_col | inet | | | | main | | | + cidr_col | cidr | | | | main | | | + macaddr_col | macaddr | | | | plain | | | + bit_col | bit(8) | | | | extended | | | + varbit_col | bit varying(8) | | | | extended | | | + box_col | box | | | | plain | | | + circle_col | circle | | | | plain | | | + line_col | line | | | | plain | | | + lseg_col | lseg | | | | plain | | | + path_col | path | | | | extended | | | + polygon_col | polygon | | | | extended | | | Indexes: "test_tab2_pkey" PRIMARY KEY, btree (id) Access method: heap -execute spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab2 | default + s610 | test_tab2 | default (1 row) \d+ test_tab3 - Table "public.test_tab3" + Table "s610.test_tab3" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -206,28 +214,28 @@ Indexes: "test_tab3_pkey" PRIMARY KEY, btree (id) Access method: heap -execute spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab3 | default + s610 | test_tab3 | default (1 row) \d+ test_tab4 - Table "public.test_tab4" + Table "s610.test_tab4" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | text | | not null | | extended | | | old_data | character varying(100) | | not null | | extended | | | Access method: heap -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+-----------+--------------------- - public | test_tab4 | default_insert_only + s610 | test_tab4 | default_insert_only (1 row) \d+ test_tab5 - Table "public.test_tab5" + Table "s610.test_tab5" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------------------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- bigint_col | bigint | | not null | | plain | | | @@ -246,10 +254,10 @@ Indexes: "test_tab5_pkey" PRIMARY KEY, btree (bigint_col) Access method: heap -EXECUTE spocktab('test_tab5'); +SELECT * FROM get_table_repset_info('test_tab5'); nspname | relname | set_name ---------+-----------+---------- - public | test_tab5 | default + s610 | test_tab5 | default (1 row) -- Validating data in all tables @@ -285,10 +293,10 @@ SELECT * FROM employee_projects ORDER BY emp_id, project_id; (3 rows) SELECT * FROM products ORDER BY product_id; - product_id | product_name | price | stock_quantity | discontinued | product_description | added | updated | category -------------+--------------+-------+----------------+--------------+--------------------------+---------------------+------------------------+---------- - 1 | Product A | 19.99 | 150 | f | Description of Product A | 2023-01-01 12:00:00 | 2023-01-01 17:00:00+05 | - 2 | Product B | 29.99 | 200 | t | Description of Product B | 2023-02-01 15:00:00 | 2023-02-01 20:00:00+05 | + product_id | product_name | price | stock_quantity | discontinued | product_description | added | updated | 
category +------------+--------------+-------+----------------+--------------+--------------------------+---------------------+---------------------+---------- + 1 | Product A | 19.99 | 150 | f | Description of Product A | 2023-01-01 12:00:00 | 2023-01-01 12:00:00 | + 2 | Product B | 29.99 | 200 | t | Description of Product B | 2023-02-01 15:00:00 | 2023-02-01 15:00:00 | (2 rows) SELECT * FROM "CaseSensitiveTable" ORDER BY "ID"; @@ -305,9 +313,9 @@ SELECT * FROM test_tab1 ORDER BY id; (1 row) SELECT * FROM test_tab2 ORDER BY id; - id | timestamp_col | interval_col | inet_col | cidr_col | macaddr_col | bit_col | varbit_col | box_col | circle_col | line_col | lseg_col | path_col | polygon_col -----+------------------------+---------------+-------------+----------------+-------------------+----------+------------+-------------+------------+----------+---------------+---------------+--------------- - 1 | 2023-01-01 17:00:00+05 | 1 year 2 mons | 192.168.1.1 | 192.168.0.0/24 | 08:00:2b:01:02:03 | 10101010 | 10101010 | (1,1),(0,0) | <(1,1),1> | {1,2,3} | [(0,0),(1,1)] | ((0,0),(1,1)) | ((0,0),(1,1)) + id | timestamp_col | interval_col | inet_col | cidr_col | macaddr_col | bit_col | varbit_col | box_col | circle_col | line_col | lseg_col | path_col | polygon_col +----+---------------------+---------------+-------------+----------------+-------------------+----------+------------+-------------+------------+----------+---------------+---------------+--------------- + 1 | 2023-01-01 12:00:00 | 1 year 2 mons | 192.168.1.1 | 192.168.0.0/24 | 08:00:2b:01:02:03 | 10101010 | 10101010 | (1,1),(0,0) | <(1,1),1> | {1,2,3} | [(0,0),(1,1)] | ((0,0),(1,1)) | ((0,0),(1,1)) (1 row) SELECT * FROM test_tab3 ORDER BY id; diff --git a/t/auto_ddl/6100b_table_validate_and_drop_n2.sql b/t/auto_ddl/6100b_table_validate_and_drop_n2.sql index 31b07b37..313198e0 100644 --- a/t/auto_ddl/6100b_table_validate_and_drop_n2.sql +++ b/t/auto_ddl/6100b_table_validate_and_drop_n2.sql @@ -1,44 +1,45 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated -- AutoDDL validation on n2 to ensure all the DDL/DML performed in the 6100a files on n1 -- was auto replicated to n2. -- In the end, the same objects are dropped. 
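
-- Editor's note: the SELECT pg_sleep(1) guard added at the head of these scripts
-- is a fixed settle time for AutoDDL replication to catch up. If a fixed sleep
-- proves flaky on slow runners, a bounded catalog poll is one alternative; a
-- sketch using only core PostgreSQL, with an illustrative table name and timeout:
DO $$
DECLARE
    deadline timestamptz := clock_timestamp() + interval '30 seconds';
BEGIN
    -- Poll until the replicated relation is visible locally, then proceed.
    WHILE to_regclass('s610.employees') IS NULL LOOP
        IF clock_timestamp() > deadline THEN
            RAISE EXCEPTION 'timed out waiting for s610.employees to replicate';
        END IF;
        PERFORM pg_sleep(0.2);
    END LOOP;
END
$$;
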
--- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; +SET ROLE appuser; +SET search_path TO s610, public; -- Final validation of all tables along with querying the spock.tables \d+ employees -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); \d+ departments -execute spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); \d+ projects -execute spocktab('projects'); +SELECT * FROM get_table_repset_info('projects'); \d+ employee_projects -execute spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); \d+ products -execute spocktab('products'); +SELECT * FROM get_table_repset_info('products'); \d+ "CaseSensitiveTable" -execute spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); \d+ test_tab1 -execute spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); \d+ test_tab2 -execute spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); \d+ test_tab3 -execute spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); \d+ test_tab4 -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); \d+ test_tab5 -EXECUTE spocktab('test_tab5'); +SELECT * FROM get_table_repset_info('test_tab5'); -- Validating data in all tables SELECT * FROM employees ORDER BY emp_id; diff --git a/t/auto_ddl/6100c_table_validate_n1.out b/t/auto_ddl/6100c_table_validate_n1.out index d17ed35b..6fa0c4a9 100644 --- a/t/auto_ddl/6100c_table_validate_n1.out +++ b/t/auto_ddl/6100c_table_validate_n1.out @@ -1,85 +1,98 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- Final AutoDDL validation for the 6100 series on n1 to ensure all the DROP TABLE performed in the 6100b files on n2 -- was auto replicated to n1. -- None of the Tables should exist and spock.tables should not contain any entries for these tables --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; -PREPARE +SET ROLE appuser; +SET +SET search_path TO s610, public; +SET -- Final validation of all tables along with querying the spock.tables -- validating all tables dropped on n1 \d+ employees Did not find any relation named "employees". -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ departments Did not find any relation named "departments". -execute spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ projects Did not find any relation named "projects". -execute spocktab('projects'); +SELECT * FROM get_table_repset_info('projects'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ employee_projects Did not find any relation named "employee_projects". -execute spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ products Did not find any relation named "products". 
-execute spocktab('products'); +SELECT * FROM get_table_repset_info('products'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ "CaseSensitiveTable" Did not find any relation named ""CaseSensitiveTable"". -execute spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ test_tab1 Did not find any relation named "test_tab1". -execute spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ test_tab2 Did not find any relation named "test_tab2". -execute spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ test_tab3 Did not find any relation named "test_tab3". -execute spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ test_tab4 Did not find any relation named "test_tab4". -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d+ test_tab5 Did not find any relation named "test_tab5". -EXECUTE spocktab('test_tab5'); +SELECT * FROM get_table_repset_info('test_tab5'); nspname | relname | set_name ---------+---------+---------- (0 rows) +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s610 CASCADE; +INFO: DDL statement replicated. +DROP SCHEMA diff --git a/t/auto_ddl/6100c_table_validate_n1.sql b/t/auto_ddl/6100c_table_validate_n1.sql index bcce4cf8..9be66526 100644 --- a/t/auto_ddl/6100c_table_validate_n1.sql +++ b/t/auto_ddl/6100c_table_validate_n1.sql @@ -1,42 +1,48 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated -- Final AutoDDL validation for the 6100 series on n1 to ensure all the DROP TABLE performed in the 6100b files on n2 -- was auto replicated to n1. 
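
-- Editor's note: these scripts SET ROLE appuser and grant it schema privileges,
-- so they assume the role already exists; its creation is elsewhere in the
-- suite's environment setup and is not shown in this diff. A hypothetical
-- minimal prerequisite for running one of these files standalone:
DO $$
BEGIN
    -- Hypothetical bootstrap; the real role setup lives in the suite's env scripts.
    IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'appuser') THEN
        CREATE ROLE appuser LOGIN;
    END IF;
END
$$;
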
-- None of the Tables should exist and spock.tables should not contain any entries for these tables --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; +SET ROLE appuser; + +SET search_path TO s610, public; -- Final validation of all tables along with querying the spock.tables -- validating all tables dropped on n1 \d+ employees -EXECUTE spocktab('employees'); +SELECT * FROM get_table_repset_info('employees'); \d+ departments -execute spocktab('departments'); +SELECT * FROM get_table_repset_info('departments'); \d+ projects -execute spocktab('projects'); +SELECT * FROM get_table_repset_info('projects'); \d+ employee_projects -execute spocktab('employee_projects'); +SELECT * FROM get_table_repset_info('employee_projects'); \d+ products -execute spocktab('products'); +SELECT * FROM get_table_repset_info('products'); \d+ "CaseSensitiveTable" -execute spocktab('CaseSensitiveTable'); +SELECT * FROM get_table_repset_info('CaseSensitiveTable'); \d+ test_tab1 -execute spocktab('test_tab1'); +SELECT * FROM get_table_repset_info('test_tab1'); \d+ test_tab2 -execute spocktab('test_tab2'); +SELECT * FROM get_table_repset_info('test_tab2'); \d+ test_tab3 -execute spocktab('test_tab3'); +SELECT * FROM get_table_repset_info('test_tab3'); \d+ test_tab4 -EXECUTE spocktab('test_tab4'); +SELECT * FROM get_table_repset_info('test_tab4'); \d+ test_tab5 -EXECUTE spocktab('test_tab5'); +SELECT * FROM get_table_repset_info('test_tab5'); + +RESET ROLE; +--dropping the schema +DROP SCHEMA s610 CASCADE; \ No newline at end of file diff --git a/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.out b/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.out index 8318ec48..187818e9 100644 --- a/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.out +++ b/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.out @@ -1,11 +1,24 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- This script covers the following CREATE TABLE constructs for AutoDDL: -- CREATE TABLE in transactions -- CREATE TABLE AS -- SELECT .. INTO .. FROM EXISTING -- CREATE TABLE LIKE --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; -PREPARE +CREATE SCHEMA IF NOT EXISTS s611; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA s611 TO appuser; +INFO: DDL statement replicated. +GRANT +SET ROLE appuser; +SET +SET search_path TO s611, public; +SET ---------------------------- -- Table DDL in transactions ---------------------------- @@ -19,17 +32,17 @@ CREATE TABLE COMMIT; COMMIT \d sub_tx_table0 - Table "public.sub_tx_table0" + Table "s611.sub_tx_table0" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | not null | Indexes: "sub_tx_table0_pkey" PRIMARY KEY, btree (c) -EXECUTE spocktab('sub_tx_table0'); --default repset +SELECT * FROM get_table_repset_info('sub_tx_table0'); --default repset nspname | relname | set_name ---------+---------------+---------- - public | sub_tx_table0 | default + s611 | sub_tx_table0 | default (1 row) -- DDL within tx, Rollback @@ -44,7 +57,7 @@ ROLLBACK; ROLLBACK \d sub_tx_table0a Did not find any relation named "sub_tx_table0a". 
-EXECUTE spocktab('sub_tx_table0a'); +SELECT * FROM get_table_repset_info('sub_tx_table0a'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -67,7 +80,7 @@ COMMIT; COMMIT \d sub_tx_table1 Did not find any relation named "sub_tx_table1". -EXECUTE spocktab('sub_tx_table1'); +SELECT * FROM get_table_repset_info('sub_tx_table1'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -93,15 +106,15 @@ ROLLBACK COMMIT; COMMIT \d sub_tx_table2 - Table "public.sub_tx_table2" + Table "s611.sub_tx_table2" Column | Type | Collation | Nullable | Default --------+--------+-----------+----------+--------- c | bigint | | | -EXECUTE spocktab('sub_tx_table2'); +SELECT * FROM get_table_repset_info('sub_tx_table2'); nspname | relname | set_name ---------+---------------+--------------------- - public | sub_tx_table2 | default_insert_only + s611 | sub_tx_table2 | default_insert_only (1 row) BEGIN; @@ -115,7 +128,7 @@ INSERT 0 5 END; COMMIT \d sub_tx_table3 - Table "public.sub_tx_table3" + Table "s611.sub_tx_table3" Column | Type | Collation | Nullable | Default --------+----------+-----------+----------+--------- a | smallint | | not null | @@ -133,10 +146,10 @@ SELECT * FROM sub_tx_table3 order by a; 777 | 777.777 (5 rows) -EXECUTE spocktab('sub_tx_table3'); +SELECT * FROM get_table_repset_info('sub_tx_table3'); nspname | relname | set_name ---------+---------------+---------- - public | sub_tx_table3 | default + s611 | sub_tx_table3 | default (1 row) BEGIN; @@ -158,7 +171,7 @@ ROLLBACK --table sub_tx_table4 should not exist \d sub_tx_table4 Did not find any relation named "sub_tx_table4". -EXECUTE spocktab('sub_tx_table4'); +SELECT * FROM get_table_repset_info('sub_tx_table4'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -199,48 +212,50 @@ WARNING: there is no transaction in progress COMMIT -- Validate sub_tx_table5, sub_tx_table5a, and sub_tx_table5c should exist, sub_tx_table5b should not \d sub_tx_table5 - Table "public.sub_tx_table5" + Table "s611.sub_tx_table5" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | | -EXECUTE spocktab('sub_tx_table5'); -- should be in default_insert_only set - nspname | relname | set_name ----------+---------------+--------------------- - public | sub_tx_table5 | default_insert_only -(1 row) +SELECT * FROM get_table_repset_info('sub_tx_table5'); -- should be in default_insert_only set + nspname | relname | set_name +---------+----------------+--------------------- + s611 | sub_tx_table5 | default_insert_only + s611 | sub_tx_table5a | default + s611 | sub_tx_table5c | default_insert_only +(3 rows) \d sub_tx_table5a - Table "public.sub_tx_table5a" + Table "s611.sub_tx_table5a" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | not null | Indexes: "sub_tx_table5a_pkey" PRIMARY KEY, btree (c) -EXECUTE spocktab('sub_tx_table5a'); -- should be in default +SELECT * FROM get_table_repset_info('sub_tx_table5a'); -- should be in default nspname | relname | set_name ---------+----------------+---------- - public | sub_tx_table5a | default + s611 | sub_tx_table5a | default (1 row) \d sub_tx_table5b Did not find any relation named "sub_tx_table5b". 
-EXECUTE spocktab('sub_tx_table5b'); -- should not exist +SELECT * FROM get_table_repset_info('sub_tx_table5b'); -- should not exist nspname | relname | set_name ---------+---------+---------- (0 rows) \d sub_tx_table5c - Table "public.sub_tx_table5c" + Table "s611.sub_tx_table5c" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | | -EXECUTE spocktab('sub_tx_table5c'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('sub_tx_table5c'); -- should be in default_insert_only set nspname | relname | set_name ---------+----------------+--------------------- - public | sub_tx_table5c | default_insert_only + s611 | sub_tx_table5c | default_insert_only (1 row) ----------------------- @@ -273,17 +288,17 @@ WARNING: DDL statement replicated, but could be unsafe. CREATE TABLE AS -- Validate table_ctas1 \d table_ctas1 - Table "public.table_ctas1" + Table "s611.table_ctas1" Column | Type | Collation | Nullable | Default --------+-----------------------+-----------+----------+--------- id | integer | | | name | character varying(50) | | | age | integer | | | -EXECUTE spocktab('table_ctas1'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas1'); -- should be in default_insert_only set nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas1 | default_insert_only + s611 | table_ctas1 | default_insert_only (1 row) -- CREATE TABLE AS with specific columns and data @@ -298,7 +313,7 @@ INFO: DDL statement replicated. ALTER TABLE -- Validate table_ctas2 \d table_ctas2 - Table "public.table_ctas2" + Table "s611.table_ctas2" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- id | integer | | not null | @@ -306,10 +321,10 @@ ALTER TABLE Indexes: "table_ctas2_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('table_ctas2'); -- should be in default set +SELECT * FROM get_table_repset_info('table_ctas2'); -- should be in default set nspname | relname | set_name ---------+-------------+---------- - public | table_ctas2 | default + s611 | table_ctas2 | default (1 row) -- CREATE TABLE AS with VALUES clause and primary key @@ -322,7 +337,7 @@ INFO: DDL statement replicated. ALTER TABLE -- Validate table_ctas3 \d table_ctas3 - Table "public.table_ctas3" + Table "s611.table_ctas3" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- id | integer | | not null | @@ -330,10 +345,10 @@ ALTER TABLE Indexes: "table_ctas3_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('table_ctas3'); -- should be in default set +SELECT * FROM get_table_repset_info('table_ctas3'); -- should be in default set nspname | relname | set_name ---------+-------------+---------- - public | table_ctas3 | default + s611 | table_ctas3 | default (1 row) -- CREATE TABLE AS with query and using WITH NO DATA @@ -344,17 +359,17 @@ WARNING: DDL statement replicated, but could be unsafe. 
CREATE TABLE AS -- Validate table_ctas4 \d table_ctas4 - Table "public.table_ctas4" + Table "s611.table_ctas4" Column | Type | Collation | Nullable | Default ------------+-----------------------+-----------+----------+--------- id | integer | | | name | character varying(50) | | | double_age | integer | | | -EXECUTE spocktab('table_ctas4'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas4'); -- should be in default_insert_only set nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas4 | default_insert_only + s611 | table_ctas4 | default_insert_only (1 row) -- CREATE TABLE AS with expression @@ -364,15 +379,15 @@ WARNING: DDL statement replicated, but could be unsafe. SELECT 10 -- Validate table_ctas5 \d table_ctas5 - Table "public.table_ctas5" + Table "s611.table_ctas5" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- num | integer | | | -EXECUTE spocktab('table_ctas5'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas5'); -- should be in default_insert_only set nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas5 | default_insert_only + s611 | table_ctas5 | default_insert_only (1 row) -- CREATE TABLE AS with explain analyze, redirecting the output to /dev/null so that the varying query plan is not @@ -380,24 +395,19 @@ EXECUTE spocktab('table_ctas5'); -- should be in default_insert_only set \o /dev/null EXPLAIN ANALYZE CREATE TABLE table_ctas6 AS SELECT 1 AS a; -INFO: DDL statement replicated. +WARNING: DDL statement replicated, but could be unsafe. \o -/* -TO FIX: -At present, no repset is assigned for table created through EXPLAIN ANALYZE -https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=65421352 -*/ -- Validate table_ctas6 \d table_ctas6 - Table "public.table_ctas6" + Table "s611.table_ctas6" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- a | integer | | | -EXECUTE spocktab('table_ctas6'); -- should be in default_insert_only set - nspname | relname | set_name ----------+-------------+---------- - public | table_ctas6 | +SELECT * FROM get_table_repset_info('table_ctas6'); -- should be in default_insert_only set + nspname | relname | set_name +---------+-------------+--------------------- + s611 | table_ctas6 | default_insert_only (1 row) ----------------------------------- @@ -426,7 +436,7 @@ WARNING: DDL statement replicated, but could be unsafe. SELECT 4 -- Validate table_si1 \d table_si1 - Table "public.table_si1" + Table "s611.table_si1" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- id | integer | | | @@ -435,10 +445,10 @@ SELECT 4 column3 | date | | | column4 | boolean | | | -EXECUTE spocktab('table_si1'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si1'); -- should be in default_insert_only set nspname | relname | set_name ---------+-----------+--------------------- - public | table_si1 | default_insert_only + s611 | table_si1 | default_insert_only (1 row) -- SELECT INTO with specific columns and conditions @@ -447,17 +457,17 @@ WARNING: DDL statement replicated, but could be unsafe. 
SELECT 2 -- Validate table_si2 \d table_si2 - Table "public.table_si2" + Table "s611.table_si2" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- id | integer | | | column1 | text | | | column2 | integer | | | -EXECUTE spocktab('table_si2'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si2'); -- should be in default_insert_only set nspname | relname | set_name ---------+-----------+--------------------- - public | table_si2 | default_insert_only + s611 | table_si2 | default_insert_only (1 row) -- Expected data: (3, 'value3', 30), (4, 'value4', 40) @@ -467,16 +477,16 @@ WARNING: DDL statement replicated, but could be unsafe. SELECT 2 -- Validate table_si3 \d table_si3 - Table "public.table_si3" + Table "s611.table_si3" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- column4 | boolean | | | count | bigint | | | -EXECUTE spocktab('table_si3'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si3'); -- should be in default_insert_only set nspname | relname | set_name ---------+-----------+--------------------- - public | table_si3 | default_insert_only + s611 | table_si3 | default_insert_only (1 row) -- Expected data: (TRUE, 2), (FALSE, 2) @@ -486,16 +496,16 @@ WARNING: DDL statement replicated, but could be unsafe. SELECT 2 -- Validate table_si4 \d table_si4 - Table "public.table_si4" + Table "s611.table_si4" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- id | integer | | | column1 | text | | | -EXECUTE spocktab('table_si4'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si4'); -- should be in default_insert_only set nspname | relname | set_name ---------+-----------+--------------------- - public | table_si4 | default_insert_only + s611 | table_si4 | default_insert_only (1 row) -- Expected data: (4, 'value4'), (3, 'value3') @@ -524,17 +534,17 @@ WARNING: DDL statement replicated, but could be unsafe. 
SELECT 2 -- Validate table_si5 \d table_si5 - Table "public.table_si5" + Table "s611.table_si5" Column | Type | Collation | Nullable | Default ------------+-----------------------+-----------+----------+--------- id | integer | | | column1 | text | | | extra_data | character varying(50) | | | -EXECUTE spocktab('table_si5'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si5'); -- should be in default_insert_only set nspname | relname | set_name ---------+-----------+--------------------- - public | table_si5 | default_insert_only + s611 | table_si5 | default_insert_only (1 row) -- Expected data: (1, 'value1', 'extra1'), (3, 'value3', 'extra3') @@ -572,16 +582,16 @@ CREATE TABLE -- Validate table_l1 -- Expected columns: col1 (without primary key), col2 (with default 'default_text') \d table_l1 - Table "public.table_l1" + Table "s611.table_l1" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+---------------------- col1 | integer | | not null | col2 | text | | | 'default_text'::text -EXECUTE spocktab('table_l1'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l1'); -- should be in default_insert_only set nspname | relname | set_name ---------+----------+--------------------- - public | table_l1 | default_insert_only + s611 | table_l1 | default_insert_only (1 row) -- Create table using LIKE excluding defaults @@ -591,16 +601,16 @@ CREATE TABLE -- Validate table_l2 -- Expected columns: col1 (without primary key), col2 (without default) \d table_l2 - Table "public.table_l2" + Table "s611.table_l2" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- col1 | integer | | not null | col2 | text | | | -EXECUTE spocktab('table_l2'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l2'); -- should be in default_insert_only set nspname | relname | set_name ---------+----------+--------------------- - public | table_l2 | default_insert_only + s611 | table_l2 | default_insert_only (1 row) -- Create table using LIKE including all properties @@ -610,7 +620,7 @@ CREATE TABLE -- Validate table_l3 -- Expected columns: col1, col2, col3 (with check constraint and unique constraint) \d table_l3 - Table "public.table_l3" + Table "s611.table_l3" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- col1 | integer | | | @@ -621,10 +631,10 @@ Indexes: Check constraints: "chk_col1" CHECK (col1 > 0) -EXECUTE spocktab('table_l3'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l3'); -- should be in default_insert_only set nspname | relname | set_name ---------+----------+--------------------- - public | table_l3 | default_insert_only + s611 | table_l3 | default_insert_only (1 row) -- Create table using LIKE excluding constraints @@ -634,17 +644,17 @@ CREATE TABLE -- Validate table_l4 -- Expected columns: col1, col2, col3 (without constraints) \d table_l4 - Table "public.table_l4" + Table "s611.table_l4" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- col1 | integer | | | col2 | text | | | col3 | date | | | -EXECUTE spocktab('table_l4'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l4'); -- should be in default_insert_only set nspname | relname | set_name ---------+----------+--------------------- - public | table_l4 | default_insert_only + s611 | table_l4 | 
default_insert_only (1 row) -- Create table using LIKE including indexes @@ -654,7 +664,7 @@ CREATE TABLE -- Validate table_l5 -- Expected columns: col1 (primary key), col2 (without default), indexes copied \d table_l5 - Table "public.table_l5" + Table "s611.table_l5" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- col1 | integer | | not null | @@ -662,10 +672,10 @@ CREATE TABLE Indexes: "table_l5_pkey" PRIMARY KEY, btree (col1) -EXECUTE spocktab('table_l5'); -- should be in default set +SELECT * FROM get_table_repset_info('table_l5'); -- should be in default set nspname | relname | set_name ---------+----------+---------- - public | table_l5 | default + s611 | table_l5 | default (1 row) -- Insert data into the LIKE created tables to validate defaults and constraints diff --git a/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.sql b/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.sql index 878198e4..011d86d0 100644 --- a/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.sql +++ b/t/auto_ddl/6111a_table_tx_ctas_selectinto_like.sql @@ -1,3 +1,5 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + -- This script covers the following CREATE TABLE constructs for AutoDDL: -- CREATE TABLE in transactions -- CREATE TABLE AS @@ -5,8 +7,13 @@ -- CREATE TABLE LIKE --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; +CREATE SCHEMA IF NOT EXISTS s611; + +GRANT ALL PRIVILEGES ON SCHEMA s611 TO appuser; + +SET ROLE appuser; + +SET search_path TO s611, public; ---------------------------- -- Table DDL in transactions @@ -19,7 +26,7 @@ CREATE TABLE sub_tx_table0 (c int primary key); COMMIT; \d sub_tx_table0 -EXECUTE spocktab('sub_tx_table0'); --default repset +SELECT * FROM get_table_repset_info('sub_tx_table0'); --default repset -- DDL within tx, Rollback -- table will not get created on n1 and therefore nothing should replicate to n2 @@ -29,7 +36,7 @@ CREATE TABLE sub_tx_table0a (c int); ROLLBACK; \d sub_tx_table0a -EXECUTE spocktab('sub_tx_table0a'); +SELECT * FROM get_table_repset_info('sub_tx_table0a'); --DDL within transaction and savepoints and rollback/commit --table sub_tx_table1 will not be created so it should not get replicated @@ -41,7 +48,7 @@ CREATE TABLE sub_tx_table1 (c int); COMMIT; \d sub_tx_table1 -EXECUTE spocktab('sub_tx_table1'); +SELECT * FROM get_table_repset_info('sub_tx_table1'); --ALTERING TABLE within transaction, savepoints, rollback -- After commit, the table should have c column datatype to bigint @@ -54,7 +61,7 @@ BEGIN; COMMIT; \d sub_tx_table2 -EXECUTE spocktab('sub_tx_table2'); +SELECT * FROM get_table_repset_info('sub_tx_table2'); BEGIN; CREATE TABLE sub_tx_table3 (a smallint primary key, b real); @@ -64,7 +71,7 @@ END; \d sub_tx_table3 SELECT * FROM sub_tx_table3 order by a; -EXECUTE spocktab('sub_tx_table3'); +SELECT * FROM get_table_repset_info('sub_tx_table3'); BEGIN; CREATE TABLE sub_tx_table4 (a int4 primary key); @@ -74,7 +81,7 @@ SELECT count(*) from sub_tx_table3;--0 rows ABORT;--rollback --table sub_tx_table4 should not exist \d sub_tx_table4 -EXECUTE spocktab('sub_tx_table4'); +SELECT * FROM get_table_repset_info('sub_tx_table4'); SELECT count(*) from sub_tx_table3;--5 rows, which should also exist on n2 (validated in the 6111b file) -- Nested transactions with multiple savepoints and a mix of rollbacks and commits @@ -92,13 +99,13 @@ COMMIT; -- 
Validate sub_tx_table5, sub_tx_table5a, and sub_tx_table5c should exist, sub_tx_table5b should not \d sub_tx_table5 -EXECUTE spocktab('sub_tx_table5'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('sub_tx_table5'); -- should be in default_insert_only set \d sub_tx_table5a -EXECUTE spocktab('sub_tx_table5a'); -- should be in default +SELECT * FROM get_table_repset_info('sub_tx_table5a'); -- should be in default \d sub_tx_table5b -EXECUTE spocktab('sub_tx_table5b'); -- should not exist +SELECT * FROM get_table_repset_info('sub_tx_table5b'); -- should not exist \d sub_tx_table5c -EXECUTE spocktab('sub_tx_table5c'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('sub_tx_table5c'); -- should be in default_insert_only set @@ -129,7 +136,7 @@ SELECT id, name FROM table_base1; -- Validate table_ctas1 \d table_ctas1 -EXECUTE spocktab('table_ctas1'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas1'); -- should be in default_insert_only set -- CREATE TABLE AS with specific columns and data CREATE TABLE IF NOT EXISTS table_ctas2 AS @@ -141,7 +148,7 @@ ALTER TABLE table_ctas2 ADD PRIMARY KEY (id); -- Validate table_ctas2 \d table_ctas2 -EXECUTE spocktab('table_ctas2'); -- should be in default set +SELECT * FROM get_table_repset_info('table_ctas2'); -- should be in default set -- CREATE TABLE AS with VALUES clause and primary key CREATE TABLE table_ctas3 (id, value) AS @@ -150,7 +157,7 @@ ALTER TABLE table_ctas3 ADD PRIMARY KEY (id); -- Validate table_ctas3 \d table_ctas3 -EXECUTE spocktab('table_ctas3'); -- should be in default set +SELECT * FROM get_table_repset_info('table_ctas3'); -- should be in default set -- CREATE TABLE AS with query and using WITH NO DATA CREATE TABLE table_ctas4 AS @@ -159,7 +166,7 @@ WHERE age <= 30 WITH NO DATA; -- Validate table_ctas4 \d table_ctas4 -EXECUTE spocktab('table_ctas4'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas4'); -- should be in default_insert_only set -- CREATE TABLE AS with expression CREATE TABLE table_ctas5 AS @@ -167,7 +174,7 @@ SELECT generate_series(1, 10) AS num; -- Validate table_ctas5 \d table_ctas5 -EXECUTE spocktab('table_ctas5'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas5'); -- should be in default_insert_only set -- CREATE TABLE AS with explain analyze, redirecting the output to /dev/null so that the varying query plan is not -- captured in the expected output, to keep our output consistent across runs. @@ -175,14 +182,10 @@ EXECUTE spocktab('table_ctas5'); -- should be in default_insert_only set EXPLAIN ANALYZE CREATE TABLE table_ctas6 AS SELECT 1 AS a; \o -/* -TO FIX: -At present, no repset is assigned for table created through EXPLAIN ANALYZE -https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=65421352 -*/ + -- Validate table_ctas6 \d table_ctas6 -EXECUTE spocktab('table_ctas6'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_ctas6'); -- should be in default_insert_only set ----------------------------------- -- Create table using SELECT .. INTO .. 
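
-- Editor's note: the repset expectations throughout this file follow one rule:
-- AutoDDL assigns a new table to "default" when it has a primary key and to
-- "default_insert_only" when it does not (CTAS and SELECT INTO output has no
-- primary key until a later ALTER TABLE adds one). A quick audit query for that
-- invariant, assuming spock.tables.relid is the table's pg_class OID, as the
-- ORDER BY relid in the old prepared statement suggests:
SELECT t.nspname, t.relname, t.set_name,
       EXISTS (SELECT 1 FROM pg_constraint c
               WHERE c.conrelid = t.relid
                 AND c.contype = 'p') AS has_pkey
FROM spock.tables t
WHERE t.nspname = 's611'
ORDER BY t.relid;
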
@@ -209,14 +212,14 @@ SELECT * INTO table_si1 FROM table_existing1; -- Validate table_si1 \d table_si1 -EXECUTE spocktab('table_si1'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si1'); -- should be in default_insert_only set -- SELECT INTO with specific columns and conditions SELECT id, column1, column2 INTO table_si2 FROM table_existing1 WHERE column2 > 20; -- Validate table_si2 \d table_si2 -EXECUTE spocktab('table_si2'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si2'); -- should be in default_insert_only set -- Expected data: (3, 'value3', 30), (4, 'value4', 40) -- SELECT INTO with GROUP BY and HAVING @@ -224,7 +227,7 @@ SELECT column4, COUNT(*) AS count INTO table_si3 FROM table_existing1 GROUP BY c -- Validate table_si3 \d table_si3 -EXECUTE spocktab('table_si3'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si3'); -- should be in default_insert_only set -- Expected data: (TRUE, 2), (FALSE, 2) -- SELECT INTO with ORDER BY and LIMIT @@ -232,7 +235,7 @@ SELECT id, column1 INTO table_si4 FROM table_existing1 ORDER BY column2 DESC LIM -- Validate table_si4 \d table_si4 -EXECUTE spocktab('table_si4'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si4'); -- should be in default_insert_only set -- Expected data: (4, 'value4'), (3, 'value3') -- Complex SELECT INTO with JOIN, GROUP BY, ORDER BY, and LIMIT @@ -258,7 +261,7 @@ LIMIT 3; -- Validate table_si5 \d table_si5 -EXECUTE spocktab('table_si5'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_si5'); -- should be in default_insert_only set -- Expected data: (1, 'value1', 'extra1'), (3, 'value3', 'extra3') --------------------- @@ -293,7 +296,7 @@ CREATE TABLE table_l1 (LIKE table_base1a INCLUDING DEFAULTS INCLUDING CONSTRAINT -- Validate table_l1 -- Expected columns: col1 (without primary key), col2 (with default 'default_text') \d table_l1 -EXECUTE spocktab('table_l1'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l1'); -- should be in default_insert_only set -- Create table using LIKE excluding defaults @@ -302,7 +305,7 @@ CREATE TABLE table_l2 (LIKE table_base1a EXCLUDING DEFAULTS); -- Validate table_l2 -- Expected columns: col1 (without primary key), col2 (without default) \d table_l2 -EXECUTE spocktab('table_l2'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l2'); -- should be in default_insert_only set -- Create table using LIKE including all properties @@ -311,7 +314,7 @@ CREATE TABLE table_l3 (LIKE table_base2 INCLUDING ALL); -- Validate table_l3 -- Expected columns: col1, col2, col3 (with check constraint and unique constraint) \d table_l3 -EXECUTE spocktab('table_l3'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l3'); -- should be in default_insert_only set -- Create table using LIKE excluding constraints CREATE TABLE table_l4 (LIKE table_base2 EXCLUDING CONSTRAINTS); @@ -319,7 +322,7 @@ CREATE TABLE table_l4 (LIKE table_base2 EXCLUDING CONSTRAINTS); -- Validate table_l4 -- Expected columns: col1, col2, col3 (without constraints) \d table_l4 -EXECUTE spocktab('table_l4'); -- should be in default_insert_only set +SELECT * FROM get_table_repset_info('table_l4'); -- should be in default_insert_only set -- Create table using LIKE including indexes CREATE TABLE table_l5 (LIKE table_base1a INCLUDING INDEXES); @@ -327,7 +330,7 @@ 
CREATE TABLE table_l5 (LIKE table_base1a INCLUDING INDEXES); -- Validate table_l5 -- Expected columns: col1 (primary key), col2 (without default), indexes copied \d table_l5 -EXECUTE spocktab('table_l5'); -- should be in default set +SELECT * FROM get_table_repset_info('table_l5'); -- should be in default set -- Insert data into the LIKE created tables to validate defaults and constraints diff --git a/t/auto_ddl/6111b_table_validate_and_drop_n2.out b/t/auto_ddl/6111b_table_validate_and_drop_n2.out index a45afa3a..438c6f4a 100644 --- a/t/auto_ddl/6111b_table_validate_and_drop_n2.out +++ b/t/auto_ddl/6111b_table_validate_and_drop_n2.out @@ -1,21 +1,28 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- 6111b - Validate and drop tables on n2 --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; -PREPARE +SET ROLE appuser; +SET +SET search_path TO s611, public; +SET -- Validate sub_tx_table0 -- Expected: table exists with column c of type int and primary key \d sub_tx_table0 - Table "public.sub_tx_table0" + Table "s611.sub_tx_table0" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | not null | Indexes: "sub_tx_table0_pkey" PRIMARY KEY, btree (c) -EXECUTE spocktab('sub_tx_table0'); -- Replication set: default +SELECT * FROM get_table_repset_info('sub_tx_table0'); -- Replication set: default nspname | relname | set_name ---------+---------------+---------- - public | sub_tx_table0 | default + s611 | sub_tx_table0 | default (1 row) -- Validate sub_tx_table0a @@ -29,21 +36,21 @@ Did not find any relation named "sub_tx_table1". -- Validate sub_tx_table2 -- Expected: table exists with column c of type bigint \d sub_tx_table2 - Table "public.sub_tx_table2" + Table "s611.sub_tx_table2" Column | Type | Collation | Nullable | Default --------+--------+-----------+----------+--------- c | bigint | | | -EXECUTE spocktab('sub_tx_table2'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('sub_tx_table2'); -- Replication set: default_insert_only nspname | relname | set_name ---------+---------------+--------------------- - public | sub_tx_table2 | default_insert_only + s611 | sub_tx_table2 | default_insert_only (1 row) -- Validate sub_tx_table3 -- Expected: table exists with columns a (smallint, primary key) and b (real) \d sub_tx_table3 - Table "public.sub_tx_table3" + Table "s611.sub_tx_table3" Column | Type | Collation | Nullable | Default --------+----------+-----------+----------+--------- a | smallint | | not null | @@ -51,10 +58,10 @@ EXECUTE spocktab('sub_tx_table2'); -- Replication set: default_insert_only Indexes: "sub_tx_table3_pkey" PRIMARY KEY, btree (a) -EXECUTE spocktab('sub_tx_table3'); -- Replication set: default +SELECT * FROM get_table_repset_info('sub_tx_table3'); -- Replication set: default nspname | relname | set_name ---------+---------------+---------- - public | sub_tx_table3 | default + s611 | sub_tx_table3 | default (1 row) -- Expected data: (0, 0.09561), (42, 324.78), (56, 7.8), (100, 99.097), (777, 777.777) @@ -74,59 +81,61 @@ SELECT * FROM sub_tx_table3 ORDER BY a; Did not find any relation named "sub_tx_table4". 
-- Validate sub_tx_table5, sub_tx_table5a, and sub_tx_table5c, sub_tx_table5b should not exist \d sub_tx_table5 - Table "public.sub_tx_table5" + Table "s611.sub_tx_table5" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | | -EXECUTE spocktab('sub_tx_table5'); -- Replication set: default_insert_only - nspname | relname | set_name ----------+---------------+--------------------- - public | sub_tx_table5 | default_insert_only -(1 row) +SELECT * FROM get_table_repset_info('sub_tx_table5'); -- Replication set: default_insert_only + nspname | relname | set_name +---------+----------------+--------------------- + s611 | sub_tx_table5 | default_insert_only + s611 | sub_tx_table5a | default + s611 | sub_tx_table5c | default_insert_only +(3 rows) \d sub_tx_table5a - Table "public.sub_tx_table5a" + Table "s611.sub_tx_table5a" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | not null | Indexes: "sub_tx_table5a_pkey" PRIMARY KEY, btree (c) -EXECUTE spocktab('sub_tx_table5a'); -- Replication set: default +SELECT * FROM get_table_repset_info('sub_tx_table5a'); -- Replication set: default nspname | relname | set_name ---------+----------------+---------- - public | sub_tx_table5a | default + s611 | sub_tx_table5a | default (1 row) \d sub_tx_table5b Did not find any relation named "sub_tx_table5b". \d sub_tx_table5c - Table "public.sub_tx_table5c" + Table "s611.sub_tx_table5c" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- c | integer | | | -EXECUTE spocktab('sub_tx_table5c'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('sub_tx_table5c'); -- Replication set: default_insert_only nspname | relname | set_name ---------+----------------+--------------------- - public | sub_tx_table5c | default_insert_only + s611 | sub_tx_table5c | default_insert_only (1 row) -- Validate table_ctas1 -- Expected: table exists with columns id (int), name (varchar), age (int) \d table_ctas1 - Table "public.table_ctas1" + Table "s611.table_ctas1" Column | Type | Collation | Nullable | Default --------+-----------------------+-----------+----------+--------- id | integer | | | name | character varying(50) | | | age | integer | | | -EXECUTE spocktab('table_ctas1'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas1'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas1 | default_insert_only + s611 | table_ctas1 | default_insert_only (1 row) -- Expected data: (1, 'Alice', 30), (2, 'Bob', 25), (3, 'Carol', 35) @@ -141,7 +150,7 @@ SELECT * FROM table_ctas1 ORDER BY id; -- Validate table_ctas2 -- Expected: table exists with columns id (int), age (int), primary key on id \d table_ctas2 - Table "public.table_ctas2" + Table "s611.table_ctas2" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- id | integer | | not null | @@ -149,10 +158,10 @@ SELECT * FROM table_ctas1 ORDER BY id; Indexes: "table_ctas2_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('table_ctas2'); -- Replication set: default +SELECT * FROM get_table_repset_info('table_ctas2'); -- Replication set: default nspname | relname | set_name ---------+-------------+---------- - public | table_ctas2 | default + s611 | table_ctas2 | default (1 row) -- Expected data: (3, 35) @@ -165,7 +174,7 @@ SELECT * 
FROM table_ctas2 ORDER BY id; -- Validate table_ctas3 -- Expected: table exists with columns id (int), value (int), primary key on id \d table_ctas3 - Table "public.table_ctas3" + Table "s611.table_ctas3" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- id | integer | | not null | @@ -173,10 +182,10 @@ SELECT * FROM table_ctas2 ORDER BY id; Indexes: "table_ctas3_pkey" PRIMARY KEY, btree (id) -EXECUTE spocktab('table_ctas3'); -- Replication set: default +SELECT * FROM get_table_repset_info('table_ctas3'); -- Replication set: default nspname | relname | set_name ---------+-------------+---------- - public | table_ctas3 | default + s611 | table_ctas3 | default (1 row) -- Expected data: (1, 10), (2, 20), (3, 30) @@ -191,17 +200,17 @@ SELECT * FROM table_ctas3 ORDER BY id; -- Validate table_ctas4 -- Expected: table exists with columns id (int), name (varchar), double_age (int), no data \d table_ctas4 - Table "public.table_ctas4" + Table "s611.table_ctas4" Column | Type | Collation | Nullable | Default ------------+-----------------------+-----------+----------+--------- id | integer | | | name | character varying(50) | | | double_age | integer | | | -EXECUTE spocktab('table_ctas4'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas4'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas4 | default_insert_only + s611 | table_ctas4 | default_insert_only (1 row) -- Expected data: empty (no data) @@ -213,15 +222,15 @@ SELECT * FROM table_ctas4 ORDER BY id; -- Validate table_ctas5 -- Expected: table exists with column num (int) \d table_ctas5 - Table "public.table_ctas5" + Table "s611.table_ctas5" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- num | integer | | | -EXECUTE spocktab('table_ctas5'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas5'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-------------+--------------------- - public | table_ctas5 | default_insert_only + s611 | table_ctas5 | default_insert_only (1 row) -- Expected data: 1 through 10 @@ -243,15 +252,15 @@ SELECT * FROM table_ctas5 ORDER BY num; -- Validate table_ctas6 -- Expected: table exists with column a (int) \d table_ctas6 - Table "public.table_ctas6" + Table "s611.table_ctas6" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- a | integer | | | -EXECUTE spocktab('table_ctas6'); -- Replication set: default_insert_only - nspname | relname | set_name ----------+-------------+---------- - public | table_ctas6 | +SELECT * FROM get_table_repset_info('table_ctas6'); -- Replication set: default_insert_only + nspname | relname | set_name +---------+-------------+--------------------- + s611 | table_ctas6 | default_insert_only (1 row) -- Expected data: 1 @@ -264,7 +273,7 @@ SELECT * FROM table_ctas6 ORDER BY a; -- Validate table_si1 -- Expected: table exists with columns id (int), column1 (text), column2 (int), column3 (date), column4 (boolean) \d table_si1 - Table "public.table_si1" + Table "s611.table_si1" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- id | integer | | | @@ -273,10 +282,10 @@ SELECT * FROM table_ctas6 ORDER BY a; column3 | date | | | column4 | boolean | | | -EXECUTE spocktab('table_si1'); -- Replication set: 
default_insert_only +SELECT * FROM get_table_repset_info('table_si1'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-----------+--------------------- - public | table_si1 | default_insert_only + s611 | table_si1 | default_insert_only (1 row) -- Expected data: (1, 'value1', 10, '2023-01-01', TRUE), (2, 'value2', 20, '2023-01-02', FALSE), (3, 'value3', 30, '2023-01-03', TRUE), (4, 'value4', 40, '2023-01-04', FALSE) @@ -292,17 +301,17 @@ SELECT * FROM table_si1 ORDER BY id; -- Validate table_si2 -- Expected: table exists with columns id (int), column1 (text), column2 (int) \d table_si2 - Table "public.table_si2" + Table "s611.table_si2" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- id | integer | | | column1 | text | | | column2 | integer | | | -EXECUTE spocktab('table_si2'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si2'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-----------+--------------------- - public | table_si2 | default_insert_only + s611 | table_si2 | default_insert_only (1 row) -- Expected data: (3, 'value3', 30), (4, 'value4', 40) @@ -316,16 +325,16 @@ SELECT * FROM table_si2 ORDER BY id; -- Validate table_si3 -- Expected: table exists with columns column4 (boolean), count (int) \d table_si3 - Table "public.table_si3" + Table "s611.table_si3" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- column4 | boolean | | | count | bigint | | | -EXECUTE spocktab('table_si3'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si3'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-----------+--------------------- - public | table_si3 | default_insert_only + s611 | table_si3 | default_insert_only (1 row) -- Expected data: (TRUE, 2), (FALSE, 2) @@ -339,16 +348,16 @@ SELECT * FROM table_si3 ORDER BY column4; -- Validate table_si4 -- Expected: table exists with columns id (int), column1 (text) \d table_si4 - Table "public.table_si4" + Table "s611.table_si4" Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- id | integer | | | column1 | text | | | -EXECUTE spocktab('table_si4'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si4'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-----------+--------------------- - public | table_si4 | default_insert_only + s611 | table_si4 | default_insert_only (1 row) -- Expected data: (4, 'value4'), (3, 'value3') @@ -362,17 +371,17 @@ SELECT * FROM table_si4 ORDER BY id; -- Validate table_si5 -- Expected: table exists with columns id (int), column1 (text), extra_data (varchar) \d table_si5 - Table "public.table_si5" + Table "s611.table_si5" Column | Type | Collation | Nullable | Default ------------+-----------------------+-----------+----------+--------- id | integer | | | column1 | text | | | extra_data | character varying(50) | | | -EXECUTE spocktab('table_si5'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si5'); -- Replication set: default_insert_only nspname | relname | set_name ---------+-----------+--------------------- - public | table_si5 | default_insert_only + s611 | table_si5 | default_insert_only (1 row) -- Expected data: (1, 'value1', 'extra1'), (3, 'value3', 'extra3') @@ -386,16 +395,16 @@ SELECT * FROM table_si5 
ORDER BY id;
-- Validate table_l1
-- Expected: table exists with columns col1 (int), col2 (text, default 'default_text')
\d table_l1
- Table "public.table_l1"
+ Table "s611.table_l1"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+----------------------
col1 | integer | | not null |
col2 | text | | | 'default_text'::text
-EXECUTE spocktab('table_l1'); -- Replication set: default_insert_repset
+SELECT * FROM get_table_repset_info('table_l1'); -- Replication set: default_insert_only
nspname | relname | set_name
---------+----------+---------------------
- public | table_l1 | default_insert_only
+ s611 | table_l1 | default_insert_only
(1 row)
-- Expected data: (3, 'default_text')
@@ -408,16 +417,16 @@ SELECT * FROM table_l1 ORDER BY col1;
-- Validate table_l2
-- Expected: table exists with columns col1 (int, primary key), col2 (text)
\d table_l2
- Table "public.table_l2"
+ Table "s611.table_l2"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
col1 | integer | | not null |
col2 | text | | |
-EXECUTE spocktab('table_l2'); -- Replication set: default_insert_only
+SELECT * FROM get_table_repset_info('table_l2'); -- Replication set: default_insert_only
nspname | relname | set_name
---------+----------+---------------------
- public | table_l2 | default_insert_only
+ s611 | table_l2 | default_insert_only
(1 row)
-- Expected data: (4, 'text4')
@@ -430,7 +439,7 @@ SELECT * FROM table_l2 ORDER BY col1;
-- Validate table_l3
-- Expected: table exists with columns col1 (int), col2 (text), col3 (date), check constraint, unique constraint
\d table_l3
- Table "public.table_l3"
+ Table "s611.table_l3"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
col1 | integer | | |
@@ -441,10 +450,10 @@ Indexes:
Check constraints:
"chk_col1" CHECK (col1 > 0)
-EXECUTE spocktab('table_l3'); -- Replication set: default_insert_only
+SELECT * FROM get_table_repset_info('table_l3'); -- Replication set: default_insert_only
nspname | relname | set_name
---------+----------+---------------------
- public | table_l3 | default_insert_only
+ s611 | table_l3 | default_insert_only
(1 row)
-- Expected data: (3, 'unique_text3', '2023-01-03')
@@ -457,17 +466,17 @@ SELECT * FROM table_l3 ORDER BY col1;
-- Validate table_l4
-- Expected: table exists with columns col1 (int), col2 (text), col3 (date), no constraints
\d table_l4
- Table "public.table_l4"
+ Table "s611.table_l4"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
col1 | integer | | |
col2 | text | | |
col3 | date | | |
-EXECUTE spocktab('table_l4'); -- Replication set: default_insert_only
+SELECT * FROM get_table_repset_info('table_l4'); -- Replication set: default_insert_only
nspname | relname | set_name
---------+----------+---------------------
- public | table_l4 | default_insert_only
+ s611 | table_l4 | default_insert_only
(1 row)
-- Expected data: (4, 'text4', '2023-01-04')
@@ -480,7 +489,7 @@ SELECT * FROM table_l4 ORDER BY col1;
-- Validate table_l5
-- Expected: table exists with columns col1 (int, primary key), col2 (text)
\d table_l5
- Table "public.table_l5"
+ Table "s611.table_l5"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
col1 | integer | | not null |
@@ -488,10 +497,10 @@ SELECT * FROM table_l4 ORDER BY col1;
Indexes:
"table_l5_pkey" PRIMARY KEY, btree (col1)
-EXECUTE spocktab('table_l5'); -- Replication set: default
+SELECT * FROM get_table_repset_info('table_l5'); -- Replication set: default nspname | relname | set_name ---------+----------+---------- - public | table_l5 | default + s611 | table_l5 | default (1 row) -- Expected data: (5, ) @@ -547,6 +556,7 @@ NOTICE: drop cascades to table table_ctas5 membership in replication set defaul INFO: DDL statement replicated. DROP TABLE DROP TABLE table_ctas6; +NOTICE: drop cascades to table table_ctas6 membership in replication set default_insert_only INFO: DDL statement replicated. DROP TABLE --cleanup for select into diff --git a/t/auto_ddl/6111b_table_validate_and_drop_n2.sql b/t/auto_ddl/6111b_table_validate_and_drop_n2.sql index 70b97f6c..ae9ebb61 100644 --- a/t/auto_ddl/6111b_table_validate_and_drop_n2.sql +++ b/t/auto_ddl/6111b_table_validate_and_drop_n2.sql @@ -1,12 +1,15 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + -- 6111b - Validate and drop tables on n2 --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; +SET ROLE appuser; + +SET search_path TO s611, public; -- Validate sub_tx_table0 -- Expected: table exists with column c of type int and primary key \d sub_tx_table0 -EXECUTE spocktab('sub_tx_table0'); -- Replication set: default +SELECT * FROM get_table_repset_info('sub_tx_table0'); -- Replication set: default -- Validate sub_tx_table0a -- Expected: table does not exist @@ -19,12 +22,12 @@ EXECUTE spocktab('sub_tx_table0'); -- Replication set: default -- Validate sub_tx_table2 -- Expected: table exists with column c of type bigint \d sub_tx_table2 -EXECUTE spocktab('sub_tx_table2'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('sub_tx_table2'); -- Replication set: default_insert_only -- Validate sub_tx_table3 -- Expected: table exists with columns a (smallint, primary key) and b (real) \d sub_tx_table3 -EXECUTE spocktab('sub_tx_table3'); -- Replication set: default +SELECT * FROM get_table_repset_info('sub_tx_table3'); -- Replication set: default -- Expected data: (0, 0.09561), (42, 324.78), (56, 7.8), (100, 99.097), (777, 777.777) SELECT * FROM sub_tx_table3 ORDER BY a; @@ -34,126 +37,126 @@ SELECT * FROM sub_tx_table3 ORDER BY a; -- Validate sub_tx_table5, sub_tx_table5a, and sub_tx_table5c, sub_tx_table5b should not exist \d sub_tx_table5 -EXECUTE spocktab('sub_tx_table5'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('sub_tx_table5'); -- Replication set: default_insert_only \d sub_tx_table5a -EXECUTE spocktab('sub_tx_table5a'); -- Replication set: default +SELECT * FROM get_table_repset_info('sub_tx_table5a'); -- Replication set: default \d sub_tx_table5b \d sub_tx_table5c -EXECUTE spocktab('sub_tx_table5c'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('sub_tx_table5c'); -- Replication set: default_insert_only -- Validate table_ctas1 -- Expected: table exists with columns id (int), name (varchar), age (int) \d table_ctas1 -EXECUTE spocktab('table_ctas1'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas1'); -- Replication set: default_insert_only -- Expected data: (1, 'Alice', 30), (2, 'Bob', 25), (3, 'Carol', 35) SELECT * FROM table_ctas1 ORDER BY id; -- Validate table_ctas2 -- Expected: table exists with columns id (int), age (int), primary key on id \d table_ctas2 -EXECUTE spocktab('table_ctas2'); -- Replication set: default +SELECT 
* FROM get_table_repset_info('table_ctas2'); -- Replication set: default -- Expected data: (3, 35) SELECT * FROM table_ctas2 ORDER BY id; -- Validate table_ctas3 -- Expected: table exists with columns id (int), value (int), primary key on id \d table_ctas3 -EXECUTE spocktab('table_ctas3'); -- Replication set: default +SELECT * FROM get_table_repset_info('table_ctas3'); -- Replication set: default -- Expected data: (1, 10), (2, 20), (3, 30) SELECT * FROM table_ctas3 ORDER BY id; -- Validate table_ctas4 -- Expected: table exists with columns id (int), name (varchar), double_age (int), no data \d table_ctas4 -EXECUTE spocktab('table_ctas4'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas4'); -- Replication set: default_insert_only -- Expected data: empty (no data) SELECT * FROM table_ctas4 ORDER BY id; -- Validate table_ctas5 -- Expected: table exists with column num (int) \d table_ctas5 -EXECUTE spocktab('table_ctas5'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas5'); -- Replication set: default_insert_only -- Expected data: 1 through 10 SELECT * FROM table_ctas5 ORDER BY num; -- Validate table_ctas6 -- Expected: table exists with column a (int) \d table_ctas6 -EXECUTE spocktab('table_ctas6'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_ctas6'); -- Replication set: default_insert_only -- Expected data: 1 SELECT * FROM table_ctas6 ORDER BY a; -- Validate table_si1 -- Expected: table exists with columns id (int), column1 (text), column2 (int), column3 (date), column4 (boolean) \d table_si1 -EXECUTE spocktab('table_si1'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si1'); -- Replication set: default_insert_only -- Expected data: (1, 'value1', 10, '2023-01-01', TRUE), (2, 'value2', 20, '2023-01-02', FALSE), (3, 'value3', 30, '2023-01-03', TRUE), (4, 'value4', 40, '2023-01-04', FALSE) SELECT * FROM table_si1 ORDER BY id; -- Validate table_si2 -- Expected: table exists with columns id (int), column1 (text), column2 (int) \d table_si2 -EXECUTE spocktab('table_si2'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si2'); -- Replication set: default_insert_only -- Expected data: (3, 'value3', 30), (4, 'value4', 40) SELECT * FROM table_si2 ORDER BY id; -- Validate table_si3 -- Expected: table exists with columns column4 (boolean), count (int) \d table_si3 -EXECUTE spocktab('table_si3'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si3'); -- Replication set: default_insert_only -- Expected data: (TRUE, 2), (FALSE, 2) SELECT * FROM table_si3 ORDER BY column4; -- Validate table_si4 -- Expected: table exists with columns id (int), column1 (text) \d table_si4 -EXECUTE spocktab('table_si4'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si4'); -- Replication set: default_insert_only -- Expected data: (4, 'value4'), (3, 'value3') SELECT * FROM table_si4 ORDER BY id; -- Validate table_si5 -- Expected: table exists with columns id (int), column1 (text), extra_data (varchar) \d table_si5 -EXECUTE spocktab('table_si5'); -- Replication set: default_insert_only +SELECT * FROM get_table_repset_info('table_si5'); -- Replication set: default_insert_only -- Expected data: (1, 'value1', 'extra1'), (3, 'value3', 'extra3') SELECT * FROM table_si5 ORDER BY id; -- Validate table_l1 -- Expected: table exists with columns col1 (int), col2 (text, 
default 'default_text')
\d table_l1
-EXECUTE spocktab('table_l1'); -- Replication set: default_insert_repset
+SELECT * FROM get_table_repset_info('table_l1'); -- Replication set: default_insert_only
-- Expected data: (3, 'default_text')
SELECT * FROM table_l1 ORDER BY col1;
-- Validate table_l2
-- Expected: table exists with columns col1 (int, primary key), col2 (text)
\d table_l2
-EXECUTE spocktab('table_l2'); -- Replication set: default_insert_only
+SELECT * FROM get_table_repset_info('table_l2'); -- Replication set: default_insert_only
-- Expected data: (4, 'text4')
SELECT * FROM table_l2 ORDER BY col1;
-- Validate table_l3
-- Expected: table exists with columns col1 (int), col2 (text), col3 (date), check constraint, unique constraint
\d table_l3
-EXECUTE spocktab('table_l3'); -- Replication set: default_insert_only
+SELECT * FROM get_table_repset_info('table_l3'); -- Replication set: default_insert_only
-- Expected data: (3, 'unique_text3', '2023-01-03')
SELECT * FROM table_l3 ORDER BY col1;
-- Validate table_l4
-- Expected: table exists with columns col1 (int), col2 (text), col3 (date), no constraints
\d table_l4
-EXECUTE spocktab('table_l4'); -- Replication set: default_insert_only
+SELECT * FROM get_table_repset_info('table_l4'); -- Replication set: default_insert_only
-- Expected data: (4, 'text4', '2023-01-04')
SELECT * FROM table_l4 ORDER BY col1;
-- Validate table_l5
-- Expected: table exists with columns col1 (int, primary key), col2 (text)
\d table_l5
-EXECUTE spocktab('table_l5'); -- Replication set: default
+SELECT * FROM get_table_repset_info('table_l5'); -- Replication set: default
-- Expected data: (5, )
SELECT * FROM table_l5 ORDER BY col1;
diff --git a/t/auto_ddl/6111c_table_validate_n1.out b/t/auto_ddl/6111c_table_validate_n1.out
index 2e1781a8..b569a341 100644
--- a/t/auto_ddl/6111c_table_validate_n1.out
+++ b/t/auto_ddl/6111c_table_validate_n1.out
@@ -1,12 +1,19 @@
+SELECT pg_sleep(1);--to ensure all objects are replicated
+ pg_sleep
+----------
+
+(1 row)
+
-- 6111c - Validate tables on n1
--- Prepared statement for spock.tables so that we can execute it frequently in the script below
-PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid;
-PREPARE
+SET ROLE appuser;
+SET
+SET search_path TO s611, public;
+SET
-- Validate sub_tx_table0
-- Expected: table does not exist
\d sub_tx_table0
Did not find any relation named "sub_tx_table0".
-EXECUTE spocktab('sub_tx_table0');
+SELECT * FROM get_table_repset_info('sub_tx_table0');
nspname | relname | set_name
---------+---------+----------
(0 rows)
@@ -15,7 +22,7 @@ EXECUTE spocktab('sub_tx_table0');
-- Expected: table does not exist
\d sub_tx_table2
Did not find any relation named "sub_tx_table2".
-EXECUTE spocktab('sub_tx_table2');
+SELECT * FROM get_table_repset_info('sub_tx_table2');
nspname | relname | set_name
---------+---------+----------
(0 rows)
@@ -24,7 +31,7 @@ EXECUTE spocktab('sub_tx_table2');
-- Expected: table does not exist
\d sub_tx_table3
Did not find any relation named "sub_tx_table3".
-EXECUTE spocktab('sub_tx_table3');
+SELECT * FROM get_table_repset_info('sub_tx_table3');
nspname | relname | set_name
---------+---------+----------
(0 rows)
@@ -33,28 +40,28 @@ EXECUTE spocktab('sub_tx_table3');
-- Expected: tables do not exist
\d sub_tx_table5
Did not find any relation named "sub_tx_table5".
-EXECUTE spocktab('sub_tx_table5'); +SELECT * FROM get_table_repset_info('sub_tx_table5'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d sub_tx_table5a Did not find any relation named "sub_tx_table5a". -EXECUTE spocktab('sub_tx_table5a'); +SELECT * FROM get_table_repset_info('sub_tx_table5a'); nspname | relname | set_name ---------+---------+---------- (0 rows) \d sub_tx_table5b Did not find any relation named "sub_tx_table5b". -EXECUTE spocktab('sub_tx_table5b'); -- should not exist +SELECT * FROM get_table_repset_info('sub_tx_table5b'); -- should not exist nspname | relname | set_name ---------+---------+---------- (0 rows) \d sub_tx_table5c Did not find any relation named "sub_tx_table5c". -EXECUTE spocktab('sub_tx_table5c'); +SELECT * FROM get_table_repset_info('sub_tx_table5c'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -63,7 +70,7 @@ EXECUTE spocktab('sub_tx_table5c'); -- Expected: table does not exist \d table_ctas1 Did not find any relation named "table_ctas1". -EXECUTE spocktab('table_ctas1'); +SELECT * FROM get_table_repset_info('table_ctas1'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -72,7 +79,7 @@ EXECUTE spocktab('table_ctas1'); -- Expected: table does not exist \d table_ctas2 Did not find any relation named "table_ctas2". -EXECUTE spocktab('table_ctas2'); +SELECT * FROM get_table_repset_info('table_ctas2'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -81,7 +88,7 @@ EXECUTE spocktab('table_ctas2'); -- Expected: table does not exist \d table_ctas3 Did not find any relation named "table_ctas3". -EXECUTE spocktab('table_ctas3'); +SELECT * FROM get_table_repset_info('table_ctas3'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -90,7 +97,7 @@ EXECUTE spocktab('table_ctas3'); -- Expected: table does not exist \d table_ctas4 Did not find any relation named "table_ctas4". -EXECUTE spocktab('table_ctas4'); +SELECT * FROM get_table_repset_info('table_ctas4'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -99,7 +106,7 @@ EXECUTE spocktab('table_ctas4'); -- Expected: table does not exist \d table_ctas5 Did not find any relation named "table_ctas5". -EXECUTE spocktab('table_ctas5'); +SELECT * FROM get_table_repset_info('table_ctas5'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -108,7 +115,7 @@ EXECUTE spocktab('table_ctas5'); -- Expected: table does not exist \d table_ctas6 Did not find any relation named "table_ctas6". -EXECUTE spocktab('table_ctas6'); +SELECT * FROM get_table_repset_info('table_ctas6'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -117,7 +124,7 @@ EXECUTE spocktab('table_ctas6'); -- Expected: table does not exist \d table_si1 Did not find any relation named "table_si1". -EXECUTE spocktab('table_si1'); +SELECT * FROM get_table_repset_info('table_si1'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -126,7 +133,7 @@ EXECUTE spocktab('table_si1'); -- Expected: table does not exist \d table_si2 Did not find any relation named "table_si2". -EXECUTE spocktab('table_si2'); +SELECT * FROM get_table_repset_info('table_si2'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -135,7 +142,7 @@ EXECUTE spocktab('table_si2'); -- Expected: table does not exist \d table_si3 Did not find any relation named "table_si3". 
-EXECUTE spocktab('table_si3'); +SELECT * FROM get_table_repset_info('table_si3'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -144,7 +151,7 @@ EXECUTE spocktab('table_si3'); -- Expected: table does not exist \d table_si4 Did not find any relation named "table_si4". -EXECUTE spocktab('table_si4'); +SELECT * FROM get_table_repset_info('table_si4'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -153,7 +160,7 @@ EXECUTE spocktab('table_si4'); -- Expected: table does not exist \d table_si5 Did not find any relation named "table_si5". -EXECUTE spocktab('table_si5'); +SELECT * FROM get_table_repset_info('table_si5'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -162,7 +169,7 @@ EXECUTE spocktab('table_si5'); -- Expected: table does not exist \d table_l1 Did not find any relation named "table_l1". -EXECUTE spocktab('table_l1'); +SELECT * FROM get_table_repset_info('table_l1'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -171,7 +178,7 @@ EXECUTE spocktab('table_l1'); -- Expected: table does not exist \d table_l2 Did not find any relation named "table_l2". -EXECUTE spocktab('table_l2'); +SELECT * FROM get_table_repset_info('table_l2'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -180,7 +187,7 @@ EXECUTE spocktab('table_l2'); -- Expected: table does not exist \d table_l3 Did not find any relation named "table_l3". -EXECUTE spocktab('table_l3'); +SELECT * FROM get_table_repset_info('table_l3'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -189,7 +196,7 @@ EXECUTE spocktab('table_l3'); -- Expected: table does not exist \d table_l4 Did not find any relation named "table_l4". -EXECUTE spocktab('table_l4'); +SELECT * FROM get_table_repset_info('table_l4'); nspname | relname | set_name ---------+---------+---------- (0 rows) @@ -198,8 +205,14 @@ EXECUTE spocktab('table_l4'); -- Expected: table does not exist \d table_l5 Did not find any relation named "table_l5". -EXECUTE spocktab('table_l5'); +SELECT * FROM get_table_repset_info('table_l5'); nspname | relname | set_name ---------+---------+---------- (0 rows) +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s611 CASCADE; +INFO: DDL statement replicated. 
+DROP SCHEMA diff --git a/t/auto_ddl/6111c_table_validate_n1.sql b/t/auto_ddl/6111c_table_validate_n1.sql index 350f5dc7..8f2c7517 100644 --- a/t/auto_ddl/6111c_table_validate_n1.sql +++ b/t/auto_ddl/6111c_table_validate_n1.sql @@ -1,110 +1,116 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + -- 6111c - Validate tables on n1 --- Prepared statement for spock.tables so that we can execute it frequently in the script below -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname = $1 ORDER BY relid; +SET ROLE appuser; +SET search_path TO s611, public; -- Validate sub_tx_table0 -- Expected: table does not exist \d sub_tx_table0 -EXECUTE spocktab('sub_tx_table0'); +SELECT * FROM get_table_repset_info('sub_tx_table0'); -- Validate sub_tx_table2 -- Expected: table does not exist \d sub_tx_table2 -EXECUTE spocktab('sub_tx_table2'); +SELECT * FROM get_table_repset_info('sub_tx_table2'); -- Validate sub_tx_table3 -- Expected: table does not exist \d sub_tx_table3 -EXECUTE spocktab('sub_tx_table3'); +SELECT * FROM get_table_repset_info('sub_tx_table3'); -- Validate sub_tx_table5, sub_tx_table5a, sub_tx_table5c, sub_tx_table5b should not exist -- Expected: tables do not exist \d sub_tx_table5 -EXECUTE spocktab('sub_tx_table5'); +SELECT * FROM get_table_repset_info('sub_tx_table5'); \d sub_tx_table5a -EXECUTE spocktab('sub_tx_table5a'); +SELECT * FROM get_table_repset_info('sub_tx_table5a'); \d sub_tx_table5b -EXECUTE spocktab('sub_tx_table5b'); -- should not exist +SELECT * FROM get_table_repset_info('sub_tx_table5b'); -- should not exist \d sub_tx_table5c -EXECUTE spocktab('sub_tx_table5c'); +SELECT * FROM get_table_repset_info('sub_tx_table5c'); -- Validate table_ctas1 -- Expected: table does not exist \d table_ctas1 -EXECUTE spocktab('table_ctas1'); +SELECT * FROM get_table_repset_info('table_ctas1'); -- Validate table_ctas2 -- Expected: table does not exist \d table_ctas2 -EXECUTE spocktab('table_ctas2'); +SELECT * FROM get_table_repset_info('table_ctas2'); -- Validate table_ctas3 -- Expected: table does not exist \d table_ctas3 -EXECUTE spocktab('table_ctas3'); +SELECT * FROM get_table_repset_info('table_ctas3'); -- Validate table_ctas4 -- Expected: table does not exist \d table_ctas4 -EXECUTE spocktab('table_ctas4'); +SELECT * FROM get_table_repset_info('table_ctas4'); -- Validate table_ctas5 -- Expected: table does not exist \d table_ctas5 -EXECUTE spocktab('table_ctas5'); +SELECT * FROM get_table_repset_info('table_ctas5'); -- Validate table_ctas6 -- Expected: table does not exist \d table_ctas6 -EXECUTE spocktab('table_ctas6'); +SELECT * FROM get_table_repset_info('table_ctas6'); -- Validate table_si1 -- Expected: table does not exist \d table_si1 -EXECUTE spocktab('table_si1'); +SELECT * FROM get_table_repset_info('table_si1'); -- Validate table_si2 -- Expected: table does not exist \d table_si2 -EXECUTE spocktab('table_si2'); +SELECT * FROM get_table_repset_info('table_si2'); -- Validate table_si3 -- Expected: table does not exist \d table_si3 -EXECUTE spocktab('table_si3'); +SELECT * FROM get_table_repset_info('table_si3'); -- Validate table_si4 -- Expected: table does not exist \d table_si4 -EXECUTE spocktab('table_si4'); +SELECT * FROM get_table_repset_info('table_si4'); -- Validate table_si5 -- Expected: table does not exist \d table_si5 -EXECUTE spocktab('table_si5'); +SELECT * FROM get_table_repset_info('table_si5'); -- Validate table_l1 -- Expected: table does not exist \d table_l1 -EXECUTE spocktab('table_l1'); +SELECT * FROM 
get_table_repset_info('table_l1'); -- Validate table_l2 -- Expected: table does not exist \d table_l2 -EXECUTE spocktab('table_l2'); +SELECT * FROM get_table_repset_info('table_l2'); -- Validate table_l3 -- Expected: table does not exist \d table_l3 -EXECUTE spocktab('table_l3'); +SELECT * FROM get_table_repset_info('table_l3'); -- Validate table_l4 -- Expected: table does not exist \d table_l4 -EXECUTE spocktab('table_l4'); +SELECT * FROM get_table_repset_info('table_l4'); -- Validate table_l5 -- Expected: table does not exist \d table_l5 -EXECUTE spocktab('table_l5'); +SELECT * FROM get_table_repset_info('table_l5'); + +RESET ROLE; +--dropping the schema +DROP SCHEMA s611 CASCADE; \ No newline at end of file diff --git a/t/auto_ddl/6122a_table_range_partitions_n1.out b/t/auto_ddl/6122a_table_range_partitions_n1.out index 7526423f..38f17c8b 100644 --- a/t/auto_ddl/6122a_table_range_partitions_n1.out +++ b/t/auto_ddl/6122a_table_range_partitions_n1.out @@ -1,6 +1,20 @@ --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s612; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA s612 TO appuser; +INFO: DDL statement replicated. +GRANT +SET ROLE appuser; +SET +SET search_path TO s612, public; +SET -------------------------------- -- Range Partitioned Tables -------------------------------- @@ -29,12 +43,12 @@ INSERT INTO sales_range (sale_id, sale_date, amount) VALUES (3, '2022-02-10', 250.00); INSERT 0 3 -- Validate structure and data -EXECUTE spocktab('sales_range'); -- Expect both parent and child tables in default set +SELECT * FROM get_table_repset_info('sales_range'); -- Expect both parent and child tables in default set nspname | relname | set_name ---------+------------------+---------- - public | sales_range | default - public | sales_range_2021 | default - public | sales_range_2022 | default + s612 | sales_range | default + s612 | sales_range_2021 | default + s612 | sales_range_2022 | default (3 rows) SELECT * FROM sales_range ORDER BY sale_id; -- Expect 3 rows sorted by sale_id @@ -68,12 +82,12 @@ INSERT INTO revenue_range (rev_id, rev_date, revenue) VALUES (102, '2022-05-18', 400.00); INSERT 0 2 -- Validate structure and data -EXECUTE spocktab('revenue_range'); -- Expect both parent and child tables in default_insert_only set +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect both parent and child tables in default_insert_only set nspname | relname | set_name ---------+--------------------+--------------------- - public | revenue_range | default_insert_only - public | revenue_range_2021 | default_insert_only - public | revenue_range_2022 | default_insert_only + s612 | revenue_range | default_insert_only + s612 | revenue_range_2021 | default_insert_only + s612 | revenue_range_2022 | default_insert_only (3 rows) SELECT * FROM revenue_range ORDER BY rev_id; -- Expect 2 rows sorted by rev_id @@ -89,7 +103,7 @@ CREATE TABLE sales_range_2023 PARTITION OF sales_range INFO: DDL statement replicated. 
CREATE TABLE \d+ sales_range_2023 - Table "public.sales_range_2023" + Table "s612.sales_range_2023" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -101,13 +115,13 @@ Indexes: "sales_range_2023_pkey" PRIMARY KEY, btree (sale_id, sale_date) Access method: heap -EXECUTE spocktab('sales_range'); -- Expect sales_range_2023 in default set +SELECT * FROM get_table_repset_info('sales_range'); -- Expect sales_range_2023 in default set nspname | relname | set_name ---------+------------------+---------- - public | sales_range | default - public | sales_range_2021 | default - public | sales_range_2022 | default - public | sales_range_2023 | default + s612 | sales_range | default + s612 | sales_range_2021 | default + s612 | sales_range_2022 | default + s612 | sales_range_2023 | default (4 rows) -- Add a primary key to a range partitioned table that initially didn't have one @@ -115,7 +129,7 @@ ALTER TABLE revenue_range ADD PRIMARY KEY (rev_id, rev_date); INFO: DDL statement replicated. ALTER TABLE \d revenue_range - Partitioned table "public.revenue_range" + Partitioned table "s612.revenue_range" Column | Type | Collation | Nullable | Default ----------+---------+-----------+----------+--------- rev_id | integer | | not null | @@ -126,17 +140,12 @@ Indexes: "revenue_range_pkey" PRIMARY KEY, btree (rev_id, rev_date) Number of partitions: 2 (Use \d+ to list them.) -/*TO FIX: -At present, adding a parimary key to parent table does not move the partitions to default repset. -To revisit and update outputs once this is addressed -https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -*/ -EXECUTE spocktab('revenue_range'); -- Expect revenue_range and all child partitions to move to default set - nspname | relname | set_name ----------+--------------------+--------------------- - public | revenue_range | default - public | revenue_range_2021 | default_insert_only - public | revenue_range_2022 | default_insert_only +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect revenue_range and all child partitions to move to default set + nspname | relname | set_name +---------+--------------------+---------- + s612 | revenue_range | default + s612 | revenue_range_2021 | default + s612 | revenue_range_2022 | default (3 rows) -- Add another partition to the modified table @@ -145,7 +154,7 @@ CREATE TABLE revenue_range_2023 PARTITION OF revenue_range INFO: DDL statement replicated. 
CREATE TABLE \d+ revenue_range_2023 - Table "public.revenue_range_2023" + Table "s612.revenue_range_2023" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -157,13 +166,13 @@ Indexes: "revenue_range_2023_pkey" PRIMARY KEY, btree (rev_id, rev_date) Access method: heap -EXECUTE spocktab('revenue_range'); -- Expect revenue_range_2023 in default set - nspname | relname | set_name ----------+--------------------+--------------------- - public | revenue_range | default - public | revenue_range_2021 | default_insert_only - public | revenue_range_2022 | default_insert_only - public | revenue_range_2023 | default +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect revenue_range_2023 in default set + nspname | relname | set_name +---------+--------------------+---------- + s612 | revenue_range | default + s612 | revenue_range_2021 | default + s612 | revenue_range_2022 | default + s612 | revenue_range_2023 | default (4 rows) -- Insert data into the newly added partitions @@ -198,12 +207,12 @@ INSERT INTO orders_range (order_id, order_date, customer_id, total) VALUES (1002, '2022-01-10', 2, 1000.00); INSERT 0 2 -- Validate structure and data -EXECUTE spocktab('orders_range'); -- Expect both parent and child tables in default set +SELECT * FROM get_table_repset_info('orders_range'); -- Expect both parent and child tables in default set nspname | relname | set_name ---------+-------------------+---------- - public | orders_range | default - public | orders_range_2021 | default - public | orders_range_2022 | default + s612 | orders_range | default + s612 | orders_range_2021 | default + s612 | orders_range_2022 | default (3 rows) SELECT * FROM orders_range ORDER BY order_id; -- Expect 2 rows @@ -217,25 +226,25 @@ SELECT * FROM orders_range ORDER BY order_id; -- Expect 2 rows ALTER TABLE sales_range DETACH PARTITION sales_range_2023; INFO: DDL statement replicated. ALTER TABLE -EXECUTE spocktab('sales_range'); --should still have the repset assigned +SELECT * FROM get_table_repset_info('sales_range'); --should still have the repset assigned nspname | relname | set_name ---------+------------------+---------- - public | sales_range | default - public | sales_range_2021 | default - public | sales_range_2022 | default - public | sales_range_2023 | default + s612 | sales_range | default + s612 | sales_range_2021 | default + s612 | sales_range_2022 | default + s612 | sales_range_2023 | default (4 rows) DROP TABLE sales_range_2023; NOTICE: drop cascades to table sales_range_2023 membership in replication set default INFO: DDL statement replicated. 
DROP TABLE -EXECUTE spocktab('sales_range'); -- validate sales_range_2023 to be removed +SELECT * FROM get_table_repset_info('sales_range'); -- validate sales_range_2023 to be removed nspname | relname | set_name ---------+------------------+---------- - public | sales_range | default - public | sales_range_2021 | default - public | sales_range_2022 | default + s612 | sales_range | default + s612 | sales_range_2021 | default + s612 | sales_range_2022 | default (3 rows) -- Create a range partitioned table with default partition @@ -262,12 +271,12 @@ INSERT INTO inventory_range (product_id, product_date, quantity) VALUES (2, '2022-02-10', 100); -- Should go to default partition INSERT 0 2 -- Validate structure and data -EXECUTE spocktab('inventory_range'); -- Expect both parent and child tables in default set +SELECT * FROM get_table_repset_info('inventory_range'); -- Expect both parent and child tables in default set nspname | relname | set_name ---------+-------------------------+---------- - public | inventory_range | default - public | inventory_range_2021 | default - public | inventory_range_default | default + s612 | inventory_range | default + s612 | inventory_range_2021 | default + s612 | inventory_range_default | default (3 rows) SELECT * FROM inventory_range ORDER BY product_id; -- Expect 2 rows @@ -311,13 +320,13 @@ ALTER TABLE inventory_range ATTACH PARTITION inventory_standalone FOR VALUES FRO INFO: DDL statement replicated. ALTER TABLE -- Validate structure and data -EXECUTE spocktab('inventory'); -- Expect inventory_standalone to be listed - nspname | relname | set_name ----------+-------------------------+--------------------- - public | inventory_range | default - public | inventory_range_2021 | default - public | inventory_range_default | default - public | inventory_standalone | default_insert_only +SELECT * FROM get_table_repset_info('inventory'); -- Expect inventory_standalone to be listed + nspname | relname | set_name +---------+-------------------------+---------- + s612 | inventory_range | default + s612 | inventory_range_2021 | default + s612 | inventory_range_default | default + s612 | inventory_standalone | default (4 rows) SELECT * FROM inventory_standalone ORDER BY product_id; -- Expect 1 row @@ -365,7 +374,7 @@ SELECT * FROM inventory_standalone ORDER BY product_id; -- Expect 1 row -- Validate final structure \d+ sales_range - Partitioned table "public.sales_range" + Partitioned table "s612.sales_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -378,7 +387,7 @@ Partitions: sales_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01'), sales_range_2022 FOR VALUES FROM ('2022-01-01') TO ('2023-01-01') \d+ sales_range_2021 - Table "public.sales_range_2021" + Table "s612.sales_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -391,7 +400,7 @@ Indexes: Access method: heap \d+ sales_range_2022 - Table "public.sales_range_2022" + Table "s612.sales_range_2022" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description 
-----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -406,7 +415,7 @@ Access method: heap \d+ sales_range_2023 Did not find any relation named "sales_range_2023". \d+ revenue_range - Partitioned table "public.revenue_range" + Partitioned table "s612.revenue_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -420,7 +429,7 @@ Partitions: revenue_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01'), revenue_range_2023 FOR VALUES FROM ('2023-01-01') TO ('2024-01-01') \d+ revenue_range_2021 - Table "public.revenue_range_2021" + Table "s612.revenue_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -433,7 +442,7 @@ Indexes: Access method: heap \d+ revenue_range_2022 - Table "public.revenue_range_2022" + Table "s612.revenue_range_2022" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -446,7 +455,7 @@ Indexes: Access method: heap \d orders_range - Partitioned table "public.orders_range" + Partitioned table "s612.orders_range" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- order_id | integer | | not null | @@ -459,7 +468,7 @@ Indexes: Number of partitions: 2 (Use \d+ to list them.) 
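[Editor's aside, not part of the patch] The corrected 6122a expectations above encode the autoDDL assignment rule these tests exercise: a table created with a primary key is added to the default replication set, a table created without one goes to default_insert_only, and ALTER TABLE ... ADD PRIMARY KEY on a partitioned parent now moves the parent and all of its children to default (hence the removed TO FIX block and the updated revenue_range rows). A minimal sketch of the rule; the demo_* table names are hypothetical and not part of the suite:

CREATE TABLE demo_pk (id int PRIMARY KEY); -- autoDDL: expect set_name = default
CREATE TABLE demo_nopk (id int);           -- autoDDL: expect set_name = default_insert_only
SELECT * FROM get_table_repset_info('demo'); -- should list both tables, one per set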
\d+ orders_range - Partitioned table "public.orders_range" + Partitioned table "s612.orders_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- order_id | integer | | not null | | plain | | | @@ -473,7 +482,7 @@ Partitions: orders_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01'), orders_range_2022 FOR VALUES FROM ('2022-01-01') TO ('2023-01-01') \d+ orders_range_2021 - Table "public.orders_range_2021" + Table "s612.orders_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- order_id | integer | | not null | | plain | | | @@ -487,7 +496,7 @@ Indexes: Access method: heap \d+ orders_range_2022 - Table "public.orders_range_2022" + Table "s612.orders_range_2022" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- order_id | integer | | not null | | plain | | | @@ -501,7 +510,7 @@ Indexes: Access method: heap \d+ inventory_range - Partitioned table "public.inventory_range" + Partitioned table "s612.inventory_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -516,7 +525,7 @@ Partitions: inventory_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01' inventory_range_default DEFAULT \d+ inventory_range_2021 - Table "public.inventory_range_2021" + Table "s612.inventory_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -532,7 +541,7 @@ Check constraints: Access method: heap \d+ inventory_range_default - Table "public.inventory_range_default" + Table "s612.inventory_range_default" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -546,7 +555,7 @@ Indexes: Access method: heap \d+ inventory_standalone - Table "public.inventory_standalone" + Table "s612.inventory_standalone" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | diff --git a/t/auto_ddl/6122a_table_range_partitions_n1.sql b/t/auto_ddl/6122a_table_range_partitions_n1.sql index 77e8b072..b43e0ec5 100644 --- a/t/auto_ddl/6122a_table_range_partitions_n1.sql +++ b/t/auto_ddl/6122a_table_range_partitions_n1.sql @@ -1,6 +1,12 @@ --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT 
pg_sleep(1);--to ensure all objects are replicated +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s612; +GRANT ALL PRIVILEGES ON SCHEMA s612 TO appuser; + +SET ROLE appuser; + +SET search_path TO s612, public; -------------------------------- -- Range Partitioned Tables -------------------------------- @@ -26,7 +32,7 @@ INSERT INTO sales_range (sale_id, sale_date, amount) VALUES (3, '2022-02-10', 250.00); -- Validate structure and data -EXECUTE spocktab('sales_range'); -- Expect both parent and child tables in default set +SELECT * FROM get_table_repset_info('sales_range'); -- Expect both parent and child tables in default set SELECT * FROM sales_range ORDER BY sale_id; -- Expect 3 rows sorted by sale_id -- Create another range partitioned table without primary key @@ -49,30 +55,26 @@ INSERT INTO revenue_range (rev_id, rev_date, revenue) VALUES -- Validate structure and data -EXECUTE spocktab('revenue_range'); -- Expect both parent and child tables in default_insert_only set +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect both parent and child tables in default_insert_only set SELECT * FROM revenue_range ORDER BY rev_id; -- Expect 2 rows sorted by rev_id -- Alter table to add a new partition CREATE TABLE sales_range_2023 PARTITION OF sales_range FOR VALUES FROM ('2023-01-01') TO ('2024-01-01'); \d+ sales_range_2023 -EXECUTE spocktab('sales_range'); -- Expect sales_range_2023 in default set +SELECT * FROM get_table_repset_info('sales_range'); -- Expect sales_range_2023 in default set -- Add a primary key to a range partitioned table that initially didn't have one ALTER TABLE revenue_range ADD PRIMARY KEY (rev_id, rev_date); \d revenue_range -/*TO FIX: -At present, adding a parimary key to parent table does not move the partitions to default repset. 
-To revisit and update outputs once this is addressed -https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -*/ -EXECUTE spocktab('revenue_range'); -- Expect revenue_range and all child partitions to move to default set + +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect revenue_range and all child partitions to move to default set -- Add another partition to the modified table CREATE TABLE revenue_range_2023 PARTITION OF revenue_range FOR VALUES FROM ('2023-01-01') TO ('2024-01-01'); \d+ revenue_range_2023 -EXECUTE spocktab('revenue_range'); -- Expect revenue_range_2023 in default set +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect revenue_range_2023 in default set -- Insert data into the newly added partitions INSERT INTO sales_range (sale_id, sale_date, amount) VALUES @@ -101,14 +103,14 @@ INSERT INTO orders_range (order_id, order_date, customer_id, total) VALUES (1002, '2022-01-10', 2, 1000.00); -- Validate structure and data -EXECUTE spocktab('orders_range'); -- Expect both parent and child tables in default set +SELECT * FROM get_table_repset_info('orders_range'); -- Expect both parent and child tables in default set SELECT * FROM orders_range ORDER BY order_id; -- Expect 2 rows -- Drop a partition ALTER TABLE sales_range DETACH PARTITION sales_range_2023; -EXECUTE spocktab('sales_range'); --should still have the repset assigned +SELECT * FROM get_table_repset_info('sales_range'); --should still have the repset assigned DROP TABLE sales_range_2023; -EXECUTE spocktab('sales_range'); -- validate sales_range_2023 to be removed +SELECT * FROM get_table_repset_info('sales_range'); -- validate sales_range_2023 to be removed -- Create a range partitioned table with default partition CREATE TABLE inventory_range ( @@ -130,7 +132,7 @@ INSERT INTO inventory_range (product_id, product_date, quantity) VALUES (2, '2022-02-10', 100); -- Should go to default partition -- Validate structure and data -EXECUTE spocktab('inventory_range'); -- Expect both parent and child tables in default set +SELECT * FROM get_table_repset_info('inventory_range'); -- Expect both parent and child tables in default set SELECT * FROM inventory_range ORDER BY product_id; -- Expect 2 rows -- Alter the inventory_range table to add a new column and change data type @@ -162,7 +164,7 @@ INSERT INTO inventory_standalone (product_id, product_date, quantity, price) VAL ALTER TABLE inventory_range ATTACH PARTITION inventory_standalone FOR VALUES FROM ('2023-01-01') TO ('2024-01-01'); -- Validate structure and data -EXECUTE spocktab('inventory'); -- Expect inventory_standalone to be listed +SELECT * FROM get_table_repset_info('inventory'); -- Expect inventory_standalone to be listed SELECT * FROM inventory_standalone ORDER BY product_id; -- Expect 1 row -- Validate final data diff --git a/t/auto_ddl/6122b_table_range_partitions_validate_n2.out b/t/auto_ddl/6122b_table_range_partitions_validate_n2.out index 19ffc155..75e2cb94 100644 --- a/t/auto_ddl/6122b_table_range_partitions_validate_n2.out +++ b/t/auto_ddl/6122b_table_range_partitions_validate_n2.out @@ -1,30 +1,37 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + --This file will run on n2 and validate all the replicated tables data, structure and replication sets they're in --- Prepared statement for spock.tables to list parent and child tables as parent table name 
will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE -EXECUTE spocktab('sales_range'); -- Expect sales_range, sales_range_2022, sales_range_2021 in default set +SET ROLE appuser; +SET +SET search_path TO s612, public; +SET +SELECT * FROM get_table_repset_info('sales_range'); -- Expect sales_range, sales_range_2022, sales_range_2021 in default set nspname | relname | set_name ---------+------------------+---------- - public | sales_range | default - public | sales_range_2021 | default - public | sales_range_2022 | default + s612 | sales_range | default + s612 | sales_range_2021 | default + s612 | sales_range_2022 | default (3 rows) -EXECUTE spocktab('revenue_range'); -- Expect revenue_range, revenue_range_2023 in default and revenue_range_2021, revenue_range_2022 in default_insert_only set - nspname | relname | set_name ----------+--------------------+--------------------- - public | revenue_range | default - public | revenue_range_2021 | default_insert_only - public | revenue_range_2022 | default_insert_only - public | revenue_range_2023 | default +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect revenue_range and all child partitions in default set + nspname | relname | set_name +---------+--------------------+---------- + s612 | revenue_range | default + s612 | revenue_range_2021 | default + s612 | revenue_range_2022 | default + s612 | revenue_range_2023 | default (4 rows) -EXECUTE spocktab('orders_range'); -- Expect orders_range, orders_range_2021, orders_range_2022 in default set +SELECT * FROM get_table_repset_info('orders_range'); -- Expect orders_range, orders_range_2021, orders_range_2022 in default set nspname | relname | set_name ---------+-------------------+---------- - public | orders_range | default - public | orders_range_2021 | default - public | orders_range_2022 | default + s612 | orders_range | default + s612 | orders_range_2021 | default + s612 | orders_range_2022 | default (3 rows) -- Validate final data @@ -66,7 +73,7 @@ SELECT * FROM inventory_standalone ORDER BY product_id; -- Expect 1 row -- Validate final structure \d+ sales_range - Partitioned table "public.sales_range" + Partitioned table "s612.sales_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -79,7 +86,7 @@ Partitions: sales_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01'), sales_range_2022 FOR VALUES FROM ('2022-01-01') TO ('2023-01-01') \d+ sales_range_2021 - Table "public.sales_range_2021" + Table "s612.sales_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -92,7 +99,7 @@ Indexes: Access method: heap \d+ sales_range_2022 - Table "public.sales_range_2022" + Table "s612.sales_range_2022" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -107,7 +114,7 @@ Access method: heap
\d+ sales_range_2023 Did not find any relation named "sales_range_2023". \d+ revenue_range - Partitioned table "public.revenue_range" + Partitioned table "s612.revenue_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -121,7 +128,7 @@ Partitions: revenue_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01'), revenue_range_2023 FOR VALUES FROM ('2023-01-01') TO ('2024-01-01') \d+ revenue_range_2021 - Table "public.revenue_range_2021" + Table "s612.revenue_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -134,7 +141,7 @@ Indexes: Access method: heap \d+ revenue_range_2022 - Table "public.revenue_range_2022" + Table "s612.revenue_range_2022" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+---------+-----------+----------+---------+---------+-------------+--------------+------------- rev_id | integer | | not null | | plain | | | @@ -147,7 +154,7 @@ Indexes: Access method: heap \d orders_range - Partitioned table "public.orders_range" + Partitioned table "s612.orders_range" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- order_id | integer | | not null | @@ -160,7 +167,7 @@ Indexes: Number of partitions: 2 (Use \d+ to list them.) \d+ orders_range - Partitioned table "public.orders_range" + Partitioned table "s612.orders_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- order_id | integer | | not null | | plain | | | @@ -174,7 +181,7 @@ Partitions: orders_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01'), orders_range_2022 FOR VALUES FROM ('2022-01-01') TO ('2023-01-01') \d+ orders_range_2021 - Table "public.orders_range_2021" + Table "s612.orders_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- order_id | integer | | not null | | plain | | | @@ -188,7 +195,7 @@ Indexes: Access method: heap \d+ orders_range_2022 - Table "public.orders_range_2022" + Table "s612.orders_range_2022" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- order_id | integer | | not null | | plain | | | @@ -202,7 +209,7 @@ Indexes: Access method: heap \d+ inventory_range - Partitioned table "public.inventory_range" + Partitioned table "s612.inventory_range" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -217,7 +224,7 @@ Partitions: inventory_range_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01' inventory_range_default DEFAULT \d+ inventory_range_2021 - Table 
"public.inventory_range_2021" + Table "s612.inventory_range_2021" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -233,7 +240,7 @@ Check constraints: Access method: heap \d+ inventory_range_default - Table "public.inventory_range_default" + Table "s612.inventory_range_default" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -247,7 +254,7 @@ Indexes: Access method: heap \d+ inventory_standalone - Table "public.inventory_standalone" + Table "s612.inventory_standalone" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+---------+-------------+--------------+------------- product_id | integer | | not null | | plain | | | @@ -268,8 +275,8 @@ INFO: DDL statement replicated. DROP TABLE DROP TABLE revenue_range CASCADE; NOTICE: drop cascades to table revenue_range_2023 membership in replication set default -NOTICE: drop cascades to table revenue_range_2022 membership in replication set default_insert_only -NOTICE: drop cascades to table revenue_range_2021 membership in replication set default_insert_only +NOTICE: drop cascades to table revenue_range_2022 membership in replication set default +NOTICE: drop cascades to table revenue_range_2021 membership in replication set default NOTICE: drop cascades to table revenue_range membership in replication set default INFO: DDL statement replicated. DROP TABLE @@ -280,7 +287,7 @@ NOTICE: drop cascades to table orders_range membership in replication set defau INFO: DDL statement replicated. 
DROP TABLE DROP TABLE inventory_range CASCADE; -NOTICE: drop cascades to table inventory_standalone membership in replication set default_insert_only +NOTICE: drop cascades to table inventory_standalone membership in replication set default NOTICE: drop cascades to table inventory_range_default membership in replication set default NOTICE: drop cascades to table inventory_range_2021 membership in replication set default NOTICE: drop cascades to table inventory_range membership in replication set default diff --git a/t/auto_ddl/6122b_table_range_partitions_validate_n2.sql b/t/auto_ddl/6122b_table_range_partitions_validate_n2.sql index 11705270..2b464e71 100644 --- a/t/auto_ddl/6122b_table_range_partitions_validate_n2.sql +++ b/t/auto_ddl/6122b_table_range_partitions_validate_n2.sql @@ -1,10 +1,13 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + --This file will run on n2 and validate all the replicated tables data, structure and replication sets they're in --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SET ROLE appuser; + +SET search_path TO s612, public; -EXECUTE spocktab('sales_range'); -- Expect sales_range, sales_range_2022, sales_range_2021 in default set -EXECUTE spocktab('revenue_range'); -- Expect revenue_range, revenue_range_2023 in default and revenue_range_2021, revenue_range_2022 in default_insert_only set -EXECUTE spocktab('orders_range'); -- Expect orders_range, orders_range_2021, orders_range_2022 in default set +SELECT * FROM get_table_repset_info('sales_range'); -- Expect sales_range, sales_range_2022, sales_range_2021 in default set +SELECT * FROM get_table_repset_info('revenue_range'); -- Expect revenue_range and all child partitions in default set +SELECT * FROM get_table_repset_info('orders_range'); -- Expect orders_range, orders_range_2021, orders_range_2022 in default set -- Validate final data SELECT * FROM sales_range ORDER BY sale_id; -- Expect all rows diff --git a/t/auto_ddl/6122c_table_range_parition_validate_n1.out b/t/auto_ddl/6122c_table_range_parition_validate_n1.out index de06f829..0df49290 100644 --- a/t/auto_ddl/6122c_table_range_parition_validate_n1.out +++ b/t/auto_ddl/6122c_table_range_parition_validate_n1.out @@ -1,11 +1,15 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6122b ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; - relid | nspname | relname | set_name --------+---------+---------+---------- -(0 rows) - +SET ROLE appuser; +SET +SET search_path TO s612, public; +SET -- none of these tables should exist. \d+ sales_range Did not find any relation named "sales_range". @@ -15,20 +19,38 @@ Did not find any relation named "sales_range_2021". Did not find any relation named "sales_range_2022". \d+ sales_range_2023 Did not find any relation named "sales_range_2023". +--spock.tables should be empty +SELECT * FROM get_table_repset_info('sales'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + \d+ revenue_range Did not find any relation named "revenue_range".
\d+ revenue_range_2021 Did not find any relation named "revenue_range_2021". \d+ revenue_range_2022 Did not find any relation named "revenue_range_2022". -\d orders_range -Did not find any relation named "orders_range". +\d revenue_range +Did not find any relation named "revenue_range". +--spock.tables should be empty +SELECT * FROM get_table_repset_info('revenue'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + \d+ orders_range Did not find any relation named "orders_range". \d+ orders_range_2021 Did not find any relation named "orders_range_2021". \d+ orders_range_2022 Did not find any relation named "orders_range_2022". +--spock.tables should be empty +SELECT * FROM get_table_repset_info('orders'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + \d+ inventory_range Did not find any relation named "inventory_range". \d+ inventory_range_2021 @@ -37,3 +59,15 @@ Did not find any relation named "inventory_range_2021". Did not find any relation named "inventory_range_default". \d+ inventory_standalone Did not find any relation named "inventory_standalone". +--spock.tables should be empty +SELECT * FROM get_table_repset_info('inventory'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s612 CASCADE; +INFO: DDL statement replicated. +DROP SCHEMA diff --git a/t/auto_ddl/6122c_table_range_parition_validate_n1.sql b/t/auto_ddl/6122c_table_range_parition_validate_n1.sql index b74f52c0..a7993874 100644 --- a/t/auto_ddl/6122c_table_range_parition_validate_n1.sql +++ b/t/auto_ddl/6122c_table_range_parition_validate_n1.sql @@ -1,24 +1,39 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6122b +SET ROLE appuser; + +SET search_path TO s612, public; ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; -- none of these tables should exist. 
\d+ sales_range \d+ sales_range_2021 \d+ sales_range_2022 \d+ sales_range_2023 +--spock.tables should be empty +SELECT * FROM get_table_repset_info('sales'); \d+ revenue_range \d+ revenue_range_2021 \d+ revenue_range_2022 -\d orders_range +\d revenue_range +--spock.tables should be empty +SELECT * FROM get_table_repset_info('revenue'); \d+ orders_range \d+ orders_range_2021 \d+ orders_range_2022 +--spock.tables should be empty +SELECT * FROM get_table_repset_info('orders'); \d+ inventory_range \d+ inventory_range_2021 \d+ inventory_range_default \d+ inventory_standalone +--spock.tables should be empty +SELECT * FROM get_table_repset_info('inventory'); + +RESET ROLE; +--dropping the schema +DROP SCHEMA s612 CASCADE; \ No newline at end of file diff --git a/t/auto_ddl/6133a_table_list_partitions_n1.out b/t/auto_ddl/6133a_table_list_partitions_n1.out index c4eb705a..2c5f96f5 100644 --- a/t/auto_ddl/6133a_table_list_partitions_n1.out +++ b/t/auto_ddl/6133a_table_list_partitions_n1.out @@ -1,6 +1,20 @@ --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s613; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA s613 TO appuser; +INFO: DDL statement replicated. +GRANT +SET ROLE appuser; +SET +SET search_path TO s613, public; +SET ----------------------------- -- List Partitioning ----------------------------- @@ -28,12 +42,12 @@ INSERT INTO sales_list (sale_id, sale_region, sale_amount) VALUES (2, 'West', 200.0), (3, 'East', 150.0); INSERT 0 3 -EXECUTE spocktab('sales_list'); -- Expect both parent and child tables in default repset +SELECT * FROM get_table_repset_info('sales_list'); -- Expect both parent and child tables in default repset nspname | relname | set_name ---------+-----------------+---------- - public | sales_list | default - public | sales_list_east | default - public | sales_list_west | default + s613 | sales_list | default + s613 | sales_list_east | default + s613 | sales_list_west | default (3 rows) SELECT * FROM sales_list ORDER BY sale_id; -- Expect 3 rows @@ -55,7 +69,7 @@ INSERT INTO sales_list (sale_id, sale_region, sale_amount) VALUES INSERT 0 1 -- Validate structure and data after adding new partition \d+ sales_list_east - Table "public.sales_list_east" + Table "s613.sales_list_east" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -68,7 +82,7 @@ Indexes: Access method: heap \d+ sales_list_west - Table "public.sales_list_west" + Table "s613.sales_list_west" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -81,7 +95,7 @@ Indexes: Access method: heap \d+ sales_list_north - Table "public.sales_list_north" + Table "s613.sales_list_north" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | 
Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -94,7 +108,7 @@ Indexes: Access method: heap \d+ sales_list - Partitioned table "public.sales_list" + Partitioned table "s613.sales_list" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -107,13 +121,13 @@ Partitions: sales_list_east FOR VALUES IN ('East'), sales_list_north FOR VALUES IN ('North'), sales_list_west FOR VALUES IN ('West') -EXECUTE spocktab('sales_list'); -- Expect the new partition to be listed +SELECT * FROM get_table_repset_info('sales_list'); -- Expect the new partition to be listed nspname | relname | set_name ---------+------------------+---------- - public | sales_list | default - public | sales_list_east | default - public | sales_list_west | default - public | sales_list_north | default + s613 | sales_list | default + s613 | sales_list_east | default + s613 | sales_list_west | default + s613 | sales_list_north | default (4 rows) SELECT * FROM sales_list ORDER BY sale_id; -- Expect 4 rows @@ -125,39 +139,114 @@ SELECT * FROM sales_list ORDER BY sale_id; -- Expect 4 rows 4 | North | 250.0 (4 rows) -/*TO FIX: -commenting this test case due to https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -- Create a list partitioned table without primary key CREATE TABLE products_list ( product_id INT, product_category TEXT, product_name TEXT ) PARTITION BY LIST (product_category); - +INFO: DDL statement replicated. +CREATE TABLE -- Add partitions to the products_list table CREATE TABLE products_list_electronics PARTITION OF products_list FOR VALUES IN ('Electronics'); +INFO: DDL statement replicated. +CREATE TABLE CREATE TABLE products_list_clothing PARTITION OF products_list FOR VALUES IN ('Clothing'); - +INFO: DDL statement replicated. 
+CREATE TABLE -- Insert data into the products_list table INSERT INTO products_list (product_id, product_category, product_name) VALUES (1, 'Electronics', 'Laptop'), (2, 'Clothing', 'Shirt'), (3, 'Electronics', 'Smartphone'); - +INSERT 0 3 -- Validate structure and data \d+ products_list -EXECUTE spocktab('products_list'); -- Expect both parent and child tables in default_insert_only set + Partitioned table "s613.products_list" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | | | plain | | | + product_category | text | | | | extended | | | + product_name | text | | | | extended | | | +Partition key: LIST (product_category) +Partitions: products_list_clothing FOR VALUES IN ('Clothing'), + products_list_electronics FOR VALUES IN ('Electronics') + +SELECT * FROM get_table_repset_info('products_list'); -- Expect both parent and child tables in default_insert_only set + nspname | relname | set_name +---------+---------------------------+--------------------- + s613 | products_list | default_insert_only + s613 | products_list_electronics | default_insert_only + s613 | products_list_clothing | default_insert_only +(3 rows) + SELECT * FROM products_list ORDER BY product_id; -- Expect 3 rows + product_id | product_category | product_name +------------+------------------+-------------- + 1 | Electronics | Laptop + 2 | Clothing | Shirt + 3 | Electronics | Smartphone +(3 rows) -- Alter the products_list table to add a primary key ALTER TABLE products_list ADD PRIMARY KEY (product_id, product_category); - +INFO: DDL statement replicated. +ALTER TABLE -- Validate structure and data after adding primary key \d+ products_list + Partitioned table "s613.products_list" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | not null | | plain | | | + product_category | text | | not null | | extended | | | + product_name | text | | | | extended | | | +Partition key: LIST (product_category) +Indexes: + "products_list_pkey" PRIMARY KEY, btree (product_id, product_category) +Partitions: products_list_clothing FOR VALUES IN ('Clothing'), + products_list_electronics FOR VALUES IN ('Electronics') + \d+ products_list_clothing + Table "s613.products_list_clothing" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | not null | | plain | | | + product_category | text | | not null | | extended | | | + product_name | text | | | | extended | | | +Partition of: products_list FOR VALUES IN ('Clothing') +Partition constraint: ((product_category IS NOT NULL) AND (product_category = 'Clothing'::text)) +Indexes: + "products_list_clothing_pkey" PRIMARY KEY, btree (product_id, product_category) +Access method: heap + \d+ products_list_electronics -EXECUTE spocktab('products_list'); -- Expect the replication set to change to default + Table "s613.products_list_electronics" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description 
+------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | not null | | plain | | | + product_category | text | | not null | | extended | | | + product_name | text | | | | extended | | | +Partition of: products_list FOR VALUES IN ('Electronics') +Partition constraint: ((product_category IS NOT NULL) AND (product_category = 'Electronics'::text)) +Indexes: + "products_list_electronics_pkey" PRIMARY KEY, btree (product_id, product_category) +Access method: heap + +SELECT * FROM get_table_repset_info('products_list'); -- Expect the replication set to change to default + nspname | relname | set_name +---------+---------------------------+---------- + s613 | products_list | default + s613 | products_list_electronics | default + s613 | products_list_clothing | default +(3 rows) + SELECT * FROM products_list ORDER BY product_id; -- Expect 3 rows -*/ + product_id | product_category | product_name +------------+------------------+-------------- + 1 | Electronics | Laptop + 2 | Clothing | Shirt + 3 | Electronics | Smartphone +(3 rows) + diff --git a/t/auto_ddl/6133a_table_list_partitions_n1.sql b/t/auto_ddl/6133a_table_list_partitions_n1.sql index 5f393196..152e90ca 100644 --- a/t/auto_ddl/6133a_table_list_partitions_n1.sql +++ b/t/auto_ddl/6133a_table_list_partitions_n1.sql @@ -1,5 +1,13 @@ --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s613; + +GRANT ALL PRIVILEGES ON SCHEMA s613 TO appuser; + +SET ROLE appuser; + +SET search_path TO s613, public; ----------------------------- -- List Partitioning @@ -25,7 +33,7 @@ INSERT INTO sales_list (sale_id, sale_region, sale_amount) VALUES (2, 'West', 200.0), (3, 'East', 150.0); -EXECUTE spocktab('sales_list'); -- Expect both parent and child tables in default repset +SELECT * FROM get_table_repset_info('sales_list'); -- Expect both parent and child tables in default repset SELECT * FROM sales_list ORDER BY sale_id; -- Expect 3 rows -- Alter the sales_list table to add a new partition @@ -41,10 +49,8 @@ INSERT INTO sales_list (sale_id, sale_region, sale_amount) VALUES \d+ sales_list_west \d+ sales_list_north \d+ sales_list -EXECUTE spocktab('sales_list'); -- Expect the new partition to be listed +SELECT * FROM get_table_repset_info('sales_list'); -- Expect the new partition to be listed SELECT * FROM sales_list ORDER BY sale_id; -- Expect 4 rows -/*TO FIX: -commenting this test case due to https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -- Create a list partitioned table without primary key CREATE TABLE products_list ( product_id INT, @@ -66,7 +72,7 @@ INSERT INTO products_list (product_id, product_category, product_name) VALUES -- Validate structure and data \d+ products_list -EXECUTE spocktab('products_list'); -- Expect both parent and child tables in default_insert_only set +SELECT * FROM get_table_repset_info('products_list'); -- Expect both parent and child tables in default_insert_only set SELECT * FROM products_list ORDER BY product_id; -- Expect 3 rows -- Alter the 
products_list table to add a primary key @@ -76,6 +82,6 @@ ALTER TABLE products_list ADD PRIMARY KEY (product_id, product_category); \d+ products_list \d+ products_list_clothing \d+ products_list_electronics -EXECUTE spocktab('products_list'); -- Expect the replication set to change to default +SELECT * FROM get_table_repset_info('products_list'); -- Expect the replication set to change to default SELECT * FROM products_list ORDER BY product_id; -- Expect 3 rows -*/ + diff --git a/t/auto_ddl/6133b_table_list_partitions_validate_n2.out b/t/auto_ddl/6133b_table_list_partitions_validate_n2.out index 59c97001..3e70b40f 100644 --- a/t/auto_ddl/6133b_table_list_partitions_validate_n2.out +++ b/t/auto_ddl/6133b_table_list_partitions_validate_n2.out @@ -1,9 +1,16 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + --This file will run on n2 and validate all the replicated tables data, structure and replication sets they're in --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SET ROLE appuser; +SET +SET search_path TO s613, public; +SET \d+ sales_list_east - Table "public.sales_list_east" + Table "s613.sales_list_east" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -16,7 +23,7 @@ Indexes: Access method: heap \d+ sales_list_west - Table "public.sales_list_west" + Table "s613.sales_list_west" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -29,7 +36,7 @@ Indexes: Access method: heap \d+ sales_list_north - Table "public.sales_list_north" + Table "s613.sales_list_north" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -42,7 +49,7 @@ Indexes: Access method: heap \d+ sales_list - Partitioned table "public.sales_list" + Partitioned table "s613.sales_list" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description -------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- sale_id | integer | | not null | | plain | | | @@ -55,13 +62,13 @@ Partitions: sales_list_east FOR VALUES IN ('East'), sales_list_north FOR VALUES IN ('North'), sales_list_west FOR VALUES IN ('West') -EXECUTE spocktab('sales_list'); -- Expect the new partition to be listed +SELECT * FROM get_table_repset_info('sales_list'); -- Expect the new partition to be listed nspname | relname | set_name ---------+------------------+---------- - public | sales_list | default - public | sales_list_east | default - public | sales_list_west | default - public | sales_list_north | default + s613 | sales_list | default + s613 | sales_list_east | default + s613 | sales_list_west | default + s613 | sales_list_north | default (4 rows) SELECT * FROM sales_list ORDER BY sale_id; -- 
Expect 4 rows @@ -81,13 +88,65 @@ NOTICE: drop cascades to table sales_list_east membership in replication set de NOTICE: drop cascades to table sales_list membership in replication set default INFO: DDL statement replicated. DROP TABLE -/* -https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 \d+ products_list + Partitioned table "s613.products_list" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | not null | | plain | | | + product_category | text | | not null | | extended | | | + product_name | text | | | | extended | | | +Partition key: LIST (product_category) +Indexes: + "products_list_pkey" PRIMARY KEY, btree (product_id, product_category) +Partitions: products_list_clothing FOR VALUES IN ('Clothing'), + products_list_electronics FOR VALUES IN ('Electronics') + \d+ products_list_clothing + Table "s613.products_list_clothing" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | not null | | plain | | | + product_category | text | | not null | | extended | | | + product_name | text | | | | extended | | | +Partition of: products_list FOR VALUES IN ('Clothing') +Partition constraint: ((product_category IS NOT NULL) AND (product_category = 'Clothing'::text)) +Indexes: + "products_list_clothing_pkey" PRIMARY KEY, btree (product_id, product_category) +Access method: heap + \d+ products_list_electronics -EXECUTE spocktab('products_list'); -- Expect all to be in default repset + Table "s613.products_list_electronics" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +------------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- + product_id | integer | | not null | | plain | | | + product_category | text | | not null | | extended | | | + product_name | text | | | | extended | | | +Partition of: products_list FOR VALUES IN ('Electronics') +Partition constraint: ((product_category IS NOT NULL) AND (product_category = 'Electronics'::text)) +Indexes: + "products_list_electronics_pkey" PRIMARY KEY, btree (product_id, product_category) +Access method: heap + +SELECT * FROM get_table_repset_info('products_list'); -- Expect all to be in default repset + nspname | relname | set_name +---------+---------------------------+---------- + s613 | products_list | default + s613 | products_list_electronics | default + s613 | products_list_clothing | default +(3 rows) + SELECT * FROM products_list ORDER BY product_id; -- Expect 3 rows + product_id | product_category | product_name +------------+------------------+-------------- + 1 | Electronics | Laptop + 2 | Clothing | Shirt + 3 | Electronics | Smartphone +(3 rows) + --exercise ddl on n2 DROP TABLE products_list CASCADE; -*/ +NOTICE: drop cascades to table products_list_clothing membership in replication set default +NOTICE: drop cascades to table products_list_electronics membership in replication set default +NOTICE: drop cascades to table products_list membership in replication set default +INFO: DDL statement replicated. 
+DROP TABLE diff --git a/t/auto_ddl/6133b_table_list_partitions_validate_n2.sql b/t/auto_ddl/6133b_table_list_partitions_validate_n2.sql index 16ca3df8..208890c2 100644 --- a/t/auto_ddl/6133b_table_list_partitions_validate_n2.sql +++ b/t/auto_ddl/6133b_table_list_partitions_validate_n2.sql @@ -1,22 +1,24 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + --This file will run on n2 and validate all the replicated tables data, structure and replication sets they're in --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SET ROLE appuser; + +SET search_path TO s613, public; \d+ sales_list_east \d+ sales_list_west \d+ sales_list_north \d+ sales_list -EXECUTE spocktab('sales_list'); -- Expect the new partition to be listed +SELECT * FROM get_table_repset_info('sales_list'); -- Expect the new partition to be listed SELECT * FROM sales_list ORDER BY sale_id; -- Expect 4 rows --exercise ddl on n2 DROP TABLE sales_list CASCADE; -/* -https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 + \d+ products_list \d+ products_list_clothing \d+ products_list_electronics -EXECUTE spocktab('products_list'); -- Expect all to be in default repset +SELECT * FROM get_table_repset_info('products_list'); -- Expect all to be in default repset SELECT * FROM products_list ORDER BY product_id; -- Expect 3 rows --exercise ddl on n2 DROP TABLE products_list CASCADE; -*/ + diff --git a/t/auto_ddl/6133c_table_list_parition_validate_n1.out b/t/auto_ddl/6133c_table_list_parition_validate_n1.out index ff666941..a9a85c60 100644 --- a/t/auto_ddl/6133c_table_list_parition_validate_n1.out +++ b/t/auto_ddl/6133c_table_list_parition_validate_n1.out @@ -1,12 +1,16 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6133b ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; - relid | nspname | relname | set_name --------+---------+---------+---------- -(0 rows) - --- none of these tables should exist. +SET ROLE appuser; +SET +SET search_path TO s613, public; +SET +--none of these should exist \d sales_list_east Did not find any relation named "sales_list_east". \d sales_list_west @@ -15,8 +19,25 @@ Did not find any relation named "sales_list_west". Did not find any relation named "sales_list_north". \d sales_list Did not find any relation named "sales_list". -/* +SELECT * FROM get_table_repset_info('sales'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + \d+ products_list +Did not find any relation named "products_list". \d+ products_list_clothing +Did not find any relation named "products_list_clothing". \d+ products_list_electronics -*/ +Did not find any relation named "products_list_electronics". +SELECT * FROM get_table_repset_info('products'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s613 CASCADE; +INFO: DDL statement replicated. 
+DROP SCHEMA diff --git a/t/auto_ddl/6133c_table_list_parition_validate_n1.sql b/t/auto_ddl/6133c_table_list_parition_validate_n1.sql index 4157dd72..33c1ab81 100644 --- a/t/auto_ddl/6133c_table_list_parition_validate_n1.sql +++ b/t/auto_ddl/6133c_table_list_parition_validate_n1.sql @@ -1,16 +1,23 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6133b +SET ROLE appuser; + +SET search_path TO s613, public; +--none of these should exist ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; --- none of these tables should exist. \d sales_list_east \d sales_list_west \d sales_list_north \d sales_list +SELECT * FROM get_table_repset_info('sales'); -/* \d+ products_list \d+ products_list_clothing \d+ products_list_electronics -*/ \ No newline at end of file +SELECT * FROM get_table_repset_info('products'); + +RESET ROLE; +--dropping the schema +DROP SCHEMA s613 CASCADE; \ No newline at end of file diff --git a/t/auto_ddl/6144a_table_hash_partitions_n1.out b/t/auto_ddl/6144a_table_hash_partitions_n1.out index 84d310bd..2d84b0dd 100644 --- a/t/auto_ddl/6144a_table_hash_partitions_n1.out +++ b/t/auto_ddl/6144a_table_hash_partitions_n1.out @@ -1,6 +1,20 @@ --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s614; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA s614 TO appuser; +INFO: DDL statement replicated. 
+GRANT +SET ROLE appuser; +SET +SET search_path TO s614, public; +SET ----------------------------- -- Hash Partitioning ----------------------------- @@ -37,14 +51,14 @@ INSERT INTO sales_hash (sale_id, sale_date, sale_amount) VALUES (3, '2023-01-03', 150.0), (4, '2023-01-04', 250.0); INSERT 0 4 -EXECUTE spocktab('sales_hash'); -- Expect both parent and child tables in default repset +SELECT * FROM get_table_repset_info('sales_hash'); -- Expect both parent and child tables in default repset nspname | relname | set_name ---------+--------------+---------- - public | sales_hash | default - public | sales_hash_1 | default - public | sales_hash_2 | default - public | sales_hash_3 | default - public | sales_hash_4 | default + s614 | sales_hash | default + s614 | sales_hash_1 | default + s614 | sales_hash_2 | default + s614 | sales_hash_3 | default + s614 | sales_hash_4 | default (5 rows) SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows @@ -58,7 +72,7 @@ SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows -- Validate structure and data after adding new partition \d sales_hash_1 - Table "public.sales_hash_1" + Table "s614.sales_hash_1" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -69,7 +83,7 @@ Indexes: "sales_hash_1_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash_2 - Table "public.sales_hash_2" + Table "s614.sales_hash_2" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -80,7 +94,7 @@ Indexes: "sales_hash_2_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash_3 - Table "public.sales_hash_3" + Table "s614.sales_hash_3" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -91,7 +105,7 @@ Indexes: "sales_hash_3_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash_4 - Table "public.sales_hash_4" + Table "s614.sales_hash_4" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -102,7 +116,7 @@ Indexes: "sales_hash_4_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash - Partitioned table "public.sales_hash" + Partitioned table "s614.sales_hash" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -113,14 +127,14 @@ Indexes: "sales_hash_pkey" PRIMARY KEY, btree (sale_id, sale_date) Number of partitions: 4 (Use \d+ to list them.) 
-EXECUTE spocktab('sales_hash'); -- Expect all partitions to be listed +SELECT * FROM get_table_repset_info('sales_hash'); -- Expect all partitions to be listed nspname | relname | set_name ---------+--------------+---------- - public | sales_hash | default - public | sales_hash_1 | default - public | sales_hash_2 | default - public | sales_hash_3 | default - public | sales_hash_4 | default + s614 | sales_hash | default + s614 | sales_hash_1 | default + s614 | sales_hash_2 | default + s614 | sales_hash_3 | default + s614 | sales_hash_4 | default (5 rows) SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows @@ -166,7 +180,7 @@ INSERT INTO products_hash (product_id, product_date, product_name) VALUES INSERT 0 4 -- Validate structure and data \d+ products_hash - Partitioned table "public.products_hash" + Partitioned table "s614.products_hash" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------------+---------+-----------+----------+---------+----------+-------------+--------------+------------- product_id | integer | | | | plain | | | @@ -178,14 +192,14 @@ Partitions: products_hash_1 FOR VALUES WITH (modulus 4, remainder 0), products_hash_3 FOR VALUES WITH (modulus 4, remainder 2), products_hash_4 FOR VALUES WITH (modulus 4, remainder 3) -EXECUTE spocktab('products_hash'); -- Expect both parent and child tables in default_insert_only set +SELECT * FROM get_table_repset_info('products_hash'); -- Expect both parent and child tables in default_insert_only set nspname | relname | set_name ---------+-----------------+--------------------- - public | products_hash | default_insert_only - public | products_hash_1 | default_insert_only - public | products_hash_2 | default_insert_only - public | products_hash_3 | default_insert_only - public | products_hash_4 | default_insert_only + s614 | products_hash | default_insert_only + s614 | products_hash_1 | default_insert_only + s614 | products_hash_2 | default_insert_only + s614 | products_hash_3 | default_insert_only + s614 | products_hash_4 | default_insert_only (5 rows) SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows @@ -203,7 +217,7 @@ INFO: DDL statement replicated. ALTER TABLE -- Validate structure and data after adding primary key \d products_hash - Partitioned table "public.products_hash" + Partitioned table "s614.products_hash" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -215,7 +229,7 @@ Indexes: Number of partitions: 4 (Use \d+ to list them.) 
\d products_hash_1 - Table "public.products_hash_1" + Table "s614.products_hash_1" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -226,7 +240,7 @@ Indexes: "products_hash_1_pkey" PRIMARY KEY, btree (product_id, product_date) \d products_hash_2 - Table "public.products_hash_2" + Table "s614.products_hash_2" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -237,7 +251,7 @@ Indexes: "products_hash_2_pkey" PRIMARY KEY, btree (product_id, product_date) \d products_hash_3 - Table "public.products_hash_3" + Table "s614.products_hash_3" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -248,7 +262,7 @@ Indexes: "products_hash_3_pkey" PRIMARY KEY, btree (product_id, product_date) \d products_hash_4 - Table "public.products_hash_4" + Table "s614.products_hash_4" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -258,18 +272,14 @@ Partition of: products_hash FOR VALUES WITH (modulus 4, remainder 3) Indexes: "products_hash_4_pkey" PRIMARY KEY, btree (product_id, product_date) -/*TO FIX: -commenting this test case due to https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -only the parent table moves to default repset, all partitions continue to stay in default_insert_only -*/ -EXECUTE spocktab('products_hash'); -- Expect the replication set to change to default - nspname | relname | set_name ----------+-----------------+--------------------- - public | products_hash | default - public | products_hash_1 | default_insert_only - public | products_hash_2 | default_insert_only - public | products_hash_3 | default_insert_only - public | products_hash_4 | default_insert_only +SELECT * FROM get_table_repset_info('products_hash'); -- Expect the replication set to change to default + nspname | relname | set_name +---------+-----------------+---------- + s614 | products_hash | default + s614 | products_hash_1 | default + s614 | products_hash_2 | default + s614 | products_hash_3 | default + s614 | products_hash_4 | default (5 rows) SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows diff --git a/t/auto_ddl/6144a_table_hash_partitions_n1.sql b/t/auto_ddl/6144a_table_hash_partitions_n1.sql index d0892414..9293960e 100644 --- a/t/auto_ddl/6144a_table_hash_partitions_n1.sql +++ b/t/auto_ddl/6144a_table_hash_partitions_n1.sql @@ -1,5 +1,13 @@ --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s614; + +GRANT ALL PRIVILEGES ON SCHEMA s614 TO appuser; + +SET ROLE appuser; + +SET search_path TO s614, public; ----------------------------- -- Hash Partitioning @@ -30,7 +38,7 @@ INSERT INTO sales_hash (sale_id, sale_date, sale_amount) VALUES (3, '2023-01-03', 150.0), (4, '2023-01-04', 250.0); -EXECUTE spocktab('sales_hash'); -- Expect both parent 
and child tables in default repset +SELECT * FROM get_table_repset_info('sales_hash'); -- Expect both parent and child tables in default repset SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows -- Validate structure and data after adding new partition @@ -39,7 +47,7 @@ SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows \d sales_hash_3 \d sales_hash_4 \d sales_hash -EXECUTE spocktab('sales_hash'); -- Expect all partitions to be listed +SELECT * FROM get_table_repset_info('sales_hash'); -- Expect all partitions to be listed SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows -- Create a hash partitioned table without primary key @@ -68,7 +76,7 @@ INSERT INTO products_hash (product_id, product_date, product_name) VALUES -- Validate structure and data \d+ products_hash -EXECUTE spocktab('products_hash'); -- Expect both parent and child tables in default_insert_only set +SELECT * FROM get_table_repset_info('products_hash'); -- Expect both parent and child tables in default_insert_only set SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows -- Alter the products_hash table to add a primary key @@ -80,9 +88,6 @@ ALTER TABLE products_hash ADD PRIMARY KEY (product_id, product_date); \d products_hash_2 \d products_hash_3 \d products_hash_4 -/*TO FIX: -commenting this test case due to https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -only the parent table moves to default repset, all partitions continue to stay in default_insert_only -*/ -EXECUTE spocktab('products_hash'); -- Expect the replication set to change to default + +SELECT * FROM get_table_repset_info('products_hash'); -- Expect the replication set to change to default SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows diff --git a/t/auto_ddl/6144b_table_hash_partitions_validate_n2.out b/t/auto_ddl/6144b_table_hash_partitions_validate_n2.out index f7873c23..dc9c6777 100644 --- a/t/auto_ddl/6144b_table_hash_partitions_validate_n2.out +++ b/t/auto_ddl/6144b_table_hash_partitions_validate_n2.out @@ -1,10 +1,17 @@ --This file will run on n2 and validate all the replicated tables data, structure and replication sets they're in --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +SET ROLE appuser; +SET +SET search_path TO s614, public; +SET -- Validate structure and data after adding new partition \d sales_hash_1 - Table "public.sales_hash_1" + Table "s614.sales_hash_1" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -15,7 +22,7 @@ Indexes: "sales_hash_1_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash_2 - Table "public.sales_hash_2" + Table "s614.sales_hash_2" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -26,7 +33,7 @@ Indexes: "sales_hash_2_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash_3 - Table "public.sales_hash_3" + Table "s614.sales_hash_3" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- 
sale_id | integer | | not null | @@ -37,7 +44,7 @@ Indexes: "sales_hash_3_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash_4 - Table "public.sales_hash_4" + Table "s614.sales_hash_4" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -48,7 +55,7 @@ Indexes: "sales_hash_4_pkey" PRIMARY KEY, btree (sale_id, sale_date) \d sales_hash - Partitioned table "public.sales_hash" + Partitioned table "s614.sales_hash" Column | Type | Collation | Nullable | Default -------------+---------+-----------+----------+--------- sale_id | integer | | not null | @@ -59,14 +66,14 @@ Indexes: "sales_hash_pkey" PRIMARY KEY, btree (sale_id, sale_date) Number of partitions: 4 (Use \d+ to list them.) -EXECUTE spocktab('sales_hash'); -- Expect all partitions to be listed +SELECT * FROM get_table_repset_info('sales_hash'); -- Expect all partitions to be listed nspname | relname | set_name ---------+--------------+---------- - public | sales_hash | default - public | sales_hash_1 | default - public | sales_hash_2 | default - public | sales_hash_3 | default - public | sales_hash_4 | default + s614 | sales_hash | default + s614 | sales_hash_1 | default + s614 | sales_hash_2 | default + s614 | sales_hash_3 | default + s614 | sales_hash_4 | default (5 rows) SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows @@ -88,7 +95,7 @@ NOTICE: drop cascades to table sales_hash membership in replication set default INFO: DDL statement replicated. DROP TABLE \d products_hash - Partitioned table "public.products_hash" + Partitioned table "s614.products_hash" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -100,7 +107,7 @@ Indexes: Number of partitions: 4 (Use \d+ to list them.) 
\d products_hash_1 - Table "public.products_hash_1" + Table "s614.products_hash_1" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -111,7 +118,7 @@ Indexes: "products_hash_1_pkey" PRIMARY KEY, btree (product_id, product_date) \d products_hash_2 - Table "public.products_hash_2" + Table "s614.products_hash_2" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -122,7 +129,7 @@ Indexes: "products_hash_2_pkey" PRIMARY KEY, btree (product_id, product_date) \d products_hash_3 - Table "public.products_hash_3" + Table "s614.products_hash_3" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -133,7 +140,7 @@ Indexes: "products_hash_3_pkey" PRIMARY KEY, btree (product_id, product_date) \d products_hash_4 - Table "public.products_hash_4" + Table "s614.products_hash_4" Column | Type | Collation | Nullable | Default --------------+---------+-----------+----------+--------- product_id | integer | | not null | @@ -143,18 +150,14 @@ Partition of: products_hash FOR VALUES WITH (modulus 4, remainder 3) Indexes: "products_hash_4_pkey" PRIMARY KEY, btree (product_id, product_date) -/*TO FIX: -commenting this test case due to https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -only the parent table moves to default repset, all partitions continue to stay in default_insert_only -*/ -EXECUTE spocktab('products_hash'); -- Expect the replication set to change to default - nspname | relname | set_name ----------+-----------------+--------------------- - public | products_hash | default - public | products_hash_1 | default_insert_only - public | products_hash_2 | default_insert_only - public | products_hash_3 | default_insert_only - public | products_hash_4 | default_insert_only +SELECT * FROM get_table_repset_info('products_hash'); -- Expect the replication set to be default + nspname | relname | set_name +---------+-----------------+---------- + s614 | products_hash | default + s614 | products_hash_1 | default + s614 | products_hash_2 | default + s614 | products_hash_3 | default + s614 | products_hash_4 | default (5 rows) SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows @@ -168,10 +171,10 @@ SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows --exercise ddl on n2 DROP TABLE products_hash CASCADE; -NOTICE: drop cascades to table products_hash_4 membership in replication set default_insert_only -NOTICE: drop cascades to table products_hash_3 membership in replication set default_insert_only -NOTICE: drop cascades to table products_hash_2 membership in replication set default_insert_only -NOTICE: drop cascades to table products_hash_1 membership in replication set default_insert_only +NOTICE: drop cascades to table products_hash_4 membership in replication set default +NOTICE: drop cascades to table products_hash_3 membership in replication set default +NOTICE: drop cascades to table products_hash_2 membership in replication set default +NOTICE: drop cascades to table products_hash_1 membership in replication set default NOTICE: drop cascades to table products_hash membership in replication set default INFO: DDL statement replicated. 
DROP TABLE diff --git a/t/auto_ddl/6144b_table_hash_partitions_validate_n2.sql b/t/auto_ddl/6144b_table_hash_partitions_validate_n2.sql index 48e69cec..0cdb67c6 100644 --- a/t/auto_ddl/6144b_table_hash_partitions_validate_n2.sql +++ b/t/auto_ddl/6144b_table_hash_partitions_validate_n2.sql @@ -1,6 +1,9 @@ --This file will run on n2 and validate all the replicated tables data, structure and replication sets they're in --- Prepared statement for spock.tables to list parent and child tables as parent table name will be contained in partition name -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +SET ROLE appuser; + +SET search_path TO s614, public; -- Validate structure and data after adding new partition \d sales_hash_1 @@ -8,7 +11,7 @@ PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE re \d sales_hash_3 \d sales_hash_4 \d sales_hash -EXECUTE spocktab('sales_hash'); -- Expect all partitions to be listed +SELECT * FROM get_table_repset_info('sales_hash'); -- Expect all partitions to be listed SELECT * FROM sales_hash ORDER BY sale_id; -- Expect 4 rows --exercise ddl on n2 DROP TABLE sales_hash CASCADE; @@ -18,11 +21,8 @@ DROP TABLE sales_hash CASCADE; \d products_hash_2 \d products_hash_3 \d products_hash_4 -/*TO FIX: -commenting this test case due to https://github.com/orgs/pgEdge/projects/6/views/7?filterQuery=category%3AAutoDDL+&visibleFields=%5B%22Title%22%2C%22Assignees%22%2C%22Status%22%2C77649763%5D&pane=issue&itemId=69962278 -only the parent table moves to default repset, all partitions continue to stay in default_insert_only -*/ -EXECUTE spocktab('products_hash'); -- Expect the replication set to change to default + +SELECT * FROM get_table_repset_info('products_hash'); -- Expect the replication set to be default SELECT * FROM products_hash ORDER BY product_id; -- Expect 4 rows --exercise ddl on n2 DROP TABLE products_hash CASCADE; diff --git a/t/auto_ddl/6144c_table_hash_parition_validate_n1.out b/t/auto_ddl/6144c_table_hash_parition_validate_n1.out index 97e2a6c9..7b3bfb4d 100644 --- a/t/auto_ddl/6144c_table_hash_parition_validate_n1.out +++ b/t/auto_ddl/6144c_table_hash_parition_validate_n1.out @@ -1,11 +1,15 @@ -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6144b ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; - relid | nspname | relname | set_name --------+---------+---------+---------- -(0 rows) +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) +SET ROLE appuser; +SET +SET search_path TO s614, public; +SET -- none of these tables should exist. \d sales_hash_1 Did not find any relation named "sales_hash_1". @@ -17,8 +21,11 @@ Did not find any relation named "sales_hash_3". Did not find any relation named "sales_hash_4". \d sales_hash Did not find any relation named "sales_hash". -/* -*/ +SELECT * FROM get_table_repset_info('sales_hash'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + \d products_hash Did not find any relation named "products_hash". \d products_hash_1 @@ -29,3 +36,14 @@ Did not find any relation named "products_hash_2". Did not find any relation named "products_hash_3". \d products_hash_4 Did not find any relation named "products_hash_4". 
+SELECT * FROM get_table_repset_info('products_hash'); + nspname | relname | set_name +---------+---------+---------- +(0 rows) + +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s614 CASCADE; +INFO: DDL statement replicated. +DROP SCHEMA diff --git a/t/auto_ddl/6144c_table_hash_parition_validate_n1.sql b/t/auto_ddl/6144c_table_hash_parition_validate_n1.sql index b0a6a2f8..8e6908ca 100644 --- a/t/auto_ddl/6144c_table_hash_parition_validate_n1.sql +++ b/t/auto_ddl/6144c_table_hash_parition_validate_n1.sql @@ -1,19 +1,26 @@ -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6144b ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +SET ROLE appuser; + +SET search_path TO s614, public; -- none of these tables should exist. \d sales_hash_1 \d sales_hash_2 \d sales_hash_3 \d sales_hash_4 \d sales_hash +SELECT * FROM get_table_repset_info('sales_hash'); -/* -*/ \d products_hash \d products_hash_1 \d products_hash_2 \d products_hash_3 \d products_hash_4 +SELECT * FROM get_table_repset_info('products_hash'); + +RESET ROLE; +--dropping the schema +DROP SCHEMA s614 CASCADE; \ No newline at end of file diff --git a/t/auto_ddl/6155a_index_n1.out b/t/auto_ddl/6155a_index_n1.out index e829596c..957c4c36 100644 --- a/t/auto_ddl/6155a_index_n1.out +++ b/t/auto_ddl/6155a_index_n1.out @@ -1,6 +1,20 @@ --- Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s615; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA s615 TO appuser; +INFO: DDL statement replicated. +GRANT +SET ROLE appuser; +SET +SET search_path TO s615, public; +SET ----------------------------- -- INDEX tests ----------------------------- @@ -41,20 +55,20 @@ INFO: DDL statement replicated. 
CREATE INDEX -- Validate indexes \di *product_catalog_* - List of relations - Schema | Name | Type | Owner | Table ---------+----------------------------+-------+-------+----------------- - public | brin_product_catalog_idx | index | rocky | product_catalog - public | btree_product_catalog_idx | index | rocky | product_catalog - public | gin_product_catalog_idx | index | rocky | product_catalog - public | gist_product_catalog_idx | index | rocky | product_catalog - public | hash_product_catalog_idx | index | rocky | product_catalog - public | product_catalog_pkey | index | rocky | product_catalog - public | spgist_product_catalog_idx | index | rocky | product_catalog + List of relations + Schema | Name | Type | Owner | Table +--------+----------------------------+-------+---------+----------------- + s615 | brin_product_catalog_idx | index | appuser | product_catalog + s615 | btree_product_catalog_idx | index | appuser | product_catalog + s615 | gin_product_catalog_idx | index | appuser | product_catalog + s615 | gist_product_catalog_idx | index | appuser | product_catalog + s615 | hash_product_catalog_idx | index | appuser | product_catalog + s615 | product_catalog_pkey | index | appuser | product_catalog + s615 | spgist_product_catalog_idx | index | appuser | product_catalog (7 rows) \d product_catalog - Table "public.product_catalog" + Table "s615.product_catalog" Column | Type | Collation | Nullable | Default --------------+------------------------+-----------+----------+--------- product_id | integer | | not null | @@ -94,15 +108,15 @@ INFO: DDL statement replicated. CREATE INDEX -- Validate indexes \di *_emp_* - List of relations - Schema | Name | Type | Owner | Table ---------+----------------------+-------+-------+-------------------- - public | unique_emp_email_idx | index | rocky | employee_directory - public | unique_emp_id_idx | index | rocky | employee_directory + List of relations + Schema | Name | Type | Owner | Table +--------+----------------------+-------+---------+-------------------- + s615 | unique_emp_email_idx | index | appuser | employee_directory + s615 | unique_emp_id_idx | index | appuser | employee_directory (2 rows) \d employee_directory - Table "public.employee_directory" + Table "s615.employee_directory" Column | Type | Collation | Nullable | Default -----------+------------------------+-----------+----------+--------- emp_id | integer | | not null | @@ -135,11 +149,11 @@ INFO: DDL statement replicated. CREATE INDEX -- Validate indexes \di *sales_* - List of relations - Schema | Name | Type | Owner | Table ---------+-----------------------+-------+-------+------------ - public | func_sales_amount_idx | index | rocky | sales_data - public | sales_data_pkey | index | rocky | sales_data + List of relations + Schema | Name | Type | Owner | Table +--------+-----------------------+-------+---------+------------ + s615 | func_sales_amount_idx | index | appuser | sales_data + s615 | sales_data_pkey | index | appuser | sales_data (2 rows) -- Altering Indexes @@ -149,12 +163,12 @@ INFO: DDL statement replicated. 
CREATE INDEX -- Validate indexes \di *sales* - List of relations - Schema | Name | Type | Owner | Table ---------+------------------------+-------+-------+------------ - public | alter_sales_region_idx | index | rocky | sales_data - public | func_sales_amount_idx | index | rocky | sales_data - public | sales_data_pkey | index | rocky | sales_data + List of relations + Schema | Name | Type | Owner | Table +--------+------------------------+-------+---------+------------ + s615 | alter_sales_region_idx | index | appuser | sales_data + s615 | func_sales_amount_idx | index | appuser | sales_data + s615 | sales_data_pkey | index | appuser | sales_data (3 rows) -- Alter the index to rename it @@ -171,18 +185,18 @@ INFO: DDL statement replicated. CREATE INDEX -- Validate index \di *sales* - List of relations - Schema | Name | Type | Owner | Table ---------+--------------------------+-------+-------+------------ - public | conditional_sales_idx | index | rocky | sales_data - public | func_sales_amount_idx | index | rocky | sales_data - public | partial_sales_idx | index | rocky | sales_data - public | renamed_sales_region_idx | index | rocky | sales_data - public | sales_data_pkey | index | rocky | sales_data + List of relations + Schema | Name | Type | Owner | Table +--------+--------------------------+-------+---------+------------ + s615 | conditional_sales_idx | index | appuser | sales_data + s615 | func_sales_amount_idx | index | appuser | sales_data + s615 | partial_sales_idx | index | appuser | sales_data + s615 | renamed_sales_region_idx | index | appuser | sales_data + s615 | sales_data_pkey | index | appuser | sales_data (5 rows) \d sales_data - Table "public.sales_data" + Table "s615.sales_data" Column | Type | Collation | Nullable | Default -------------+-----------------------+-----------+----------+--------- sale_id | integer | | not null | @@ -218,15 +232,15 @@ WARNING: This DDL statement will not be replicated. 
CREATE INDEX -- Validate concurrently created indexes \di *concurrent* - List of relations - Schema | Name | Type | Owner | Table ---------+----------------------------------+-------+-------+-------------------- - public | concurrent_idx_tbl_name_idx | index | rocky | concurrent_idx_tbl - public | concurrent_unique_idx_tbl_id_idx | index | rocky | concurrent_idx_tbl + List of relations + Schema | Name | Type | Owner | Table +--------+----------------------------------+-------+---------+-------------------- + s615 | concurrent_idx_tbl_name_idx | index | appuser | concurrent_idx_tbl + s615 | concurrent_unique_idx_tbl_id_idx | index | appuser | concurrent_idx_tbl (2 rows) \d concurrent_idx_tbl - Table "public.concurrent_idx_tbl" + Table "s615.concurrent_idx_tbl" Column | Type | Collation | Nullable | Default --------+------------------------+-----------+----------+--------- id | integer | | | @@ -268,27 +282,27 @@ SELECT * FROM concurrent_idx_tbl WHERE name = 'Second'; (1 row) -- Validate replication sets for primary key-related tables -EXECUTE spocktab('product_catalog'); -- Expect product_catalog in default set +SELECT * FROM get_table_repset_info('product_catalog'); -- Expect product_catalog in default set nspname | relname | set_name ---------+-----------------+---------- - public | product_catalog | default + s615 | product_catalog | default (1 row) -EXECUTE spocktab('employee_directory'); -- Expect employee_directory in default set +SELECT * FROM get_table_repset_info('employee_directory'); -- Expect employee_directory in default set nspname | relname | set_name ---------+--------------------+---------- - public | employee_directory | default + s615 | employee_directory | default (1 row) -EXECUTE spocktab('sales_data'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('sales_data'); -- Expect sales_data in default set nspname | relname | set_name ---------+------------+---------- - public | sales_data | default + s615 | sales_data | default (1 row) -EXECUTE spocktab('concurrent_idx_tbl'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('concurrent_idx_tbl'); -- Expect sales_data in default set nspname | relname | set_name ---------+--------------------+--------------------- - public | concurrent_idx_tbl | default_insert_only + s615 | concurrent_idx_tbl | default_insert_only (1 row) diff --git a/t/auto_ddl/6155a_index_n1.sql b/t/auto_ddl/6155a_index_n1.sql index 35c4914e..f538c984 100644 --- a/t/auto_ddl/6155a_index_n1.sql +++ b/t/auto_ddl/6155a_index_n1.sql @@ -1,5 +1,13 @@ --- Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +--creating the necessary pre-reqs and then switching to the appuser role +CREATE SCHEMA IF NOT EXISTS s615; + +GRANT ALL PRIVILEGES ON SCHEMA s615 TO appuser; + +SET ROLE appuser; + +SET search_path TO s615, public; ----------------------------- -- INDEX tests @@ -126,7 +134,7 @@ SELECT * FROM sales_data WHERE sale_amount > 150; SELECT * FROM concurrent_idx_tbl WHERE name = 'Second'; -- Validate replication sets for primary key-related tables -EXECUTE spocktab('product_catalog'); -- Expect product_catalog in default set -EXECUTE spocktab('employee_directory'); -- Expect employee_directory in default set -EXECUTE spocktab('sales_data'); -- Expect sales_data in default set -EXECUTE 
spocktab('concurrent_idx_tbl'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('product_catalog'); -- Expect product_catalog in default set +SELECT * FROM get_table_repset_info('employee_directory'); -- Expect employee_directory in default set +SELECT * FROM get_table_repset_info('sales_data'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('concurrent_idx_tbl'); -- Expect sales_data in default set diff --git a/t/auto_ddl/6155b_index_validate_n2.out b/t/auto_ddl/6155b_index_validate_n2.out index 7aec7a9d..f43a6b6e 100644 --- a/t/auto_ddl/6155b_index_validate_n2.out +++ b/t/auto_ddl/6155b_index_validate_n2.out @@ -1,23 +1,30 @@ --- Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +SET ROLE appuser; +SET +SET search_path TO s615, public; +SET -- Validate and drop indexes on n2 -- Validate indexes on product_catalog \di *product_catalog_* - List of relations - Schema | Name | Type | Owner | Table ---------+----------------------------+-------+-------+----------------- - public | brin_product_catalog_idx | index | rocky | product_catalog - public | btree_product_catalog_idx | index | rocky | product_catalog - public | gin_product_catalog_idx | index | rocky | product_catalog - public | gist_product_catalog_idx | index | rocky | product_catalog - public | hash_product_catalog_idx | index | rocky | product_catalog - public | product_catalog_pkey | index | rocky | product_catalog - public | spgist_product_catalog_idx | index | rocky | product_catalog + List of relations + Schema | Name | Type | Owner | Table +--------+----------------------------+-------+---------+----------------- + s615 | brin_product_catalog_idx | index | appuser | product_catalog + s615 | btree_product_catalog_idx | index | appuser | product_catalog + s615 | gin_product_catalog_idx | index | appuser | product_catalog + s615 | gist_product_catalog_idx | index | appuser | product_catalog + s615 | hash_product_catalog_idx | index | appuser | product_catalog + s615 | product_catalog_pkey | index | appuser | product_catalog + s615 | spgist_product_catalog_idx | index | appuser | product_catalog (7 rows) \d product_catalog - Table "public.product_catalog" + Table "s615.product_catalog" Column | Type | Collation | Nullable | Default --------------+------------------------+-----------+----------+--------- product_id | integer | | not null | @@ -43,15 +50,15 @@ SELECT * FROM product_catalog WHERE product_id = 2; -- Expect 1 row with product -- Validate indexes on employee_directory \di *_emp_* - List of relations - Schema | Name | Type | Owner | Table ---------+----------------------+-------+-------+-------------------- - public | unique_emp_email_idx | index | rocky | employee_directory - public | unique_emp_id_idx | index | rocky | employee_directory + List of relations + Schema | Name | Type | Owner | Table +--------+----------------------+-------+---------+-------------------- + s615 | unique_emp_email_idx | index | appuser | employee_directory + s615 | unique_emp_id_idx | index | appuser | employee_directory (2 rows) \d employee_directory - Table "public.employee_directory" + Table "s615.employee_directory" Column | Type | Collation | Nullable | Default -----------+------------------------+-----------+----------+--------- emp_id 
| integer | | not null | @@ -71,18 +78,18 @@ SELECT * FROM employee_directory WHERE emp_email = 'bob@example.com'; -- Expect -- Validate indexes on sales_data \di *sales* - List of relations - Schema | Name | Type | Owner | Table ---------+--------------------------+-------+-------+------------ - public | conditional_sales_idx | index | rocky | sales_data - public | func_sales_amount_idx | index | rocky | sales_data - public | partial_sales_idx | index | rocky | sales_data - public | renamed_sales_region_idx | index | rocky | sales_data - public | sales_data_pkey | index | rocky | sales_data + List of relations + Schema | Name | Type | Owner | Table +--------+--------------------------+-------+---------+------------ + s615 | conditional_sales_idx | index | appuser | sales_data + s615 | func_sales_amount_idx | index | appuser | sales_data + s615 | partial_sales_idx | index | appuser | sales_data + s615 | renamed_sales_region_idx | index | appuser | sales_data + s615 | sales_data_pkey | index | appuser | sales_data (5 rows) \d sales_data - Table "public.sales_data" + Table "s615.sales_data" Column | Type | Collation | Nullable | Default -------------+-----------------------+-----------+----------+--------- sale_id | integer | | not null | @@ -107,7 +114,7 @@ SELECT * FROM sales_data WHERE sale_amount * 2 = 300.0; -- Expect 1 row with sal \di *concurrent* Did not find any relation named "*concurrent*". \d concurrent_idx_tbl - Table "public.concurrent_idx_tbl" + Table "s615.concurrent_idx_tbl" Column | Type | Collation | Nullable | Default --------+------------------------+-----------+----------+--------- id | integer | | | @@ -165,28 +172,28 @@ ERROR: index "concurrent_idx_tbl_name_idx" does not exist DROP INDEX CONCURRENTLY concurrent_unique_idx_tbl_id_idx; --error (since this did not replicate to n2) ERROR: index "concurrent_unique_idx_tbl_id_idx" does not exist -- Validate replication sets for primary key-related tables -EXECUTE spocktab('product_catalog'); -- Expect product_catalog in default set +SELECT * FROM get_table_repset_info('product_catalog'); -- Expect product_catalog in default set nspname | relname | set_name ---------+-----------------+---------- - public | product_catalog | default + s615 | product_catalog | default (1 row) -EXECUTE spocktab('employee_directory'); -- Expect employee_directory in default set +SELECT * FROM get_table_repset_info('employee_directory'); -- Expect employee_directory in default set nspname | relname | set_name ---------+--------------------+---------- - public | employee_directory | default + s615 | employee_directory | default (1 row) -EXECUTE spocktab('sales_data'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('sales_data'); -- Expect sales_data in default set nspname | relname | set_name ---------+------------+---------- - public | sales_data | default + s615 | sales_data | default (1 row) -EXECUTE spocktab('concurrent_idx_tbl'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('concurrent_idx_tbl'); -- Expect sales_data in default set nspname | relname | set_name ---------+--------------------+--------------------- - public | concurrent_idx_tbl | default_insert_only + s615 | concurrent_idx_tbl | default_insert_only (1 row) DROP TABLE product_catalog CASCADE; diff --git a/t/auto_ddl/6155b_index_validate_n2.sql b/t/auto_ddl/6155b_index_validate_n2.sql index b2d53b47..58af39bd 100644 --- a/t/auto_ddl/6155b_index_validate_n2.sql +++ b/t/auto_ddl/6155b_index_validate_n2.sql @@ -1,5 +1,8 @@ --- 
Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +SET ROLE appuser; + +SET search_path TO s615, public; -- Validate and drop indexes on n2 @@ -50,10 +53,10 @@ DROP INDEX CONCURRENTLY concurrent_idx_tbl_name_idx; --error (since this did not DROP INDEX CONCURRENTLY concurrent_unique_idx_tbl_id_idx; --error (since this did not replicate to n2) -- Validate replication sets for primary key-related tables -EXECUTE spocktab('product_catalog'); -- Expect product_catalog in default set -EXECUTE spocktab('employee_directory'); -- Expect employee_directory in default set -EXECUTE spocktab('sales_data'); -- Expect sales_data in default set -EXECUTE spocktab('concurrent_idx_tbl'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('product_catalog'); -- Expect product_catalog in default set +SELECT * FROM get_table_repset_info('employee_directory'); -- Expect employee_directory in default set +SELECT * FROM get_table_repset_info('sales_data'); -- Expect sales_data in default set +SELECT * FROM get_table_repset_info('concurrent_idx_tbl'); -- Expect sales_data in default set DROP TABLE product_catalog CASCADE; DROP TABLE sales_data CASCADE; diff --git a/t/auto_ddl/6155c_index_validate_drop_n1.out b/t/auto_ddl/6155c_index_validate_drop_n1.out index 9ea3c5c9..8a02059d 100644 --- a/t/auto_ddl/6155c_index_validate_drop_n1.out +++ b/t/auto_ddl/6155c_index_validate_drop_n1.out @@ -1,12 +1,15 @@ -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6155b ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; - relid | nspname | relname | set_name --------+---------+---------+---------- -(0 rows) +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) --- none of these tables should exist. +SET ROLE appuser; +SET +SET search_path TO s615, public; +SET -- Validate indexes on product_catalog, should not exist \di *product_catalog_* Did not find any relation named "*product_catalog_*". @@ -27,3 +30,9 @@ Did not find any relation named "sales_data". Did not find any relation named "*concurrent*". \d concurrent_idx_tbl Did not find any relation named "concurrent_idx_tbl". +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s615 CASCADE; +INFO: DDL statement replicated. +DROP SCHEMA diff --git a/t/auto_ddl/6155c_index_validate_drop_n1.sql b/t/auto_ddl/6155c_index_validate_drop_n1.sql index 22bc68ed..3985066c 100644 --- a/t/auto_ddl/6155c_index_validate_drop_n1.sql +++ b/t/auto_ddl/6155c_index_validate_drop_n1.sql @@ -1,9 +1,10 @@ -- This file runs on n1 again to see all the table and their partitions have been dropped on n1 (as a result of drop statements) -- being auto replicated via 6155b +SELECT pg_sleep(1);--to ensure all objects are replicated ---spock.tables should be empty -SELECT * FROM spock.tables ORDER BY relid; --- none of these tables should exist. 
+SET ROLE appuser; + +SET search_path TO s615, public; -- Validate indexes on product_catalog, should not exist \di *product_catalog_* @@ -20,3 +21,7 @@ SELECT * FROM spock.tables ORDER BY relid; -- Validate concurrently created indexes on concurrent_idx_tbl, should not exist \di *concurrent* \d concurrent_idx_tbl + +RESET ROLE; +--dropping the schema +DROP SCHEMA s615 CASCADE; \ No newline at end of file diff --git a/t/auto_ddl/6166a_views_materialized_views_n1.out b/t/auto_ddl/6166a_views_materialized_views_n1.out index a6c74dbc..b728d48a 100644 --- a/t/auto_ddl/6166a_views_materialized_views_n1.out +++ b/t/auto_ddl/6166a_views_materialized_views_n1.out @@ -1,8 +1,26 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +--creating the necessary pre-reqs and then switching to the appuser role -- Create user schema for testing CREATE SCHEMA test_schema; INFO: DDL statement replicated. CREATE SCHEMA -SET search_path TO test_schema, public; +CREATE SCHEMA IF NOT EXISTS s616; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA s616 TO appuser; +INFO: DDL statement replicated. +GRANT +GRANT ALL PRIVILEGES ON SCHEMA test_schema TO appuser; +INFO: DDL statement replicated. +GRANT +SET ROLE appuser; +SET +SET search_path TO test_schema, s616; SET -- Create a base table with a primary key CREATE TABLE test_tbl ( @@ -97,38 +115,37 @@ CREATE MATERIALIZED VIEW mv_test_view_tablespace TABLESPACE pg_default AS SELECT id, name, age FROM test_tbl WHERE age > 30; WARNING: DDL statement replicated, but could be unsafe. SELECT 1 --- Reset search_path to default -RESET search_path; -RESET +SET search_path TO s616, test_schema; +SET -- Create a simple view in the default schema -CREATE VIEW public.view_test_default AS +CREATE VIEW s616.view_test_default AS SELECT * FROM test_schema.test_tbl_no_pk; INFO: DDL statement replicated. CREATE VIEW -- Create or replace a view in the default schema -CREATE OR REPLACE VIEW public.view_test_default AS +CREATE OR REPLACE VIEW s616.view_test_default AS SELECT id, description FROM test_schema.test_tbl_no_pk WHERE id > 1; INFO: DDL statement replicated. CREATE VIEW --creating views and materialized views that depend on other views -- Create a view that depends on another view CREATE VIEW test_schema.view_depends_on_default AS -SELECT id, description FROM public.view_test_default WHERE id > 1; +SELECT id, description FROM s616.view_test_default WHERE id > 1; INFO: DDL statement replicated. CREATE VIEW -- Create a materialized view that depends on a view in another schema -CREATE MATERIALIZED VIEW public.mv_depends_on_test_schema AS +CREATE MATERIALIZED VIEW s616.mv_depends_on_test_schema AS SELECT id, name, age FROM test_schema.mv_test_view; WARNING: DDL statement replicated, but could be unsafe. SELECT 1 --- Create a new view that depends on the materialized view public.mv_depends_on_test_schema -CREATE VIEW public.view_depends_on_mv AS -SELECT id, name FROM public.mv_depends_on_test_schema WHERE age > 30; +-- Create a new view that depends on the materialized view s616.mv_depends_on_test_schema +CREATE VIEW s616.view_depends_on_mv AS +SELECT id, name FROM s616.mv_depends_on_test_schema WHERE age > 30; INFO: DDL statement replicated. 
CREATE VIEW -- Create a new materialized view that depends on a regular view -CREATE MATERIALIZED VIEW public.mv_depends_on_mv AS -SELECT id, name FROM public.view_depends_on_mv; +CREATE MATERIALIZED VIEW s616.mv_depends_on_mv AS +SELECT id, name FROM s616.view_depends_on_mv; WARNING: DDL statement replicated, but could be unsafe. SELECT 1 -- Validations @@ -145,7 +162,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 25; -- Expect 2 rows: Alice, Carol @@ -168,7 +185,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; -- Expect 1 row: Carol @@ -218,7 +235,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl; + FROM test_tbl; Options: security_barrier=true -- Expect 3 rows: Alice, Bob, Carol @@ -242,7 +259,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 25; Options: check_option=local @@ -266,7 +283,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; Access method: heap @@ -289,7 +306,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; Access method: heap @@ -312,7 +329,7 @@ View definition: SELECT id AS person_id, name AS person_name, age AS person_age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; Access method: heap @@ -335,7 +352,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; Access method: heap @@ -358,7 +375,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; Access method: heap Options: fillfactor=70 @@ -382,7 +399,7 @@ View definition: SELECT id, name, age - FROM test_schema.test_tbl + FROM test_tbl WHERE age > 30; Access method: heap @@ -394,8 +411,8 @@ SELECT * FROM test_schema.mv_test_view_tablespace ORDER BY id; (1 row) -- Validation for view_test_default -\d+ public.view_test_default - View "public.view_test_default" +\d+ s616.view_test_default + View "s616.view_test_default" Column | Type | Collation | Nullable | Default | Storage | Description -------------+---------+-----------+----------+---------+----------+------------- id | integer | | | | plain | @@ -403,11 +420,11 @@ SELECT * FROM test_schema.mv_test_view_tablespace ORDER BY id; View definition: SELECT id, description - FROM test_schema.test_tbl_no_pk + FROM test_tbl_no_pk WHERE id > 1; -- Expect 1 row: Second description -SELECT * FROM public.view_test_default ORDER BY id; +SELECT * FROM s616.view_test_default ORDER BY id; id | description ----+-------------------- 2 | Second description @@ -434,8 +451,8 @@ SELECT * FROM test_schema.view_depends_on_default ORDER BY id; (1 row) -- Validation for mv_depends_on_test_schema -\d+ public.mv_depends_on_test_schema - Materialized view "public.mv_depends_on_test_schema" +\d+ s616.mv_depends_on_test_schema + Materialized view "s616.mv_depends_on_test_schema" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | | | plain | | | @@ -445,19 +462,19 @@ View definition: SELECT id, name, age - FROM test_schema.mv_test_view; + FROM mv_test_view; Access method: heap -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_test_schema ORDER BY id; +SELECT * FROM s616.mv_depends_on_test_schema ORDER BY id; id | name | age 
----+-------+----- 3 | Carol | 35 (1 row) -- Validation for view_depends_on_mv -\d+ public.view_depends_on_mv - View "public.view_depends_on_mv" +\d+ s616.view_depends_on_mv + View "s616.view_depends_on_mv" Column | Type | Collation | Nullable | Default | Storage | Description --------+-----------------------+-----------+----------+---------+----------+------------- id | integer | | | | plain | @@ -469,15 +486,15 @@ View definition: WHERE age > 30; -- Expect 1 row: Carol -SELECT * FROM public.view_depends_on_mv ORDER BY id; +SELECT * FROM s616.view_depends_on_mv ORDER BY id; id | name ----+------- 3 | Carol (1 row) -- Validation for mv_depends_on_mv -\d+ public.mv_depends_on_mv - Materialized view "public.mv_depends_on_mv" +\d+ s616.mv_depends_on_mv + Materialized view "s616.mv_depends_on_mv" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | | | plain | | | @@ -489,7 +506,7 @@ View definition: Access method: heap -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_mv ORDER BY id; +SELECT * FROM s616.mv_depends_on_mv ORDER BY id; id | name ----+------- 3 | Carol diff --git a/t/auto_ddl/6166a_views_materialized_views_n1.sql b/t/auto_ddl/6166a_views_materialized_views_n1.sql index 3b874a14..25a68cdc 100644 --- a/t/auto_ddl/6166a_views_materialized_views_n1.sql +++ b/t/auto_ddl/6166a_views_materialized_views_n1.sql @@ -1,6 +1,17 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + +--creating the necessary pre-reqs and then switching to the appuser role + -- Create user schema for testing CREATE SCHEMA test_schema; -SET search_path TO test_schema, public; +CREATE SCHEMA IF NOT EXISTS s616; + +GRANT ALL PRIVILEGES ON SCHEMA s616 TO appuser; +GRANT ALL PRIVILEGES ON SCHEMA test_schema TO appuser; + +SET ROLE appuser; + +SET search_path TO test_schema, s616; -- Create a base table with a primary key CREATE TABLE test_tbl ( @@ -79,33 +90,32 @@ SELECT id, name, age FROM test_tbl WHERE age > 30; CREATE MATERIALIZED VIEW mv_test_view_tablespace TABLESPACE pg_default AS SELECT id, name, age FROM test_tbl WHERE age > 30; --- Reset search_path to default -RESET search_path; +SET search_path TO s616, test_schema; -- Create a simple view in the default schema -CREATE VIEW public.view_test_default AS +CREATE VIEW s616.view_test_default AS SELECT * FROM test_schema.test_tbl_no_pk; -- Create or replace a view in the default schema -CREATE OR REPLACE VIEW public.view_test_default AS +CREATE OR REPLACE VIEW s616.view_test_default AS SELECT id, description FROM test_schema.test_tbl_no_pk WHERE id > 1; --creating views and materialized views that depend on other views -- Create a view that depends on another view CREATE VIEW test_schema.view_depends_on_default AS -SELECT id, description FROM public.view_test_default WHERE id > 1; +SELECT id, description FROM s616.view_test_default WHERE id > 1; -- Create a materialized view that depends on a view in another schema -CREATE MATERIALIZED VIEW public.mv_depends_on_test_schema AS +CREATE MATERIALIZED VIEW s616.mv_depends_on_test_schema AS SELECT id, name, age FROM test_schema.mv_test_view; --- Create a new view that depends on the materialized view public.mv_depends_on_test_schema -CREATE VIEW public.view_depends_on_mv AS -SELECT id, name FROM public.mv_depends_on_test_schema WHERE age > 30; +-- Create a new view that depends on the materialized view 
s616.mv_depends_on_test_schema +CREATE VIEW s616.view_depends_on_mv AS +SELECT id, name FROM s616.mv_depends_on_test_schema WHERE age > 30; -- Create a new materialized view that depends on a regular view -CREATE MATERIALIZED VIEW public.mv_depends_on_mv AS -SELECT id, name FROM public.view_depends_on_mv; +CREATE MATERIALIZED VIEW s616.mv_depends_on_mv AS +SELECT id, name FROM s616.view_depends_on_mv; -- Validations -- Validate structure and data in views @@ -165,9 +175,9 @@ SELECT * FROM test_schema.mv_test_view_storage ORDER BY id; SELECT * FROM test_schema.mv_test_view_tablespace ORDER BY id; -- Validation for view_test_default -\d+ public.view_test_default +\d+ s616.view_test_default -- Expect 1 row: Second description -SELECT * FROM public.view_test_default ORDER BY id; +SELECT * FROM s616.view_test_default ORDER BY id; -- Validation for view_depends_on_default \d+ test_schema.view_depends_on_default @@ -175,16 +185,16 @@ SELECT * FROM public.view_test_default ORDER BY id; SELECT * FROM test_schema.view_depends_on_default ORDER BY id; -- Validation for mv_depends_on_test_schema -\d+ public.mv_depends_on_test_schema +\d+ s616.mv_depends_on_test_schema -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_test_schema ORDER BY id; +SELECT * FROM s616.mv_depends_on_test_schema ORDER BY id; -- Validation for view_depends_on_mv -\d+ public.view_depends_on_mv +\d+ s616.view_depends_on_mv -- Expect 1 row: Carol -SELECT * FROM public.view_depends_on_mv ORDER BY id; +SELECT * FROM s616.view_depends_on_mv ORDER BY id; -- Validation for mv_depends_on_mv -\d+ public.mv_depends_on_mv +\d+ s616.mv_depends_on_mv -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_mv ORDER BY id; +SELECT * FROM s616.mv_depends_on_mv ORDER BY id; diff --git a/t/auto_ddl/6166b_view_mat_views_validate_n2.out b/t/auto_ddl/6166b_view_mat_views_validate_n2.out index 6b7275b8..87452aff 100644 --- a/t/auto_ddl/6166b_view_mat_views_validate_n2.out +++ b/t/auto_ddl/6166b_view_mat_views_validate_n2.out @@ -263,8 +263,8 @@ SELECT * FROM test_schema.mv_test_view_tablespace ORDER BY id; (1 row) -- Validation for view_test_default -\d+ public.view_test_default - View "public.view_test_default" +\d+ s616.view_test_default + View "s616.view_test_default" Column | Type | Collation | Nullable | Default | Storage | Description -------------+---------+-----------+----------+---------+----------+------------- id | integer | | | | plain | @@ -276,7 +276,7 @@ View definition: WHERE id > 1; -- Expect 1 row: Second description -SELECT * FROM public.view_test_default ORDER BY id; +SELECT * FROM s616.view_test_default ORDER BY id; id | description ----+-------------------- 2 | Second description @@ -292,7 +292,7 @@ SELECT * FROM public.view_test_default ORDER BY id; View definition: SELECT id, description - FROM view_test_default + FROM s616.view_test_default WHERE id > 1; -- Expect 1 row: Second description @@ -303,8 +303,8 @@ SELECT * FROM test_schema.view_depends_on_default ORDER BY id; (1 row) -- Validation for mv_depends_on_test_schema -\d+ public.mv_depends_on_test_schema - Materialized view "public.mv_depends_on_test_schema" +\d+ s616.mv_depends_on_test_schema + Materialized view "s616.mv_depends_on_test_schema" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | | | plain | | | @@ -318,15 +318,15 @@ View definition: Access method: heap -- 
Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_test_schema ORDER BY id; +SELECT * FROM s616.mv_depends_on_test_schema ORDER BY id; id | name | age ----+-------+----- 3 | Carol | 35 (1 row) -- Validation for view_depends_on_mv -\d+ public.view_depends_on_mv - View "public.view_depends_on_mv" +\d+ s616.view_depends_on_mv + View "s616.view_depends_on_mv" Column | Type | Collation | Nullable | Default | Storage | Description --------+-----------------------+-----------+----------+---------+----------+------------- id | integer | | | | plain | @@ -334,19 +334,19 @@ SELECT * FROM public.mv_depends_on_test_schema ORDER BY id; View definition: SELECT id, name - FROM mv_depends_on_test_schema + FROM s616.mv_depends_on_test_schema WHERE age > 30; -- Expect 1 row: Carol -SELECT * FROM public.view_depends_on_mv ORDER BY id; +SELECT * FROM s616.view_depends_on_mv ORDER BY id; id | name ----+------- 3 | Carol (1 row) -- Validation for mv_depends_on_mv -\d+ public.mv_depends_on_mv - Materialized view "public.mv_depends_on_mv" +\d+ s616.mv_depends_on_mv + Materialized view "s616.mv_depends_on_mv" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+-----------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | | | plain | | | @@ -354,25 +354,25 @@ SELECT * FROM public.view_depends_on_mv ORDER BY id; View definition: SELECT id, name - FROM view_depends_on_mv; + FROM s616.view_depends_on_mv; Access method: heap -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_mv ORDER BY id; +SELECT * FROM s616.mv_depends_on_mv ORDER BY id; id | name ----+------- 3 | Carol (1 row) -- Drop views and materialized views --- Drop the materialized view in the public schema that depends on a view in the test_schema -DROP MATERIALIZED VIEW IF EXISTS public.mv_depends_on_test_schema CASCADE; +-- Drop the materialized view in the s616 schema that depends on a view in the test_schema +DROP MATERIALIZED VIEW IF EXISTS s616.mv_depends_on_test_schema CASCADE; NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to view view_depends_on_mv -drop cascades to materialized view mv_depends_on_mv +DETAIL: drop cascades to view s616.view_depends_on_mv +drop cascades to materialized view s616.mv_depends_on_mv INFO: DDL statement replicated. DROP MATERIALIZED VIEW --- Drop the view in the test_schema that depends on another view in the public schema +-- Drop the view in the test_schema that depends on another view in the s616 schema DROP VIEW test_schema.view_depends_on_default CASCADE; INFO: DDL statement replicated. DROP VIEW @@ -382,7 +382,7 @@ DROP VIEW IF EXISTS test_schema.view_test_1, test_schema.view_recursive, test_schema.view_with_options, test_schema.view_with_check_option, - public.view_test_default CASCADE; + s616.view_test_default CASCADE; INFO: DDL statement replicated. DROP VIEW DROP MATERIALIZED VIEW IF EXISTS test_schema.mv_test_view, @@ -402,7 +402,3 @@ DROP TABLE IF EXISTS test_schema.test_tbl_no_pk CASCADE; NOTICE: drop cascades to table test_schema.test_tbl_no_pk membership in replication set default_insert_only INFO: DDL statement replicated. DROP TABLE --- Drop the schema -DROP SCHEMA test_schema CASCADE; -INFO: DDL statement replicated. 
-DROP SCHEMA diff --git a/t/auto_ddl/6166b_view_mat_views_validate_n2.sql b/t/auto_ddl/6166b_view_mat_views_validate_n2.sql index 6aa848d8..365ebace 100644 --- a/t/auto_ddl/6166b_view_mat_views_validate_n2.sql +++ b/t/auto_ddl/6166b_view_mat_views_validate_n2.sql @@ -59,9 +59,9 @@ SELECT * FROM test_schema.mv_test_view_storage ORDER BY id; SELECT * FROM test_schema.mv_test_view_tablespace ORDER BY id; -- Validation for view_test_default -\d+ public.view_test_default +\d+ s616.view_test_default -- Expect 1 row: Second description -SELECT * FROM public.view_test_default ORDER BY id; +SELECT * FROM s616.view_test_default ORDER BY id; -- Validation for view_depends_on_default \d+ test_schema.view_depends_on_default @@ -69,26 +69,26 @@ SELECT * FROM public.view_test_default ORDER BY id; SELECT * FROM test_schema.view_depends_on_default ORDER BY id; -- Validation for mv_depends_on_test_schema -\d+ public.mv_depends_on_test_schema +\d+ s616.mv_depends_on_test_schema -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_test_schema ORDER BY id; +SELECT * FROM s616.mv_depends_on_test_schema ORDER BY id; -- Validation for view_depends_on_mv -\d+ public.view_depends_on_mv +\d+ s616.view_depends_on_mv -- Expect 1 row: Carol -SELECT * FROM public.view_depends_on_mv ORDER BY id; +SELECT * FROM s616.view_depends_on_mv ORDER BY id; -- Validation for mv_depends_on_mv -\d+ public.mv_depends_on_mv +\d+ s616.mv_depends_on_mv -- Expect 1 row: Carol -SELECT * FROM public.mv_depends_on_mv ORDER BY id; +SELECT * FROM s616.mv_depends_on_mv ORDER BY id; -- Drop views and materialized views --- Drop the materialized view in the public schema that depends on a view in the test_schema -DROP MATERIALIZED VIEW IF EXISTS public.mv_depends_on_test_schema CASCADE; +-- Drop the materialized view in the s616 schema that depends on a view in the test_schema +DROP MATERIALIZED VIEW IF EXISTS s616.mv_depends_on_test_schema CASCADE; --- Drop the view in the test_schema that depends on another view in the public schema +-- Drop the view in the test_schema that depends on another view in the s616 schema DROP VIEW test_schema.view_depends_on_default CASCADE; -- Drop all other views and materialized views @@ -97,7 +97,7 @@ DROP VIEW IF EXISTS test_schema.view_test_1, test_schema.view_recursive, test_schema.view_with_options, test_schema.view_with_check_option, - public.view_test_default CASCADE; + s616.view_test_default CASCADE; DROP MATERIALIZED VIEW IF EXISTS test_schema.mv_test_view, test_schema.mv_test_view_colnames, @@ -109,5 +109,3 @@ DROP MATERIALIZED VIEW IF EXISTS test_schema.mv_test_view, DROP TABLE IF EXISTS test_schema.test_tbl CASCADE; DROP TABLE IF EXISTS test_schema.test_tbl_no_pk CASCADE; --- Drop the schema -DROP SCHEMA test_schema CASCADE; diff --git a/t/auto_ddl/6166c_views_mat_view_validate_n1.out b/t/auto_ddl/6166c_views_mat_view_validate_n1.out index 3e3946ad..00d63006 100644 --- a/t/auto_ddl/6166c_views_mat_view_validate_n1.out +++ b/t/auto_ddl/6166c_views_mat_view_validate_n1.out @@ -23,13 +23,23 @@ Did not find any relation named "test_schema.mv_test_view_method". Did not find any relation named "test_schema.mv_test_view_storage". \d test_schema.mv_test_view_tablespace Did not find any relation named "test_schema.mv_test_view_tablespace". -\d public.view_test_default -Did not find any relation named "public.view_test_default". +\d s616.view_test_default +Did not find any relation named "s616.view_test_default". 
\d test_schema.view_depends_on_default Did not find any relation named "test_schema.view_depends_on_default". -\d public.mv_depends_on_test_schema -Did not find any relation named "public.mv_depends_on_test_schema". -\d public.view_depends_on_mv -Did not find any relation named "public.view_depends_on_mv". -\d public.mv_depends_on_mv -Did not find any relation named "public.mv_depends_on_mv". +\d s616.mv_depends_on_test_schema +Did not find any relation named "s616.mv_depends_on_test_schema". +\d s616.view_depends_on_mv +Did not find any relation named "s616.view_depends_on_mv". +\d s616.mv_depends_on_mv +Did not find any relation named "s616.mv_depends_on_mv". +RESET ROLE; +RESET +--dropping the schema +DROP SCHEMA s616 CASCADE; +INFO: DDL statement replicated. +DROP SCHEMA +-- Drop the schema +DROP SCHEMA test_schema CASCADE; +INFO: DDL statement replicated. +DROP SCHEMA diff --git a/t/auto_ddl/6166c_views_mat_view_validate_n1.sql b/t/auto_ddl/6166c_views_mat_view_validate_n1.sql index cade8158..a0d8cfcf 100644 --- a/t/auto_ddl/6166c_views_mat_view_validate_n1.sql +++ b/t/auto_ddl/6166c_views_mat_view_validate_n1.sql @@ -24,12 +24,18 @@ \d test_schema.mv_test_view_tablespace -\d public.view_test_default +\d s616.view_test_default \d test_schema.view_depends_on_default -\d public.mv_depends_on_test_schema +\d s616.mv_depends_on_test_schema -\d public.view_depends_on_mv +\d s616.view_depends_on_mv -\d public.mv_depends_on_mv \ No newline at end of file +\d s616.mv_depends_on_mv + +RESET ROLE; +--dropping the schema +DROP SCHEMA s616 CASCADE; +-- Drop the schema +DROP SCHEMA test_schema CASCADE; \ No newline at end of file diff --git a/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.out b/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.out index 10d4e4de..2038845d 100644 --- a/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.out +++ b/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.out @@ -1,6 +1,38 @@ --- Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +--creating the necessary pre-reqs and then switching to the appuser2 role +CREATE ROLE appuser2 LOGIN; +INFO: DDL statement replicated. +CREATE ROLE +CREATE SCHEMA IF NOT EXISTS s617; +INFO: DDL statement replicated. +CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA public TO appuser2; +INFO: DDL statement replicated. +GRANT +-- Grant execution rights to the non-superuser +GRANT EXECUTE ON FUNCTION public.get_table_repset_info(TEXT) TO appuser2; +INFO: DDL statement replicated. +GRANT +GRANT ALL PRIVILEGES ON SCHEMA s617 TO appuser2; +INFO: DDL statement replicated. +GRANT +DO $$ +DECLARE + db_name TEXT; +BEGIN + -- Get the name of the current database + db_name := current_database(); + + -- Dynamically execute the GRANT command for appuser2 + EXECUTE format('GRANT CREATE ON DATABASE %I TO appuser2', db_name); +END $$; +INFO: DDL statement replicated. +DO -- Turn on the allow_ddl_from_functions GUC ALTER SYSTEM SET spock.allow_ddl_from_functions = on; WARNING: This DDL statement will not be replicated. @@ -23,6 +55,10 @@ SHOW spock.allow_ddl_from_functions; on (1 row) +SET ROLE appuser2; +SET +SET search_path TO s617, public; +SET -- Create simple tables CREATE TABLE tab1_proc_on (id INT PRIMARY KEY, col1 TEXT, col2 INT); INFO: DDL statement replicated. 
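A note on the helper used throughout this change: the per-file prepared statement (PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid) is retired in favor of public.get_table_repset_info(TEXT), which persists in the catalog across sessions and, as the GRANT EXECUTE above shows, can be handed to non-superuser test roles. Its definition is not part of this diff; a minimal sketch consistent with the retired query and the (nspname, relname, set_name) output in these files would be:

    -- Sketch only; the suite's actual definition lives in its setup scripts and may differ.
    CREATE OR REPLACE FUNCTION public.get_table_repset_info(rel_pattern TEXT)
    RETURNS TABLE (nspname text, relname text, set_name text)
    LANGUAGE sql STABLE
    AS $$
        -- Same query the spocktab prepared statement ran: the LIKE pattern picks up
        -- both a parent table and its partitions, whose names contain the parent's.
        SELECT t.nspname::text, t.relname::text, t.set_name::text
        FROM spock.tables t
        WHERE t.relname LIKE '%' || rel_pattern || '%'
        ORDER BY t.relid;
    $$;

Called as SELECT * FROM get_table_repset_info('tab'), it returns the same rows the old EXECUTE spocktab('tab') form did.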
@@ -118,16 +154,16 @@ FOR EACH ROW EXECUTE FUNCTION employee_insert_trigger(); INFO: DDL statement replicated. CREATE TRIGGER -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); nspname | relname | set_name ---------+---------------+--------------------- - public | tab1_proc_on | default - public | tab2_func_on | default - public | tab3_anon_on | default_insert_only - public | tab6_anon_off | default - public | tab4_proc_off | default - public | tab5_func_off | default - public | tab_emp | default + s617 | tab1_proc_on | default + s617 | tab2_func_on | default + s617 | tab3_anon_on | default_insert_only + s617 | tab6_anon_off | default + s617 | tab4_proc_off | default + s617 | tab5_func_off | default + s617 | tab_emp | default (7 rows) -- Add a primary key to the table tab3 within an anonymous block @@ -187,21 +223,23 @@ INFO: DDL statement replicated. INFO: DDL statement replicated. INFO: DDL statement replicated. DO -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); nspname | relname | set_name ---------+---------------+---------- - public | tab1_proc_on | default - public | tab2_func_on | default - public | tab3_anon_on | default - public | tab6_anon_off | default - public | tab4_proc_off | default - public | tab5_func_off | default - public | tab_emp | default + s617 | tab1_proc_on | default + s617 | tab2_func_on | default + s617 | tab3_anon_on | default + s617 | tab6_anon_off | default + s617 | tab4_proc_off | default + s617 | tab5_func_off | default + s617 | tab_emp | default (7 rows) ------ -- Turning allow_ddl_from_functions GUC off ------ +RESET ROLE; +RESET -- Turn off the allow_ddl_from_functions GUC ALTER SYSTEM SET spock.allow_ddl_from_functions = off; WARNING: This DDL statement will not be replicated. 
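The two repset listings above capture the invariant these tests assert: a table created without a primary key lands in default_insert_only, and it moves to default once a primary key exists (the earlier products_hash hunks validate the same rule propagating to partitions). Condensed into a standalone illustration (demo_no_pk is a hypothetical name, not part of the suite):

    -- Hypothetical walk-through of the repset rule validated above, assuming a node
    -- with Spock auto-DDL enabled and the get_table_repset_info helper installed.
    CREATE TABLE demo_no_pk (id INT, note TEXT);
    SELECT * FROM get_table_repset_info('demo_no_pk');  -- expect default_insert_only
    ALTER TABLE demo_no_pk ADD PRIMARY KEY (id);
    SELECT * FROM get_table_repset_info('demo_no_pk');  -- expect default
    DROP TABLE demo_no_pk;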
@@ -224,6 +262,10 @@ SHOW spock.allow_ddl_from_functions; off (1 row) +SET ROLE appuser2; +SET +SET search_path TO s617, public; +SET -- Run anonymous block to create tab7 DO $$ BEGIN @@ -279,25 +321,25 @@ DO List of functions Schema | Name | Result data type | Argument data types | Type --------+--------------------------+------------------+--------------------------------------------------------------------------------------------------------------------+------ - public | add_column_to_table_proc | | IN table_name character varying, IN varname character varying, IN vartype character varying, INOUT success boolean | proc + s617 | add_column_to_table_proc | | IN table_name character varying, IN varname character varying, IN vartype character varying, INOUT success boolean | proc (1 row) \df remove_column* List of functions Schema | Name | Result data type | Argument data types | Type --------+--------------------------+------------------+-------------------------------------------------------------+------ - public | remove_column_from_table | boolean | table_name character varying, column_name character varying | func + s617 | remove_column_from_table | boolean | table_name character varying, column_name character varying | func (1 row) \df employee_insert_trigger List of functions Schema | Name | Result data type | Argument data types | Type --------+-------------------------+------------------+---------------------+------ - public | employee_insert_trigger | trigger | | func + s617 | employee_insert_trigger | trigger | | func (1 row) \d+ tab1_proc_on - Table "public.tab1_proc_on" + Table "s617.tab1_proc_on" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+--------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -311,7 +353,7 @@ Indexes: Access method: heap \d+ tab2_func_on - Table "public.tab2_func_on" + Table "s617.tab2_func_on" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -325,7 +367,7 @@ Indexes: Access method: heap \d+ tab3_anon_on - Table "public.tab3_anon_on" + Table "s617.tab3_anon_on" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -336,7 +378,7 @@ Indexes: Access method: heap \d+ tab4_proc_off - Table "public.tab4_proc_off" + Table "s617.tab4_proc_off" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+--------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -350,7 +392,7 @@ Indexes: Access method: heap \d+ tab5_func_off - Table "public.tab5_func_off" + Table "s617.tab5_func_off" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -364,7 +406,7 @@ Indexes: Access method: heap \d+ tab6_anon_off - Table "public.tab6_anon_off" + Table "s617.tab6_anon_off" Column | Type | Collation | 
Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- col1 | text | | | | extended | | | @@ -372,7 +414,7 @@ Access method: heap Access method: heap \d+ tab7_anon_off - Table "public.tab7_anon_off" + Table "s617.tab7_anon_off" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | | | plain | | | @@ -381,7 +423,7 @@ Access method: heap Access method: heap \d+ tab_emp - Table "public.tab_emp" + Table "s617.tab_emp" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -394,43 +436,43 @@ Triggers: Access method: heap \dn john -List of schemas - Name | Owner -------+------- - john | rocky + List of schemas + Name | Owner +------+---------- + john | appuser2 (1 row) \dn alice -List of schemas - Name | Owner --------+------- - alice | rocky + List of schemas + Name | Owner +-------+---------- + alice | appuser2 (1 row) \dn cena -List of schemas - Name | Owner -------+------- - cena | rocky + List of schemas + Name | Owner +------+---------- + cena | appuser2 (1 row) \dn wonderland - List of schemas - Name | Owner -------------+------- - wonderland | rocky + List of schemas + Name | Owner +------------+---------- + wonderland | appuser2 (1 row) -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); nspname | relname | set_name ---------+---------------+---------- - public | tab1_proc_on | default - public | tab2_func_on | default - public | tab3_anon_on | default - public | tab6_anon_off | default - public | tab4_proc_off | default - public | tab5_func_off | default - public | tab_emp | default - public | tab7_anon_off | + s617 | tab1_proc_on | default + s617 | tab2_func_on | default + s617 | tab3_anon_on | default + s617 | tab6_anon_off | default + s617 | tab4_proc_off | default + s617 | tab5_func_off | default + s617 | tab_emp | default + s617 | tab7_anon_off | (8 rows) diff --git a/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.sql b/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.sql index 69dfce5e..8966c2a4 100644 --- a/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.sql +++ b/t/auto_ddl/6177a_allow_ddl_from_func_proc_create_alter_n1.sql @@ -1,5 +1,23 @@ --- Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated + +--creating the necessary pre-reqs and then switching to the appuser2 role +CREATE ROLE appuser2 LOGIN; +CREATE SCHEMA IF NOT EXISTS s617; + +GRANT ALL PRIVILEGES ON SCHEMA public TO appuser2; +-- Grant execution rights to the non-superuser +GRANT EXECUTE ON FUNCTION public.get_table_repset_info(TEXT) TO appuser2; +GRANT ALL PRIVILEGES ON SCHEMA s617 TO appuser2; +DO $$ +DECLARE + db_name TEXT; +BEGIN + -- Get the name of the current database + db_name := current_database(); + + -- Dynamically execute the GRANT command for appuser2 + EXECUTE format('GRANT CREATE ON DATABASE %I TO appuser2', db_name); +END $$; -- Turn on the 
allow_ddl_from_functions GUC @@ -8,6 +26,10 @@ SELECT pg_reload_conf(); SELECT pg_sleep(0.5); SHOW spock.allow_ddl_from_functions; +SET ROLE appuser2; + +SET search_path TO s617, public; + -- Create simple tables CREATE TABLE tab1_proc_on (id INT PRIMARY KEY, col1 TEXT, col2 INT); CREATE TABLE tab2_func_on (id INT PRIMARY KEY, col1 TEXT, col2 INT, col3 TEXT, col4 INT, col5 TEXT, col6 INT, col7 TEXT, col8 INT); @@ -83,7 +105,7 @@ ON tab_emp FOR EACH ROW EXECUTE FUNCTION employee_insert_trigger(); -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); -- Add a primary key to the table tab3 within an anonymous block DO $$ @@ -116,18 +138,22 @@ BEGIN END $$; -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); ------ -- Turning allow_ddl_from_functions GUC off ------ - +RESET ROLE; -- Turn off the allow_ddl_from_functions GUC ALTER SYSTEM SET spock.allow_ddl_from_functions = off; SELECT pg_reload_conf(); SELECT pg_sleep(0.5); SHOW spock.allow_ddl_from_functions; +SET ROLE appuser2; + +SET search_path TO s617, public; + -- Run anonymous block to create tab7 DO $$ BEGIN @@ -177,4 +203,4 @@ $$; \dn cena \dn wonderland -EXECUTE spocktab('tab'); \ No newline at end of file +SELECT * FROM get_table_repset_info('tab'); \ No newline at end of file diff --git a/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.out b/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.out index 976ac454..e15e351e 100644 --- a/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.out +++ b/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.out @@ -1,6 +1,9 @@ --- Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; -PREPARE +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + -- Turn on the allow_ddl_from_functions GUC ALTER SYSTEM SET spock.allow_ddl_from_functions = on; WARNING: This DDL statement will not be replicated. 
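For reference, every file in this series toggles the GUC with the same idiom. ALTER SYSTEM writes the value into postgresql.auto.conf on the local node only (hence the "will not be replicated" warning it raises), and pg_reload_conf() returns true as soon as the SIGHUP is sent, not once the new value is applied, which is why a short sleep precedes the SHOW:

    ALTER SYSTEM SET spock.allow_ddl_from_functions = on;  -- node-local, written to postgresql.auto.conf
    SELECT pg_reload_conf();  -- signals the postmaster to re-read its configuration
    SELECT pg_sleep(0.5);     -- grace period so SHOW sees the post-reload value
    SHOW spock.allow_ddl_from_functions;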
@@ -23,30 +26,32 @@ SHOW spock.allow_ddl_from_functions; on (1 row) +SET search_path TO s617, public; +SET -- Validate replicated functions, procedures, tables \df add_column* List of functions Schema | Name | Result data type | Argument data types | Type --------+--------------------------+------------------+--------------------------------------------------------------------------------------------------------------------+------ - public | add_column_to_table_proc | | IN table_name character varying, IN varname character varying, IN vartype character varying, INOUT success boolean | proc + s617 | add_column_to_table_proc | | IN table_name character varying, IN varname character varying, IN vartype character varying, INOUT success boolean | proc (1 row) \df remove_column* List of functions Schema | Name | Result data type | Argument data types | Type --------+--------------------------+------------------+-------------------------------------------------------------+------ - public | remove_column_from_table | boolean | table_name character varying, column_name character varying | func + s617 | remove_column_from_table | boolean | table_name character varying, column_name character varying | func (1 row) \df employee_insert_trigger List of functions Schema | Name | Result data type | Argument data types | Type --------+-------------------------+------------------+---------------------+------ - public | employee_insert_trigger | trigger | | func + s617 | employee_insert_trigger | trigger | | func (1 row) \d+ tab1_proc_on - Table "public.tab1_proc_on" + Table "s617.tab1_proc_on" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ----------+--------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -60,7 +65,7 @@ Indexes: Access method: heap \d+ tab2_func_on - Table "public.tab2_func_on" + Table "s617.tab2_func_on" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -74,7 +79,7 @@ Indexes: Access method: heap \d+ tab3_anon_on - Table "public.tab3_anon_on" + Table "s617.tab3_anon_on" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -85,7 +90,7 @@ Indexes: Access method: heap \d+ tab4_proc_off - Table "public.tab4_proc_off" + Table "s617.tab4_proc_off" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -96,7 +101,7 @@ Indexes: Access method: heap \d+ tab5_func_off - Table "public.tab5_func_off" + Table "s617.tab5_func_off" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -113,7 +118,7 @@ Indexes: Access method: heap \d+ tab6_anon_off - Table "public.tab6_anon_off" + Table "s617.tab6_anon_off" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats 
target | Description --------+---------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -126,7 +131,7 @@ Access method: heap \d+ tab7_anon_off Did not find any relation named "tab7_anon_off". \d+ tab_emp - Table "public.tab_emp" + Table "s617.tab_emp" Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description --------+------------------------+-----------+----------+---------+----------+-------------+--------------+------------- id | integer | | not null | | plain | | | @@ -139,10 +144,10 @@ Triggers: Access method: heap \dn john -List of schemas - Name | Owner -------+------- - john | rocky + List of schemas + Name | Owner +------+---------- + john | appuser2 (1 row) \dn alice @@ -152,10 +157,10 @@ List of schemas (0 rows) \dn cena -List of schemas - Name | Owner -------+------- - cena | rocky + List of schemas + Name | Owner +------+---------- + cena | appuser2 (1 row) \dn wonderland @@ -164,18 +169,22 @@ List of schemas ------+------- (0 rows) -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); nspname | relname | set_name ---------+---------------+---------- - public | tab1_proc_on | default - public | tab2_func_on | default - public | tab3_anon_on | default - public | tab6_anon_off | default - public | tab4_proc_off | default - public | tab5_func_off | default - public | tab_emp | default + s617 | tab1_proc_on | default + s617 | tab2_func_on | default + s617 | tab3_anon_on | default + s617 | tab6_anon_off | default + s617 | tab4_proc_off | default + s617 | tab5_func_off | default + s617 | tab_emp | default (7 rows) +SET ROLE appuser2; +SET +SET search_path TO s617, public; +SET -- Drop tables DO $$ BEGIN @@ -189,8 +198,6 @@ BEGIN EXECUTE 'DROP PROCEDURE add_column_to_table_proc'; EXECUTE 'DROP FUNCTION remove_column_from_table'; EXECUTE 'DROP FUNCTION employee_insert_trigger'; - EXECUTE 'DROP SCHEMA john'; - EXECUTE 'DROP SCHEMA cena'; END $$; NOTICE: drop cascades to table tab1_proc_on membership in replication set default @@ -210,8 +217,6 @@ INFO: DDL statement replicated. INFO: DDL statement replicated. INFO: DDL statement replicated. INFO: DDL statement replicated. -INFO: DDL statement replicated. -INFO: DDL statement replicated. DO DO $$ BEGIN @@ -221,7 +226,18 @@ $$; ERROR: table "tab7_anon_off" does not exist CONTEXT: SQL statement "DROP TABLE tab7_anon_off" PL/pgSQL function inline_code_block line 3 at EXECUTE ---should error out as these shouldn't be replicated to n2 +RESET ROLE; +RESET +DO $$ +BEGIN + EXECUTE 'DROP SCHEMA john'; + EXECUTE 'DROP SCHEMA cena'; +END +$$; +INFO: DDL statement replicated. +INFO: DDL statement replicated. 
+DO +--should error out as these shouldn't have been replicated to n2 DROP SCHEMA alice; ERROR: schema "alice" does not exist DROP SCHEMA wonderland; diff --git a/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.sql b/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.sql index 38eecfab..be97c941 100644 --- a/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.sql +++ b/t/auto_ddl/6177b_allow_ddl_from_func_proc_validate_drop_n2.sql @@ -1,5 +1,4 @@ --- Prepared statement for spock.tables to list tables and associated indexes -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE '%' || $1 || '%' ORDER BY relid; +SELECT pg_sleep(1); -- Turn on the allow_ddl_from_functions GUC ALTER SYSTEM SET spock.allow_ddl_from_functions = on; @@ -7,6 +6,8 @@ SELECT pg_reload_conf(); SELECT pg_sleep(0.5); SHOW spock.allow_ddl_from_functions; +SET search_path TO s617, public; + -- Validate replicated functions, procedures, tables \df add_column* \df remove_column* @@ -23,7 +24,11 @@ SHOW spock.allow_ddl_from_functions; \dn alice \dn cena \dn wonderland -EXECUTE spocktab('tab'); +SELECT * FROM get_table_repset_info('tab'); + +SET ROLE appuser2; + +SET search_path TO s617, public; -- Drop tables DO $$ BEGIN @@ -37,8 +42,6 @@ BEGIN EXECUTE 'DROP PROCEDURE add_column_to_table_proc'; EXECUTE 'DROP FUNCTION remove_column_from_table'; EXECUTE 'DROP FUNCTION employee_insert_trigger'; - EXECUTE 'DROP SCHEMA john'; - EXECUTE 'DROP SCHEMA cena'; END $$; @@ -47,6 +50,15 @@ BEGIN EXECUTE 'DROP TABLE tab7_anon_off'; --should not exist END $$; ---should error out as these shouldn't be replicated to n2 + +RESET ROLE; + +DO $$ +BEGIN + EXECUTE 'DROP SCHEMA john'; + EXECUTE 'DROP SCHEMA cena'; +END +$$; +--should error out as these shouldn't have been replicated to n2 DROP SCHEMA alice; DROP SCHEMA wonderland; \ No newline at end of file diff --git a/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.out b/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.out index 712d4fcf..73bd375f 100644 --- a/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.out +++ b/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.out @@ -1,5 +1,13 @@ +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + -- Validate replicated functions, procedures, tables -- No objects sould exist except tab7, schemas (alice,wonderland) +SET search_path TO s617, public; +SET \df add_column* List of functions Schema | Name | Result data type | Argument data types | Type @@ -31,7 +39,7 @@ Did not find any relation named "tab5_func_off". \d tab6_anon_off Did not find any relation named "tab6_anon_off". \d tab7_anon_off - Table "public.tab7_anon_off" + Table "s617.tab7_anon_off" Column | Type | Collation | Nullable | Default --------+---------+-----------+----------+--------- id | integer | | | @@ -47,10 +55,10 @@ List of schemas (0 rows) \dn alice -List of schemas - Name | Owner --------+------- - alice | rocky + List of schemas + Name | Owner +-------+---------- + alice | appuser2 (1 row) \dn cena @@ -60,10 +68,10 @@ List of schemas (0 rows) \dn wonderland - List of schemas - Name | Owner -------------+------- - wonderland | rocky + List of schemas + Name | Owner +------------+---------- + wonderland | appuser2 (1 row) -- Turn off the allow_ddl_from_functions GUC so that these drops are not auto replicated @@ -107,3 +115,13 @@ SELECT pg_reload_conf(); t (1 row) +--cleanup +DROP SCHEMA s617 CASCADE; +INFO: DDL statement replicated. 
+DROP SCHEMA +DROP OWNED BY appuser2; +INFO: DDL statement replicated. +DROP OWNED +DROP ROLE appuser2; +INFO: DDL statement replicated. +DROP ROLE diff --git a/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.sql b/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.sql index 751e008b..5d91eb6e 100644 --- a/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.sql +++ b/t/auto_ddl/6177c_allow_ddl_from_func_proc_validate_n1.sql @@ -1,5 +1,8 @@ +SELECT pg_sleep(1); -- Validate replicated functions, procedures, tables -- No objects sould exist except tab7, schemas (alice,wonderland) + +SET search_path TO s617, public; \df add_column* \df remove_column* \df employee_insert_trigger @@ -34,3 +37,7 @@ $$; -- Turn on the allow_ddl_from_functions GUC ALTER SYSTEM SET spock.allow_ddl_from_functions = on; SELECT pg_reload_conf(); +--cleanup +DROP SCHEMA s617 CASCADE; +DROP OWNED BY appuser2; +DROP ROLE appuser2; diff --git a/t/auto_ddl/6666a_all_objects_create_n1.out b/t/auto_ddl/6666a_all_objects_create_n1.out index 235ba695..0ed52b53 100644 --- a/t/auto_ddl/6666a_all_objects_create_n1.out +++ b/t/auto_ddl/6666a_all_objects_create_n1.out @@ -1,10 +1,28 @@ --- Create spocktab prepared statement -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE $1 ORDER BY relid; -PREPARE +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + +--creating the necessary pre-reqs and then switching to the appuser3 (non-superuser) role +CREATE ROLE appuser3 LOGIN; +INFO: DDL statement replicated. +CREATE ROLE -- Create schema -CREATE SCHEMA s1; +CREATE SCHEMA s1 AUTHORIZATION appuser3; INFO: DDL statement replicated. CREATE SCHEMA +GRANT ALL PRIVILEGES ON SCHEMA public TO appuser3; +INFO: DDL statement replicated. +GRANT +-- Grant execution rights to the non-superuser +GRANT EXECUTE ON FUNCTION public.get_table_repset_info(TEXT) TO appuser3; +INFO: DDL statement replicated. +GRANT +---------------- +SET ROLE TO adminuser; +SET +--performing the superuser operations initially SET search_path TO s1; SET -- Create database @@ -47,10 +65,95 @@ WARNING: subscription was created, but is not connected HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. WARNING: This DDL statement will not be replicated. CREATE SUBSCRIPTION +-- Create language +CREATE LANGUAGE plperl; +INFO: DDL statement replicated. +CREATE EXTENSION +-- Create function for language internal +CREATE FUNCTION int4_sum(state int4, value int4) RETURNS int4 LANGUAGE internal IMMUTABLE STRICT AS 'int4pl'; +INFO: DDL statement replicated. +CREATE FUNCTION +-- Create foreign table +CREATE FOREIGN TABLE obj_foreign_table ( + id INT, + name TEXT +) SERVER obj_server; +INFO: DDL statement replicated. +CREATE FOREIGN TABLE +-- Create operator family +CREATE OPERATOR FAMILY obj_opfamily USING btree; +INFO: DDL statement replicated. +CREATE OPERATOR FAMILY +-- Create operator class +CREATE OPERATOR CLASS obj_opclass FOR TYPE int4 USING btree FAMILY obj_opfamily AS + OPERATOR 1 < , + OPERATOR 2 <= , + OPERATOR 3 = , + OPERATOR 4 >= , + OPERATOR 5 > , + FUNCTION 1 btint4cmp(int4, int4); +INFO: DDL statement replicated. +CREATE OPERATOR CLASS +-- Create text search parser +CREATE TEXT SEARCH PARSER obj_tsparser ( + START = prsd_start, + GETTOKEN = prsd_nexttoken, + END = prsd_end, + LEXTYPES = prsd_lextype +); +INFO: DDL statement replicated.
+CREATE TEXT SEARCH PARSER +-- Create text search dictionary +CREATE TEXT SEARCH DICTIONARY obj_tsdict ( + TEMPLATE = simple +); +INFO: DDL statement replicated. +CREATE TEXT SEARCH DICTIONARY +-- Create text search configuration +CREATE TEXT SEARCH CONFIGURATION obj_tsconfig (PARSER = obj_tsparser); +INFO: DDL statement replicated. +CREATE TEXT SEARCH CONFIGURATION +-- Create text search template +CREATE TEXT SEARCH TEMPLATE obj_tstemplate ( + INIT = dsimple_init, + LEXIZE = dsimple_lexize +); +INFO: DDL statement replicated. +CREATE TEXT SEARCH TEMPLATE +-- Create transform +CREATE TRANSFORM FOR int LANGUAGE SQL ( + FROM SQL WITH FUNCTION prsd_lextype(internal), + TO SQL WITH FUNCTION int4recv(internal)); +INFO: DDL statement replicated. +CREATE TRANSFORM +-- Create event trigger +CREATE FUNCTION obj_function_event_trigger() RETURNS event_trigger LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'Event trigger activated: %', tg_tag; +END $$; +INFO: DDL statement replicated. +CREATE FUNCTION +CREATE EVENT TRIGGER obj_event_trigger ON ddl_command_start EXECUTE FUNCTION obj_function_event_trigger(); +INFO: DDL statement replicated. +CREATE EVENT TRIGGER +-- Create group +CREATE GROUP obj_group; +INFO: DDL statement replicated. +CREATE ROLE +RESET ROLE; +RESET +-- non super user operations +-- switching to appuser3 with limited privileges +SET ROLE TO appuser3; +SET +SET search_path TO s1; +SET CREATE TYPE obj_type AS (x INT, y INT); +NOTICE: Event trigger activated: CREATE TYPE INFO: DDL statement replicated. CREATE TYPE CREATE DOMAIN obj_domain AS INT; +NOTICE: Event trigger activated: CREATE DOMAIN INFO: DDL statement replicated. CREATE DOMAIN -- Create cast @@ -58,64 +161,59 @@ CREATE FUNCTION obj_function_cast(obj_type) RETURNS INT LANGUAGE plpgsql AS $$ BEGIN RETURN $1.x + $1.y; END $$; +NOTICE: Event trigger activated: CREATE FUNCTION INFO: DDL statement replicated. CREATE FUNCTION -- Create the cast from obj_type1 to int CREATE CAST (obj_type AS int) WITH FUNCTION obj_function_cast(obj_type) AS IMPLICIT; +NOTICE: Event trigger activated: CREATE CAST INFO: DDL statement replicated. CREATE CAST -- Create aggregate -CREATE FUNCTION int4_sum(state int4, value int4) RETURNS int4 LANGUAGE internal IMMUTABLE STRICT AS 'int4pl'; -INFO: DDL statement replicated. -CREATE FUNCTION --- Create aggregate CREATE AGGREGATE obj_aggregate ( sfunc = int4_sum, stype = int4, basetype = int4, initcond = '0' ); +NOTICE: Event trigger activated: CREATE AGGREGATE INFO: DDL statement replicated. CREATE AGGREGATE -- Create collation CREATE COLLATION obj_collation (lc_collate = 'C', lc_ctype = 'C'); +NOTICE: Event trigger activated: CREATE COLLATION INFO: DDL statement replicated. CREATE COLLATION -- Create conversion CREATE CONVERSION obj_conversion FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +NOTICE: Event trigger activated: CREATE CONVERSION INFO: DDL statement replicated. CREATE CONVERSION -- Create domain CREATE DOMAIN obj_domain2 AS INT CHECK (VALUE >= 0); +NOTICE: Event trigger activated: CREATE DOMAIN INFO: DDL statement replicated. CREATE DOMAIN --- Create foreign table -CREATE FOREIGN TABLE obj_foreign_table ( - id INT, - name TEXT -) SERVER obj_server; -INFO: DDL statement replicated. -CREATE FOREIGN TABLE -- Create function CREATE FUNCTION obj_function() RETURNS TRIGGER LANGUAGE plpgsql AS $$ BEGIN RETURN NEW; END $$; +NOTICE: Event trigger activated: CREATE FUNCTION INFO: DDL statement replicated. 
CREATE FUNCTION -- Create index CREATE TABLE obj_table (id INT PRIMARY KEY, name TEXT); +NOTICE: Event trigger activated: CREATE TABLE INFO: DDL statement replicated. CREATE TABLE CREATE INDEX obj_index ON obj_table (name); +NOTICE: Event trigger activated: CREATE INDEX INFO: DDL statement replicated. CREATE INDEX --- Create language -CREATE LANGUAGE plperl; -INFO: DDL statement replicated. -CREATE EXTENSION -- Create materialized view CREATE MATERIALIZED VIEW obj_mview AS SELECT * FROM obj_table WITH NO DATA; +NOTICE: Event trigger activated: CREATE MATERIALIZED VIEW WARNING: DDL statement replicated, but could be unsafe. CREATE MATERIALIZED VIEW -- Create operator @@ -125,24 +223,12 @@ CREATE OPERATOR ## ( function = path_inter, commutator = ## ); +NOTICE: Event trigger activated: CREATE OPERATOR INFO: DDL statement replicated. CREATE OPERATOR --- Create operator family -CREATE OPERATOR FAMILY obj_opfamily USING btree; -INFO: DDL statement replicated. -CREATE OPERATOR FAMILY --- Create operator class -CREATE OPERATOR CLASS obj_opclass FOR TYPE int4 USING btree FAMILY obj_opfamily AS - OPERATOR 1 < , - OPERATOR 2 <= , - OPERATOR 3 = , - OPERATOR 4 >= , - OPERATOR 5 > , - FUNCTION 1 btint4cmp(int4, int4); -INFO: DDL statement replicated. -CREATE OPERATOR CLASS -- Create policy CREATE POLICY obj_policy ON obj_table FOR SELECT TO PUBLIC USING (true); +NOTICE: Event trigger activated: CREATE POLICY INFO: DDL statement replicated. CREATE POLICY -- Create procedure @@ -150,83 +236,53 @@ CREATE PROCEDURE obj_procedure() LANGUAGE plpgsql AS $$ BEGIN RAISE NOTICE 'Procedure executed'; END $$; +NOTICE: Event trigger activated: CREATE PROCEDURE INFO: DDL statement replicated. CREATE PROCEDURE -- Create rule CREATE RULE obj_rule AS ON INSERT TO obj_table DO ALSO NOTHING; +NOTICE: Event trigger activated: CREATE RULE INFO: DDL statement replicated. CREATE RULE --- Create text search dictionary -CREATE TEXT SEARCH DICTIONARY obj_tsdict ( - TEMPLATE = simple -); -INFO: DDL statement replicated. -CREATE TEXT SEARCH DICTIONARY --- Create text search parser -CREATE TEXT SEARCH PARSER obj_tsparser ( - START = prsd_start, - GETTOKEN = prsd_nexttoken, - END = prsd_end, - LEXTYPES = prsd_lextype -); -INFO: DDL statement replicated. -CREATE TEXT SEARCH PARSER --- Create text search configuration -CREATE TEXT SEARCH CONFIGURATION obj_tsconfig (PARSER = obj_tsparser); -INFO: DDL statement replicated. -CREATE TEXT SEARCH CONFIGURATION --- Create text search template -CREATE TEXT SEARCH TEMPLATE obj_tstemplate ( - INIT = dsimple_init, - LEXIZE = dsimple_lexize -); -INFO: DDL statement replicated. -CREATE TEXT SEARCH TEMPLATE --- Create transform -CREATE TRANSFORM FOR int LANGUAGE SQL ( - FROM SQL WITH FUNCTION prsd_lextype(internal), - TO SQL WITH FUNCTION int4recv(internal)); -INFO: DDL statement replicated. -CREATE TRANSFORM -- Create trigger CREATE TRIGGER obj_trigger AFTER INSERT ON obj_table FOR EACH ROW EXECUTE FUNCTION obj_function(); +NOTICE: Event trigger activated: CREATE TRIGGER INFO: DDL statement replicated. CREATE TRIGGER -- Create type CREATE TYPE obj_composite_type AS (x INT, y INT); +NOTICE: Event trigger activated: CREATE TYPE INFO: DDL statement replicated. CREATE TYPE CREATE TYPE obj_enum AS ENUM ('red', 'green', 'blue'); +NOTICE: Event trigger activated: CREATE TYPE INFO: DDL statement replicated. CREATE TYPE CREATE TYPE obj_range AS RANGE (subtype = int4range); +NOTICE: Event trigger activated: CREATE TYPE INFO: DDL statement replicated. 
CREATE TYPE -- Create view CREATE VIEW obj_view AS SELECT * FROM obj_table; +NOTICE: Event trigger activated: CREATE VIEW INFO: DDL statement replicated. CREATE VIEW --- Create group -CREATE GROUP obj_group; -INFO: DDL statement replicated. -CREATE ROLE --- Create event trigger -CREATE FUNCTION obj_function_event_trigger() RETURNS event_trigger LANGUAGE plpgsql AS $$ -BEGIN - RAISE NOTICE 'Event trigger activated: %', tg_tag; -END $$; -INFO: DDL statement replicated. -CREATE FUNCTION -CREATE EVENT TRIGGER obj_event_trigger ON ddl_command_start EXECUTE FUNCTION obj_function_event_trigger(); -INFO: DDL statement replicated. -CREATE EVENT TRIGGER +--switching back to superuser for validations +RESET ROLE; +RESET -- Meta command validations --- Validate database -\l obj_database - List of databases - Name | Owner | Encoding | Locale Provider | Collate | Ctype | ICU Locale | ICU Rules | Access privileges ---------------+-------+----------+-----------------+-------------+-------------+------------+-----------+------------------- - obj_database | rocky | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | | | +-- Validate database (due to catalog changes in pg17, we are not using \l meta command anymore) +SELECT + datname AS name, + pg_catalog.pg_get_userbyid(datdba) AS owner, + pg_catalog.pg_encoding_to_char(encoding) AS encoding +FROM + pg_database +WHERE + datname = 'obj_database'; + name | owner | encoding +--------------+-----------+---------- + obj_database | adminuser | UTF8 (1 row) -- Validate extension @@ -253,27 +309,27 @@ SELECT count(*) FROM pg_tablespace WHERE spcname = 'obj_tablespace'; -- Validate schema \dn s1 -List of schemas - Name | Owner -------+------- - s1 | rocky + List of schemas + Name | Owner +------+---------- + s1 | appuser3 Publications: "obj_publication" -- Validate foreign data wrapper \dew obj_fdw - List of foreign-data wrappers - Name | Owner | Handler | Validator ----------+-------+---------+----------- - obj_fdw | rocky | - | - + List of foreign-data wrappers + Name | Owner | Handler | Validator +---------+-----------+---------+----------- + obj_fdw | adminuser | - | - (1 row) -- Validate server \des obj_server - List of foreign servers - Name | Owner | Foreign-data wrapper -------------+-------+---------------------- - obj_server | rocky | obj_fdw + List of foreign servers + Name | Owner | Foreign-data wrapper +------------+-----------+---------------------- + obj_server | adminuser | obj_fdw (1 row) -- Validate user mapping @@ -281,23 +337,23 @@ Publications: List of user mappings Server | User name ------------+----------- - obj_server | rocky + obj_server | adminuser (1 row) -- Validate publication \dRp obj_publication - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ------------------+-------+------------+---------+---------+---------+-----------+---------- - obj_publication | rocky | f | t | t | t | t | f + List of publications + Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +-----------------+-----------+------------+---------+---------+---------+-----------+---------- + obj_publication | adminuser | f | t | t | t | t | f (1 row) -- Validate subscription \dRs obj_subscription - List of subscriptions - Name | Owner | Enabled | Publication -------------------+-------+---------+------------------- - obj_subscription | rocky | f | {obj_publication} + List of subscriptions + Name | Owner | Enabled | Publication +------------------+-----------+---------+------------------- + 
obj_subscription | adminuser | f | {obj_publication} (1 row) -- Validate cast @@ -316,12 +372,19 @@ Publications: s1 | obj_aggregate | integer | integer | (1 row) --- Validate collation -\dO obj_collation - List of collations - Schema | Name | Provider | Collate | Ctype | ICU Locale | ICU Rules | Deterministic? ---------+---------------+----------+---------+-------+------------+-----------+---------------- - s1 | obj_collation | libc | C | C | | | yes +-- Validate collation , using a query instead of meta command as there are catalog changes in pg17 +SELECT + n.nspname AS schema, + c.collname AS name +FROM + pg_collation c +JOIN + pg_namespace n ON n.oid = c.collnamespace +WHERE + c.collname = 'obj_collation'; + schema | name +--------+--------------- + s1 | obj_collation (1 row) -- Validate conversion @@ -342,10 +405,10 @@ Publications: -- Validate event trigger \dy obj_event_trigger - List of event triggers - Name | Event | Owner | Enabled | Function | Tags --------------------+-------------------+-------+---------+----------------------------+------ - obj_event_trigger | ddl_command_start | rocky | enabled | obj_function_event_trigger | + List of event triggers + Name | Event | Owner | Enabled | Function | Tags +-------------------+-------------------+-----------+---------+----------------------------+------ + obj_event_trigger | ddl_command_start | adminuser | enabled | obj_function_event_trigger | (1 row) -- Validate foreign table @@ -366,18 +429,18 @@ Publications: -- Validate index \di obj_index - List of relations - Schema | Name | Type | Owner | Table ---------+-----------+-------+-------+----------- - s1 | obj_index | index | rocky | obj_table + List of relations + Schema | Name | Type | Owner | Table +--------+-----------+-------+----------+----------- + s1 | obj_index | index | appuser3 | obj_table (1 row) -- Validate language \dL plperl - List of languages - Name | Owner | Trusted | Description ---------+-------+---------+----------------------------- - plperl | rocky | t | PL/Perl procedural language + List of languages + Name | Owner | Trusted | Description +--------+-----------+---------+----------------------------- + plperl | adminuser | t | PL/Perl procedural language (1 row) -- Validate materialized view @@ -470,26 +533,26 @@ WHERE ty.typname = 'int4' AND l.lanname = 'sql'; -- Validate type \dT+ obj_composite_type - List of data types - Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description ---------+--------------------+--------------------+-------+----------+-------+-------------------+------------- - s1 | obj_composite_type | obj_composite_type | tuple | | rocky | | + List of data types + Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description +--------+--------------------+--------------------+-------+----------+----------+-------------------+------------- + s1 | obj_composite_type | obj_composite_type | tuple | | appuser3 | | (1 row) \dT+ obj_enum - List of data types - Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description ---------+----------+---------------+------+----------+-------+-------------------+------------- - s1 | obj_enum | obj_enum | 4 | red +| rocky | | - | | | | green +| | | - | | | | blue | | | + List of data types + Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description +--------+----------+---------------+------+----------+----------+-------------------+------------- + s1 | obj_enum | obj_enum | 4 | red +| 
appuser3 | | + | | | | green +| | | + | | | | blue | | | (1 row) \dT+ obj_range - List of data types - Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description ---------+-----------+---------------+------+----------+-------+-------------------+------------- - s1 | obj_range | obj_range | var | | rocky | | + List of data types + Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description +--------+-----------+---------------+------+----------+----------+-------------------+------------- + s1 | obj_range | obj_range | var | | appuser3 | | (1 row) -- Validate view
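The expected output above encodes the ownership split this test now exercises: objects whose creation requires superuser (language, foreign-data wrapper and server, operator class and family, text search objects, transform, event trigger, role) stay owned by adminuser, while ordinary schema objects belong to appuser3. A quick way to confirm the split by hand, as a hypothetical spot check rather than part of the test files:

    -- count relations in schema s1 per owning role (illustrative only)
    SELECT r.rolname AS owner, count(*) AS relations_owned
    FROM pg_class c
    JOIN pg_roles r ON r.oid = c.relowner
    WHERE c.relnamespace = 's1'::regnamespace
    GROUP BY r.rolname
    ORDER BY r.rolname;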
diff --git a/t/auto_ddl/6666a_all_objects_create_n1.sql b/t/auto_ddl/6666a_all_objects_create_n1.sql index 5264a05c..99310ec1 100644 --- a/t/auto_ddl/6666a_all_objects_create_n1.sql +++ b/t/auto_ddl/6666a_all_objects_create_n1.sql @@ -1,8 +1,18 @@ --- Create spocktab prepared statement -PREPARE spocktab AS SELECT nspname, relname, set_name FROM spock.tables WHERE relname LIKE $1 ORDER BY relid; +SELECT pg_sleep(1);--to ensure all objects are replicated +--creating the necessary pre-reqs and then switching to the appuser3 (non-superuser) role +CREATE ROLE appuser3 LOGIN; -- Create schema -CREATE SCHEMA s1; +CREATE SCHEMA s1 AUTHORIZATION appuser3; + +GRANT ALL PRIVILEGES ON SCHEMA public TO appuser3; +-- Grant execution rights to the non-superuser +GRANT EXECUTE ON FUNCTION public.get_table_repset_info(TEXT) TO appuser3; + + +---------------- +SET ROLE TO adminuser; +--performing the superuser operations initially SET search_path TO s1; -- Create database @@ -33,8 +43,76 @@ CREATE PUBLICATION obj_publication FOR TABLES IN SCHEMA s1; -- Create subscription CREATE SUBSCRIPTION obj_subscription CONNECTION '' PUBLICATION obj_publication WITH (connect = false, slot_name = NONE); +-- Create language +CREATE LANGUAGE plperl; + +-- Create function for language internal +CREATE FUNCTION int4_sum(state int4, value int4) RETURNS int4 LANGUAGE internal IMMUTABLE STRICT AS 'int4pl'; + +-- Create foreign table +CREATE FOREIGN TABLE obj_foreign_table ( + id INT, + name TEXT +) SERVER obj_server; + +-- Create operator family +CREATE OPERATOR FAMILY obj_opfamily USING btree; + +-- Create operator class +CREATE OPERATOR CLASS obj_opclass FOR TYPE int4 USING btree FAMILY obj_opfamily AS + OPERATOR 1 < , + OPERATOR 2 <= , + OPERATOR 3 = , + OPERATOR 4 >= , + OPERATOR 5 > , + FUNCTION 1 btint4cmp(int4, int4); + +-- Create text search parser +CREATE TEXT SEARCH PARSER obj_tsparser ( + START = prsd_start, + GETTOKEN = prsd_nexttoken, + END = prsd_end, + LEXTYPES = prsd_lextype +); + +-- Create text search dictionary +CREATE TEXT SEARCH DICTIONARY obj_tsdict ( + TEMPLATE = simple +); + +-- Create text search configuration +CREATE TEXT SEARCH CONFIGURATION obj_tsconfig (PARSER = obj_tsparser); + +-- Create text search template +CREATE TEXT SEARCH TEMPLATE obj_tstemplate ( + INIT = dsimple_init, + LEXIZE = dsimple_lexize +); + +-- Create transform +CREATE TRANSFORM FOR int LANGUAGE SQL ( + FROM SQL WITH FUNCTION prsd_lextype(internal), + TO SQL WITH FUNCTION int4recv(internal)); + +-- Create event trigger +CREATE FUNCTION obj_function_event_trigger() RETURNS event_trigger LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'Event trigger activated: %', tg_tag; +END $$; +CREATE EVENT TRIGGER obj_event_trigger ON ddl_command_start EXECUTE FUNCTION obj_function_event_trigger(); + +-- Create group +CREATE GROUP obj_group; + +RESET ROLE; + +-- non super user operations +-- switching to appuser3 with limited privileges +SET ROLE TO appuser3; +SET search_path TO s1; CREATE TYPE obj_type AS (x INT, y INT); + CREATE DOMAIN obj_domain AS INT; -- Create cast CREATE FUNCTION obj_function_cast(obj_type) RETURNS INT LANGUAGE plpgsql AS $$ @@ -44,9 +122,6 @@ END $$; -- Create the cast from obj_type1 to int CREATE CAST (obj_type AS int) WITH FUNCTION obj_function_cast(obj_type) AS IMPLICIT; --- Create aggregate -CREATE FUNCTION int4_sum(state int4, value int4) RETURNS int4 LANGUAGE internal IMMUTABLE STRICT AS 'int4pl'; - -- Create aggregate CREATE AGGREGATE obj_aggregate ( sfunc = int4_sum, @@ -55,22 +130,15 @@ CREATE AGGREGATE obj_aggregate ( initcond = '0' ); - -- Create collation CREATE COLLATION obj_collation (lc_collate = 'C', lc_ctype = 'C'); -- Create conversion CREATE CONVERSION obj_conversion FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; - -- Create domain CREATE DOMAIN obj_domain2 AS INT CHECK (VALUE >= 0); --- Create foreign table -CREATE FOREIGN TABLE obj_foreign_table ( - id INT, - name TEXT -) SERVER obj_server; -- Create function CREATE FUNCTION obj_function() RETURNS TRIGGER LANGUAGE plpgsql AS $$ @@ -82,9 +150,6 @@ END $$; CREATE TABLE obj_table (id INT PRIMARY KEY, name TEXT); CREATE INDEX obj_index ON obj_table (name); --- Create language -CREATE LANGUAGE plperl; - -- Create materialized view CREATE MATERIALIZED VIEW obj_mview AS SELECT * FROM obj_table WITH NO DATA; @@ -96,18 +161,6 @@ CREATE OPERATOR ## ( commutator = ## ); --- Create operator family -CREATE OPERATOR FAMILY obj_opfamily USING btree; - --- Create operator class -CREATE OPERATOR CLASS obj_opclass FOR TYPE int4 USING btree FAMILY obj_opfamily AS - OPERATOR 1 < , - OPERATOR 2 <= , - OPERATOR 3 = , - OPERATOR 4 >= , - OPERATOR 5 > , - FUNCTION 1 btint4cmp(int4, int4); - -- Create policy CREATE POLICY obj_policy ON obj_table FOR SELECT TO PUBLIC USING (true); @@ -120,33 +173,6 @@ END $$; -- Create rule CREATE RULE obj_rule AS ON INSERT TO obj_table DO ALSO NOTHING; --- Create text search dictionary -CREATE TEXT SEARCH DICTIONARY obj_tsdict ( - TEMPLATE = simple -); - --- Create text search parser -CREATE TEXT SEARCH PARSER obj_tsparser ( - START = prsd_start, - GETTOKEN = prsd_nexttoken, - END = prsd_end, - LEXTYPES = prsd_lextype -); - --- Create text search configuration -CREATE TEXT SEARCH CONFIGURATION obj_tsconfig (PARSER = obj_tsparser); - --- Create text search template -CREATE TEXT SEARCH TEMPLATE obj_tstemplate ( - INIT = dsimple_init, - LEXIZE = dsimple_lexize -); - --- Create transform -CREATE TRANSFORM FOR int LANGUAGE SQL ( - FROM SQL WITH FUNCTION prsd_lextype(internal), - TO SQL WITH FUNCTION int4recv(internal)); - -- Create trigger CREATE TRIGGER obj_trigger AFTER INSERT ON obj_table FOR EACH ROW EXECUTE FUNCTION obj_function(); @@ -158,20 +184,20 @@ CREATE TYPE obj_range AS RANGE (subtype = int4range); -- Create view CREATE VIEW obj_view AS SELECT * FROM obj_table; --- Create group -CREATE GROUP obj_group; - --- Create event trigger -CREATE FUNCTION obj_function_event_trigger() RETURNS event_trigger LANGUAGE plpgsql AS $$ -BEGIN - RAISE NOTICE 'Event trigger activated: %', tg_tag; -END $$; -CREATE EVENT TRIGGER obj_event_trigger ON ddl_command_start EXECUTE FUNCTION obj_function_event_trigger(); - +--switching back to superuser for validations +RESET ROLE; -- Meta command validations --- Validate database -\l obj_database +-- Validate database (due to catalog changes in pg17, we are not using \l meta command anymore) +SELECT + datname AS name, + pg_catalog.pg_get_userbyid(datdba) AS owner, + 
pg_catalog.pg_encoding_to_char(encoding) AS encoding +FROM + pg_database +WHERE + datname = 'obj_database'; + -- Validate extension \dx "uuid-ossp" @@ -206,8 +232,16 @@ SELECT count(*) FROM pg_tablespace WHERE spcname = 'obj_tablespace'; -- Validate aggregate \da obj_aggregate --- Validate collation -\dO obj_collation +-- Validate collation , using a query instead of meta command as there are catalog changes in pg17 +SELECT + n.nspname AS schema, + c.collname AS name +FROM + pg_collation c +JOIN + pg_namespace n ON n.oid = c.collnamespace +WHERE + c.collname = 'obj_collation'; -- Validate conversion \dc obj_conversion diff --git a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out index bcd01eb5..d1f86c44 100644 --- a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out +++ b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.out @@ -1,9 +1,22 @@ +SELECT pg_sleep(2);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + ---- Validate all objects on n2 and then drop them on n2 that should also drop objects on n1 -- Validate database, should not exist -\l obj_database - List of databases - Name | Owner | Encoding | Locale Provider | Collate | Ctype | ICU Locale | ICU Rules | Access privileges -------+-------+----------+-----------------+---------+-------+------------+-----------+------------------- +-- due to catalog changes in pg17, we are not using a \l meta command anymore +SELECT + datname AS name, + pg_catalog.pg_get_userbyid(datdba) AS owner, + pg_catalog.pg_encoding_to_char(encoding) AS encoding +FROM + pg_database +WHERE + datname = 'obj_database'; + name | owner | encoding +------+-------+---------- (0 rows) -- Validate extension @@ -32,27 +45,27 @@ SELECT count(*) FROM pg_tablespace WHERE spcname = 'obj_tablespace'; -- Validate schema \dn s1 -List of schemas - Name | Owner -------+------- - s1 | rocky + List of schemas + Name | Owner +------+---------- + s1 | appuser3 Publications: "obj_publication" -- Validate foreign data wrapper \dew obj_fdw - List of foreign-data wrappers - Name | Owner | Handler | Validator ----------+-------+---------+----------- - obj_fdw | rocky | - | - + List of foreign-data wrappers + Name | Owner | Handler | Validator +---------+-----------+---------+----------- + obj_fdw | adminuser | - | - (1 row) -- Validate server \des obj_server - List of foreign servers - Name | Owner | Foreign-data wrapper -------------+-------+---------------------- - obj_server | rocky | obj_fdw + List of foreign servers + Name | Owner | Foreign-data wrapper +------------+-----------+---------------------- + obj_server | adminuser | obj_fdw (1 row) -- Validate user mapping @@ -60,15 +73,15 @@ Publications: List of user mappings Server | User name ------------+----------- - obj_server | rocky + obj_server | adminuser (1 row) -- Validate publication \dRp obj_publication - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ------------------+-------+------------+---------+---------+---------+-----------+---------- - obj_publication | rocky | f | t | t | t | t | f + List of publications + Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +-----------------+-----------+------------+---------+---------+---------+-----------+---------- + obj_publication | adminuser | f | t | t | t | t | f (1 row) -- Validate subscription, should not exist @@ -94,12 +107,19 @@ Publications: s1 | obj_aggregate | integer | integer | (1 row) --- 
Validate collation -\dO obj_collation - List of collations - Schema | Name | Provider | Collate | Ctype | ICU Locale | ICU Rules | Deterministic? ---------+---------------+----------+---------+-------+------------+-----------+---------------- - s1 | obj_collation | libc | C | C | | | yes +-- Validate collation , using a query instead of meta command as there are catalog changes in pg17 +SELECT + n.nspname AS schema, + c.collname AS name +FROM + pg_collation c +JOIN + pg_namespace n ON n.oid = c.collnamespace +WHERE + c.collname = 'obj_collation'; + schema | name +--------+--------------- + s1 | obj_collation (1 row) -- Validate conversion @@ -120,10 +140,10 @@ Publications: -- Validate event trigger \dy obj_event_trigger - List of event triggers - Name | Event | Owner | Enabled | Function | Tags --------------------+-------------------+-------+---------+----------------------------+------ - obj_event_trigger | ddl_command_start | rocky | enabled | obj_function_event_trigger | + List of event triggers + Name | Event | Owner | Enabled | Function | Tags +-------------------+-------------------+-----------+---------+----------------------------+------ + obj_event_trigger | ddl_command_start | adminuser | enabled | obj_function_event_trigger | (1 row) -- Validate foreign table @@ -144,18 +164,18 @@ Publications: -- Validate index \di obj_index - List of relations - Schema | Name | Type | Owner | Table ---------+-----------+-------+-------+----------- - s1 | obj_index | index | rocky | obj_table + List of relations + Schema | Name | Type | Owner | Table +--------+-----------+-------+----------+----------- + s1 | obj_index | index | appuser3 | obj_table (1 row) -- Validate language \dL plperl - List of languages - Name | Owner | Trusted | Description ---------+-------+---------+----------------------------- - plperl | rocky | t | PL/Perl procedural language + List of languages + Name | Owner | Trusted | Description +--------+-----------+---------+----------------------------- + plperl | adminuser | t | PL/Perl procedural language (1 row) -- Validate materialized view @@ -248,26 +268,26 @@ WHERE ty.typname = 'int4' AND l.lanname = 'sql'; -- Validate type \dT+ obj_composite_type - List of data types - Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description ---------+--------------------+--------------------+-------+----------+-------+-------------------+------------- - s1 | obj_composite_type | obj_composite_type | tuple | | rocky | | + List of data types + Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description +--------+--------------------+--------------------+-------+----------+----------+-------------------+------------- + s1 | obj_composite_type | obj_composite_type | tuple | | appuser3 | | (1 row) \dT+ obj_enum - List of data types - Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description ---------+----------+---------------+------+----------+-------+-------------------+------------- - s1 | obj_enum | obj_enum | 4 | red +| rocky | | - | | | | green +| | | - | | | | blue | | | + List of data types + Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description +--------+----------+---------------+------+----------+----------+-------------------+------------- + s1 | obj_enum | obj_enum | 4 | red +| appuser3 | | + | | | | green +| | | + | | | | blue | | | (1 row) \dT+ obj_range - List of data types - Schema | Name | Internal name | Size | Elements | Owner | Access 
privileges | Description ---------+-----------+---------------+------+----------+-------+-------------------+------------- - s1 | obj_range | obj_range | var | | rocky | | + List of data types + Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description +--------+-----------+---------------+------+----------+----------+-------------------+------------- + s1 | obj_range | obj_range | var | | appuser3 | | (1 row) -- Validate view @@ -330,7 +350,7 @@ DROP EXTENSION DROP FOREIGN DATA WRAPPER obj_fdw CASCADE; NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to server obj_server -drop cascades to user mapping for rocky on server obj_server +drop cascades to user mapping for adminuser on server obj_server drop cascades to foreign table obj_foreign_table INFO: DDL statement replicated. DROP FOREIGN DATA WRAPPER @@ -425,8 +445,8 @@ INFO: DDL statement replicated. DROP ROLE DROP SCHEMA s1 CASCADE; NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to type obj_type +DETAIL: drop cascades to function int4_sum(integer,integer) +drop cascades to type obj_type drop cascades to type obj_domain -drop cascades to function int4_sum(integer,integer) INFO: DDL statement replicated. DROP SCHEMA diff --git a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql index 124a9e56..6ff7a7b5 100644 --- a/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql +++ b/t/auto_ddl/6666b_all_objects_validate_and_drop_n2.sql @@ -1,6 +1,16 @@ +SELECT pg_sleep(2);--to ensure all objects are replicated + ---- Validate all objects on n2 and then drop them on n2 that should also drop objects on n1 -- Validate database, should not exist -\l obj_database +-- due to catalog changes in pg17, we are not using a \l meta command anymore +SELECT + datname AS name, + pg_catalog.pg_get_userbyid(datdba) AS owner, + pg_catalog.pg_encoding_to_char(encoding) AS encoding +FROM + pg_database +WHERE + datname = 'obj_database'; -- Validate extension \dx "uuid-ossp" @@ -36,8 +46,16 @@ SELECT count(*) FROM pg_tablespace WHERE spcname = 'obj_tablespace'; -- Validate aggregate \da obj_aggregate --- Validate collation -\dO obj_collation +-- Validate collation , using a query instead of meta command as there are catalog changes in pg17 +SELECT + n.nspname AS schema, + c.collname AS name +FROM + pg_collation c +JOIN + pg_namespace n ON n.oid = c.collnamespace +WHERE + c.collname = 'obj_collation'; -- Validate conversion \dc obj_conversion diff --git a/t/auto_ddl/6666c_all_objects_validate_n1.out b/t/auto_ddl/6666c_all_objects_validate_n1.out index aa5cee2b..4759ba45 100644 --- a/t/auto_ddl/6666c_all_objects_validate_n1.out +++ b/t/auto_ddl/6666c_all_objects_validate_n1.out @@ -1,3 +1,9 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + pg_sleep +---------- + +(1 row) + --Drop the objects directly on n1 that weren't auto replicated (expected) DROP DATABASE obj_database; WARNING: This DDL statement will not be replicated. @@ -12,10 +18,17 @@ WARNING: This DDL statement will not be replicated. 
DROP SUBSCRIPTION --Validate all objects on n1 do not exist -- Validate database -\l obj_database - List of databases - Name | Owner | Encoding | Locale Provider | Collate | Ctype | ICU Locale | ICU Rules | Access privileges -------+-------+----------+-----------------+---------+-------+------------+-----------+------------------- +-- Validate database (due to catalog changes in pg17, we are not using \l meta command anymore) +SELECT + datname AS name, + pg_catalog.pg_get_userbyid(datdba) AS owner, + pg_catalog.pg_encoding_to_char(encoding) AS encoding +FROM + pg_database +WHERE + datname = 'obj_database'; + name | owner | encoding +------+-------+---------- (0 rows) -- Validate extension @@ -94,11 +107,18 @@ List of user mappings --------+------+------------------+---------------------+------------- (0 rows) --- Validate collation -\dO obj_collation - List of collations - Schema | Name | Provider | Collate | Ctype | ICU Locale | ICU Rules | Deterministic? ---------+------+----------+---------+-------+------------+-----------+---------------- +-- Validate collation , using a query instead of meta command as there are catalog changes in pg17 +SELECT + n.nspname AS schema, + c.collname AS name +FROM + pg_collation c +JOIN + pg_namespace n ON n.oid = c.collnamespace +WHERE + c.collname = 'obj_collation'; + schema | name +--------+------ (0 rows) -- Validate conversion @@ -247,3 +267,10 @@ Did not find any relation named "obj_table". Role name | Attributes -----------+------------ +--role cleanup +DROP OWNED BY appuser3; +INFO: DDL statement replicated. +DROP OWNED +DROP ROLE appuser3; +INFO: DDL statement replicated. +DROP ROLE diff --git a/t/auto_ddl/6666c_all_objects_validate_n1.sql b/t/auto_ddl/6666c_all_objects_validate_n1.sql index 973ba2bf..8a5ba9ab 100644 --- a/t/auto_ddl/6666c_all_objects_validate_n1.sql +++ b/t/auto_ddl/6666c_all_objects_validate_n1.sql @@ -1,3 +1,5 @@ +SELECT pg_sleep(1);--to ensure all objects are replicated + --Drop the objects directly on n1 that weren't auto replicated (expected) DROP DATABASE obj_database; --The tablespace will have to be dropped in the _c file @@ -8,7 +10,15 @@ DROP SUBSCRIPTION obj_subscription; --Validate all objects on n1 do not exist -- Validate database -\l obj_database +-- Validate database (due to catalog changes in pg17, we are not using \l meta command anymore) +SELECT + datname AS name, + pg_catalog.pg_get_userbyid(datdba) AS owner, + pg_catalog.pg_encoding_to_char(encoding) AS encoding +FROM + pg_database +WHERE + datname = 'obj_database'; -- Validate extension \dx "uuid-ossp" @@ -43,8 +53,17 @@ SELECT count(*) FROM pg_tablespace WHERE spcname = 'obj_tablespace'; -- Validate aggregate \da obj_aggregate --- Validate collation -\dO obj_collation +-- Validate collation , using a query instead of meta command as there are catalog changes in pg17 +SELECT + n.nspname AS schema, + c.collname AS name +FROM + pg_collation c +JOIN + pg_namespace n ON n.oid = c.collnamespace +WHERE + c.collname = 'obj_collation'; + -- Validate conversion \dc obj_conversion @@ -114,3 +133,7 @@ WHERE ty.typname = 'int4' AND l.lanname = 'sql'; -- Validate group \dg obj_group + +--role cleanup +DROP OWNED BY appuser3; +DROP ROLE appuser3;
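The cleanup files that follow rely on ordering: DROP ROLE refuses to run while the role still owns objects or holds privileges, so each role's dependencies are detached first. The minimal shape of that sequence, using a role name from this suite:

    DROP OWNED BY appuser3;  -- drops objects owned by the role in the current database and revokes its grants
    DROP ROLE appuser3;      -- now succeeds, since nothing depends on the role any longer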
diff --git a/t/auto_ddl/6901_env_cleanup_autoddl_n1.out b/t/auto_ddl/6901_env_cleanup_autoddl_n1.out new file mode 100644 index 00000000..ef680696 --- /dev/null +++ b/t/auto_ddl/6901_env_cleanup_autoddl_n1.out @@ -0,0 +1,16 @@ +-- This is an autoddl cleanup file cleaning up all objects created via the 6001 setup script +DROP OWNED BY adminuser; +INFO: DDL statement replicated. +DROP OWNED +DROP OWNED BY appuser; +INFO: DDL statement replicated. +DROP OWNED +DROP FUNCTION IF EXISTS public.get_table_repset_info(TEXT); +INFO: DDL statement replicated. +DROP FUNCTION +DROP ROLE IF EXISTS appuser; +INFO: DDL statement replicated. +DROP ROLE +DROP ROLE adminuser; +INFO: DDL statement replicated. +DROP ROLE diff --git a/t/auto_ddl/6901_env_cleanup_autoddl_n1.sql b/t/auto_ddl/6901_env_cleanup_autoddl_n1.sql new file mode 100644 index 00000000..5952a297 --- /dev/null +++ b/t/auto_ddl/6901_env_cleanup_autoddl_n1.sql @@ -0,0 +1,11 @@ +-- This is an autoddl cleanup file cleaning up all objects created via the 6001 setup script + +DROP OWNED BY adminuser; + +DROP OWNED BY appuser; + +DROP FUNCTION IF EXISTS public.get_table_repset_info(TEXT); + +DROP ROLE IF EXISTS appuser; + +DROP ROLE adminuser; diff --git a/t/cleanup_01_noderemove.py b/t/cleanup_01_node_remove.py similarity index 92% rename from t/cleanup_01_noderemove.py rename to t/cleanup_01_node_remove.py index 6f5ed640..9cbdcb12 100644 --- a/t/cleanup_01_noderemove.py +++ b/t/cleanup_01_node_remove.py @@ -25,11 +25,13 @@ util_test.printres(res) if res.returncode != 0: util_test.exit_message(f"Couldn't remove node {n}") + cmd_node = f"remove backrest" + res=util_test.run_nc_cmd("Remove", cmd_node, nodedir) modules = { pgname: False, f"snowflake-{pgname}": False, - f"spock33-{pgname}": False + f"spock": False } cmd_node = f"um list" diff --git a/t/cleanup_02_remove.py b/t/cleanup_02_remove.py new file mode 100644 index 00000000..013000bf --- /dev/null +++ b/t/cleanup_02_remove.py @@ -0,0 +1,47 @@ +import sys, os, util_test, subprocess +import json + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() + +ncdir = os.getenv("NC_DIR") +homedir = os.getenv("EDGE_HOME_DIR") +clusterdir = os.getenv('EDGE_CLUSTER_DIR') +numnodes = int(os.getenv('EDGE_NODES')) +pgname = os.getenv('EDGE_COMPONENT') + +## First Cleanup Script- Removes Nodes + +nodedir = os.path.join(clusterdir, f"nc", "pgedge") + +cmd_node = f"remove {pgname} --rm-data" +res=util_test.run_nc_cmd("Remove", cmd_node, nodedir) +util_test.printres(res) +if res.returncode != 0: + util_test.exit_message(f"Couldn't remove nc node") + +modules = { + pgname: False, + f"snowflake-{pgname}": False, + f"spock": False +} + +cmd_node = f"um list" +res=util_test.run_nc_cmd("List", cmd_node, nodedir) +util_test.printres(res) + +for line in res.stdout.strip().split("\\n"): + for key in modules.keys(): + if key in line and "Installed" in line: + modules[key] = True + +for key in modules.keys(): + if modules[key]: + util_test.exit_message(f"Failed, module {key} still installed") + else: + print(f"Module {key} was removed") + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) diff --git a/t/cleanup_02_pgremove.py b/t/cleanup_03_remove_nc.py similarity index 100% rename from t/cleanup_02_pgremove.py rename to t/cleanup_03_remove_nc.py diff --git a/t/cluster-add-node.py b/t/cluster-add-node.py index 9258079e..498021ce 100644 --- a/t/cluster-add-node.py +++ b/t/cluster-add-node.py @@ -58,8 +58,7 @@ ## Create the json file for n4: -data = {'json_version': 1.0, 'node_groups': [{'ssh': {'os_user': 'ec2-user', 'private_key': ''}, 'name': 'n4', 'is_active': 'on', 'public_ip': '127.0.0.1', 'private_ip': '127.0.0.1', -'port': '6435', 'path': '/home/ec2-user/work/platform_test/nc/pgedge/cluster/demo/n4'}]} +data = {'json_version': 1.1, 'node_groups': [{'ssh': {'os_user': 
'ec2-user', 'private_key': ''}, 'name': 'n4', 'is_active': 'on', 'public_ip': '127.0.0.1', 'private_ip': '127.0.0.1', 'port': '6435', 'path': '/home/ec2-user/work/platform_test/nc/pgedge/cluster/demo/n4', 'replicas': '0'}]} file_name = 'n4.json' @@ -71,7 +70,7 @@ source = (f"n4.json") target = (f"{home_dir}/n4.json") -#print(f"home_dir = {home_dir}\n") +print(f"home_dir = {home_dir}\n") print(f"We need to copy that file to: {home_dir}") shutil.move(source, target) print("*"*100) diff --git a/t/cluster-init-2-node-cluster.py b/t/cluster-init-2-node-cluster.py new file mode 100644 index 00000000..28a2acc5 --- /dev/null +++ b/t/cluster-init-2-node-cluster.py @@ -0,0 +1,78 @@ +import sys, os, util_test, subprocess, json + +# Print Script +print(f"Starting - {os.path.basename(__file__)}") + +# Get Test Settings +util_test.set_env() +repo=os.getenv("EDGE_REPO") +pgv=os.getenv("EDGE_INST_VERSION") +num_nodes=int(os.getenv("EDGE_NODES",2)) +home_dir=os.getenv("EDGE_HOME_DIR") +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +cluster_name=os.getenv("EDGE_CLUSTER","demo") +port=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","lcusr") +pw=os.getenv("EDGE_PASSWORD","password") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","susan") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +spockver=os.getenv("EDGE_SPOCK_VER","4.0.1") +dbname=os.getenv("EDGE_DB","lcdb") + +cwd=os.getcwd() +num_nodes=2 +port=6432 +port1=6433 + +#print("*"*100) + +print(f"home_dir = {home_dir}\n") +command = (f"cluster json-create {cluster_name} {num_nodes} {dbname} {usr} {pw} --pg_ver={pgv} --port={port} --force") +res=util_test.run_nc_cmd("This command should create a json file that defines a cluster", command, f"{home_dir}") +print(f"res = {res}\n") + + +port_0 = (f"{port}") +port_1 = (f"{port1}") +pg_ver = (f"{pgv}") +new_ver = (f"{spockver}") +print(f"Spock new version is: {new_ver}") +new_path_0 = (f"{cwd}/{cluster_dir}/n1") +new_path_1 = (f"{cwd}/{cluster_dir}/n2") + + +with open(f"{cluster_dir}/{cluster_name}.json", 'r') as file: + data = json.load(file) + #print(data) + data["pgedge"]["pg_version"] = pg_ver + data["pgedge"]["spock"]["spock_version"] = new_ver + data["node_groups"][0]["port"] = port_0 + data["node_groups"][1]["port"] = port_1 + data["node_groups"][0]["path"] = new_path_0 + data["node_groups"][1]["path"] = new_path_1 + +newdata = json.dumps(data, indent=4) +with open(f"{cluster_dir}/{cluster_name}.json", 'w') as file: + file.write(newdata) + +print(newdata) + +command = (f"cluster init {cluster_name}") +init=util_test.run_nc_cmd("This command should initialize a cluster based on the json file", command, f"{home_dir}") +print(f"init = {init.stdout}\n") +print("*"*100) + + +# Needle and Haystack +# Confirm the command worked by looking for: + +if "\nSyntaxError" not in str(init.stdout) or init.returncode == 1: + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + + + diff --git a/t/cluster-init-bad-json.py b/t/cluster-init-bad-json.py index c238150c..7b37d186 100644 --- a/t/cluster-init-bad-json.py +++ b/t/cluster-init-bad-json.py @@ -1,4 +1,4 @@ -import sys, os, util_test,subprocess +import sys, os, util_test, subprocess, json # Print Script print(f"Starting - {os.path.basename(__file__)}") @@ -22,6 +22,7 @@ tmpcluster = "holdings" file_name = (f"{tmpcluster}.json") +cwd=os.getcwd() # # Use cluster json-template to create a template file: @@ -32,22 +33,35 @@ print(f"res = {res}\n") print("*"*100) -# + +# We're going to 
modify the path in our .json file so the new cluster lands in our nc directory: +new_path_0 = (f"{cwd}/{home_dir}/cluster/{tmpcluster}/{tmpcluster}/n1") +new_path_1 = (f"{cwd}/{home_dir}/cluster/{tmpcluster}/{tmpcluster}/n2") + +print(f"new_path_0: {new_path_0}") +print(f"new_path_1: {new_path_1}") + +with open(f"{cwd}/{home_dir}/cluster/{tmpcluster}/{tmpcluster}.json", 'r') as file: + data = json.load(file) + print(data) + data["node_groups"][0]["path"] = new_path_0 + data["node_groups"][1]["path"] = new_path_1 + +newdata = json.dumps(data, indent=4) +with open(f"{cwd}/{home_dir}/cluster/{tmpcluster}/{tmpcluster}.json", 'w') as file: + file.write(newdata) + # Use cluster init to initialize the cluster defined in the template file. # This will throw an error because both ports in the json file are the same. # command = (f"cluster init {tmpcluster}") -res1=util_test.run_nc_cmd("This command attempts to initialize the cluster", command, f"{home_dir}") -print(f"The attempt to initialize returns = {res1.returncode}\n") -print(f"The attempt to initialize the cluster should fail = {res1.stdout}\n") +res1=util_test.run_nc_cmd("This command initializes the cluster", command, f"{home_dir}") +print(f"res1.returncode contains = {res1.returncode}\n") +print(f"res1.stdout contains = {res1.stdout}\n") print("*"*100) -# Per Cady, the way the functionality is coded, it returns a 0 until we account for the errors. -# This seems a bit backwards, but we'll check for 0 and FAILED: -if res1.returncode == 0 and "FAILED" in str(res1.stdout): - print("This case should return: ERROR: Cannot install over a non-empty 'pgedge' directory.") - print("The JSON file is unmodified, so it installs twice into the same port") +if res1.returncode == 0 and "FAILED" not in str(res1.stdout): util_test.EXIT_PASS() else: diff --git a/t/cluster-init-bad-version.py b/t/cluster-init-bad-version.py new file mode 100644 index 00000000..ba651669 --- /dev/null +++ b/t/cluster-init-bad-version.py @@ -0,0 +1,75 @@ +import sys, os, util_test, subprocess, json + +# Print Script +print(f"Starting - {os.path.basename(__file__)}") + +# Get Test Settings +util_test.set_env() +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +home_dir=os.getenv("EDGE_HOME_DIR") +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +cluster_name=os.getenv("EDGE_CLUSTER","demo") +port=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","lcusr") +pw=os.getenv("EDGE_PASSWORD","password") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","susan") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +spockver=os.getenv("EDGE_SPOCK_VER","4.0.1") +dbname=os.getenv("EDGE_DB","lcdb") + +cwd=os.getcwd() +num_nodes=3 + +## Set Postgres version to an invalid version; we'll use 14 since it's deprecated, so this might not fail on all branches right away. 
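+## (A sketch of the assumption behind this choice: any major version outside the CLI's supported
+## list should make `cluster init` report [FAILED]; an obviously-unsupported value such as "99"
+## would be a more durable needle than a merely-deprecated release.)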
+pgv="14" + +#print("*"*100) + +print(f"home_dir = {home_dir}\n") +command = (f"cluster json-template {cluster_name} {dbname} {num_nodes} {usr} {pw} {pgv} {port}") +res=util_test.run_nc_cmd("This command should create a json file that defines a cluster", command, f"{home_dir}") +print(f"res = {res}\n") + +new_ver = (f"{spockver}") +print(new_ver) +new_path_0 = (f"{cwd}/{cluster_dir}/n1") +new_path_1 = (f"{cwd}/{cluster_dir}/n2") +new_path_2 = (f"{cwd}/{cluster_dir}/n3") + + +with open(f"{cluster_dir}/{cluster_name}.json", 'r') as file: + data = json.load(file) + #print(data) + data["pgedge"]["spock"]["spock_version"] = new_ver + data["node_groups"][0]["path"] = new_path_0 + data["node_groups"][1]["path"] = new_path_1 + data["node_groups"][2]["path"] = new_path_2 + +newdata = json.dumps(data, indent=4) +with open(f"{cluster_dir}/{cluster_name}.json", 'w') as file: + file.write(newdata) + +print(newdata) + +command = (f"cluster init {cluster_name}") +init=util_test.run_nc_cmd("This command should initialize a cluster based on the json file", command, f"{home_dir}") +print(f"init = {init.stdout}\n") +print("*"*100) + +## Note: this is a negative test, so the search is looking for the phrase [FAILED] in the results. In this case, passing the test +## while failing the installation is the desired behavior! + +# Needle and Haystack +# Confirm the command worked by looking for: + +if "[FAILED]" in str(init.stdout) or init.returncode == 1: + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + + + diff --git a/t/cluster-init-force-spock-version.py b/t/cluster-init-force-spock-version.py index 369881da..235a1f2f 100644 --- a/t/cluster-init-force-spock-version.py +++ b/t/cluster-init-force-spock-version.py @@ -18,8 +18,7 @@ repuser=os.getenv("EDGE_REPUSER","susan") repset=os.getenv("EDGE_REPSET","demo-repset") spockpath=os.getenv("EDGE_SPOCK_PATH") -spockver=("EDGE_SPOCK_DEFAULT_VER","3.3.6") -spockpinver=("EDGE_SPOCK_PINNED_VER","3.3.6") +spockver=("EDGE_SPOCK_VER","3.3.6") dbname=os.getenv("EDGE_DB","lcdb") cwd=os.getcwd() @@ -64,7 +63,7 @@ # Needle and Haystack # Confirm the command worked by looking for: -if "\nSyntaxError" not in str(init.stdout) or init.returncode == 1: +if "[FAILED]" not in str(init.stdout) or init.returncode == 1: util_test.EXIT_PASS() else: diff --git a/t/cluster-init.py b/t/cluster-init.py index 7d173ba8..60a156b3 100644 --- a/t/cluster-init.py +++ b/t/cluster-init.py @@ -18,19 +18,28 @@ repuser=os.getenv("EDGE_REPUSER","susan") repset=os.getenv("EDGE_REPSET","demo-repset") spockpath=os.getenv("EDGE_SPOCK_PATH") +spockver=os.getenv("EDGE_SPOCK_VER","4.0.1") dbname=os.getenv("EDGE_DB","lcdb") cwd=os.getcwd() num_nodes=3 - +port=6432 +port1=6433 +port2=6434 #print("*"*100) print(f"home_dir = {home_dir}\n") -command = (f"cluster json-template {cluster_name} {dbname} {num_nodes} {usr} {pw} {pgv} {port}") +command = (f"cluster json-create {cluster_name} {num_nodes} {dbname} {usr} {pw} --port={port} --pg_ver={pgv} --force") res=util_test.run_nc_cmd("This command should create a json file that defines a cluster", command, f"{home_dir}") print(f"res = {res}\n") +pg_ver = (f"{pgv}") +new_ver = (f"{spockver}") +port_0 = (f"{port}") +port_1 = (f"{port1}") +port_2 = (f"{port2}") +print(f"Spock new version, pg version, port, port_01, port_02 is: {new_ver}, {pg_ver}, {port_0}, {port_1}, {port_2}") new_path_0 = (f"{cwd}/{cluster_dir}/n1") new_path_1 = (f"{cwd}/{cluster_dir}/n2") new_path_2 = (f"{cwd}/{cluster_dir}/n3") @@ -38,25 +47,21 @@ with 
open(f"{cluster_dir}/{cluster_name}.json", 'r') as file: data = json.load(file) - #print(data) + #print(f"Line 49 - {data}") + data["pgedge"]["pg_version"] = pg_ver + data["pgedge"]["spock"]["spock_version"] = new_ver + data["node_groups"][0]["port"] = port_0 + data["node_groups"][1]["port"] = port_1 + data["node_groups"][2]["port"] = port_2 data["node_groups"][0]["path"] = new_path_0 data["node_groups"][1]["path"] = new_path_1 data["node_groups"][2]["path"] = new_path_2 - - -#with open(f"{cluster_dir}/{cluster_name}.json", 'r') as file: -# data = json.load(file) -# #print(data) -# data["node_groups"][0]["nodes"][0]["path"] = new_path_0 -# data["node_groups"][1]["nodes"][0]["path"] = new_path_1 -# data["node_groups"][2]["nodes"][0]["path"] = new_path_2 - newdata = json.dumps(data, indent=4) with open(f"{cluster_dir}/{cluster_name}.json", 'w') as file: file.write(newdata) - +print(newdata) command = (f"cluster init {cluster_name}") init=util_test.run_nc_cmd("This command should initialize a cluster based on the json file", command, f"{home_dir}") diff --git a/t/cluster-remove-node.py b/t/cluster-remove-node.py index c810ed6a..7f6830d2 100644 --- a/t/cluster-remove-node.py +++ b/t/cluster-remove-node.py @@ -52,6 +52,7 @@ print(f"Successful command: {command2}") print(f"The successful remove-node command returns = {res2}\n") print("*"*100) +print("This test case only removes the replication artifacts. The PG installation, data directory, and n3 subdir will remain") # Needle and Haystack # Confirm the command worked by looking for: diff --git a/t/column_filtering.pl b/t/column_filtering.pl index 0bdc264b..1f062e4b 100644 --- a/t/column_filtering.pl +++ b/t/column_filtering.pl @@ -198,11 +198,9 @@ exit(1); } -## Needle and Haystack - Note - this case was erroneously passing before. I've updated the 'needle' to include the content -# that should be returned when column filtering works, but the number of spaces in the NULL column may need to be adjusted -# when the fix is in: +## Needle and Haystack -if(contains(@$stdout_buf7[0], "8 | | Alice | Adams | 18 Austin Blvd | Austin, TX | | US |")) +if(contains(@$stdout_buf7[0], "8 | | Alice | Adams | 18 Austin Blvd")) { exit(0); } diff --git a/t/install_PGs_and_exercise_service.py b/t/install_PGs_and_exercise_service.py new file mode 100644 index 00000000..a9cd5b6b --- /dev/null +++ b/t/install_PGs_and_exercise_service.py @@ -0,0 +1,60 @@ +## This script finds all of the available versions of PG and installs each version with the setup command. 
+ +import sys, os, util_test, subprocess, json + +# Print Script +print(f"Starting - {os.path.basename(__file__)}") + +# Get Test Settings +util_test.set_env() +repo=os.getenv("EDGE_REPO") +pgv=os.getenv("EDGE_INST_VERSION") +num_nodes=int(os.getenv("EDGE_NODES",2)) +home_dir=os.getenv("EDGE_HOME_DIR") +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +cluster_name=os.getenv("EDGE_CLUSTER","demo") +port=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","lcusr") +pw=os.getenv("EDGE_PASSWORD","password") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","susan") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") + +## Create the variables we'll be using in this script: +components = [] +versions = "" +## Service options tested by this script are: +services = ["status","stop","start","restart","reload","enable","disable","config"] +#print("*"*100) + +## We'll call find_pg_versions to return a list of Postgres {versions} available through UM. +versions,components=util_test.find_pg_versions(home_dir) + +## Then loop through the versions and install each version with the setup command: +for version in versions: + ## Find a free port for the PG installation; call get_avail_port and pass in the port number: + free_port=util_test.get_avail_ports(port) + install_pg=(f"setup -U {usr} -d {dbname} -P {pw} --port={free_port} --pg_ver={version}") + print(f"The setup command executing now is: {install_pg}") + installed_res=util_test.run_nc_cmd("Installing Postgres versions available", install_pg, f"{home_dir}") + print(installed_res) + + ## Increase the port value by 1 before installing the next version of Postgres: + port = port + 1 + + ## Check to see if the installation was successful + if installed_res.returncode == 0: + print(f"Command succeeded for Postgres {version}:{installed_res.stdout}") + +for component in components: + for svc in services: + ## Exercise the installed Postgres services + print(f"component: {component}") + print(f"svc: {svc}") + command = (f"service {svc} --component={component}") + exercise_svc=util_test.run_nc_cmd("Exercising the service", command, f"{home_dir}") + print(f"The command to exercise the service contains: {exercise_svc}") + + diff --git a/t/lib/config.env b/t/lib/config.env index 5631b368..c6c925df 100644 --- a/t/lib/config.env +++ b/t/lib/config.env @@ -1,6 +1,6 @@ # Use this file to set a group of values to environment variables; you can source this file to set all the values at once. 
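+# For example (assuming the schedules are run from the repo root):
+#   source t/lib/config.env && echo "$EDGE_INST_VERSION"   # expected output: 17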
export EDGE_INSTALL_SCRIPT=install.py -export REPO=https://pgedge-upstream.s3.amazonaws.com/REPO +export REPO=https://pgedge-devel.s3.amazonaws.com/REPO export EDGE_REPO=$REPO/$EDGE_INSTALL_SCRIPT export EDGE_HOST=127.0.0.1 @@ -27,16 +27,20 @@ export EDGE_DB="lcdb" export EDGE_REPUSER=`whoami` # postgres version details -export EDGE_INST_VERSION=16 +export EDGE_INST_VERSION=17 export EDGE_COMPONENT="pg$EDGE_INST_VERSION" -# spock version to install, if pinned_ver has a value, it will be prioritised over default_ver -# keep pinned_ver empty if you want to use spocks default version +# Leave spock_ver empty if you want to use spock's default version # As of 1st August 2024, spock40 is the default pinned version -export EDGE_SPOCK_DEFAULT_VER="4.0" -export EDGE_SPOCK_PINNED_VER="" + +# As of 10/23/24: Note that if the spock version is empty, cluster add-node will FAIL: +# It will return an error: ERROR: function spock.set_cluster_readonly() does not exist +export EDGE_SPOCK_VER="4.0.5" export EDGE_CLI="pgedge" # Path to store autoddl related actual outputs export EDGE_ACTUAL_OUT_DIR="/tmp/auto_ddl/" + +# To ensure locale-related outputs (such as monetary values) stay consistent +export LC_ALL="C.UTF-8" diff --git a/t/020_nodectl_install_pgedge.pl b/t/maybe_delete/020_nodectl_install_pgedge.pl similarity index 100% rename from t/020_nodectl_install_pgedge.pl rename to t/maybe_delete/020_nodectl_install_pgedge.pl diff --git a/t/020cf_nodectl_install_pgedge.pl b/t/maybe_delete/020cf_nodectl_install_pgedge.pl similarity index 100% rename from t/020cf_nodectl_install_pgedge.pl rename to t/maybe_delete/020cf_nodectl_install_pgedge.pl diff --git a/t/100_setup_script.pl b/t/maybe_delete/100_setup_script.pl similarity index 100% rename from t/100_setup_script.pl rename to t/maybe_delete/100_setup_script.pl diff --git a/t/105_remove_pgedge.pl b/t/maybe_delete/105_remove_pgedge.pl similarity index 100% rename from t/105_remove_pgedge.pl rename to t/maybe_delete/105_remove_pgedge.pl diff --git a/t/2990_service_breakdown_with_control_check_pg16.pl b/t/maybe_delete/2990_service_breakdown_with_control_check_pg16.pl similarity index 100% rename from t/2990_service_breakdown_with_control_check_pg16.pl rename to t/maybe_delete/2990_service_breakdown_with_control_check_pg16.pl diff --git a/t/5001_cluster_build.py b/t/maybe_delete/5001_cluster_build.py similarity index 100% rename from t/5001_cluster_build.py rename to t/maybe_delete/5001_cluster_build.py diff --git a/t/5002_build_cluster_with_n_nodes.py b/t/maybe_delete/5002_build_cluster_with_n_nodes.py similarity index 100% rename from t/5002_build_cluster_with_n_nodes.py rename to t/maybe_delete/5002_build_cluster_with_n_nodes.py diff --git a/t/5003_sub_env.py b/t/maybe_delete/5003_sub_env.py similarity index 100% rename from t/5003_sub_env.py rename to t/maybe_delete/5003_sub_env.py diff --git a/t/5004_node_list.py b/t/maybe_delete/5004_node_list.py similarity index 100% rename from t/5004_node_list.py rename to t/maybe_delete/5004_node_list.py diff --git a/t/5005_rep_list.py b/t/maybe_delete/5005_rep_list.py similarity index 100% rename from t/5005_rep_list.py rename to t/maybe_delete/5005_rep_list.py diff --git a/t/5006_sub_drop.py b/t/maybe_delete/5006_sub_drop.py similarity index 100% rename from t/5006_sub_drop.py rename to t/maybe_delete/5006_sub_drop.py diff --git a/t/5007_repset_drop.py b/t/maybe_delete/5007_repset_drop.py similarity index 100% rename from t/5007_repset_drop.py rename to t/maybe_delete/5007_repset_drop.py diff --git
a/t/5008_node_drop.py b/t/maybe_delete/5008_node_drop.py similarity index 100% rename from t/5008_node_drop.py rename to t/maybe_delete/5008_node_drop.py diff --git a/t/8000a_env_setup_pgedge_node1.pl b/t/maybe_delete/8000a_env_setup_pgedge_node1.pl similarity index 100% rename from t/8000a_env_setup_pgedge_node1.pl rename to t/maybe_delete/8000a_env_setup_pgedge_node1.pl diff --git a/t/8000b_install_pgedge_node1.pl b/t/maybe_delete/8000b_install_pgedge_node1.pl similarity index 100% rename from t/8000b_install_pgedge_node1.pl rename to t/maybe_delete/8000b_install_pgedge_node1.pl diff --git a/t/8001a_env_setup_pgedge_node2.pl b/t/maybe_delete/8001a_env_setup_pgedge_node2.pl similarity index 100% rename from t/8001a_env_setup_pgedge_node2.pl rename to t/maybe_delete/8001a_env_setup_pgedge_node2.pl diff --git a/t/8001b_install_pgedge_node2.pl b/t/maybe_delete/8001b_install_pgedge_node2.pl similarity index 100% rename from t/8001b_install_pgedge_node2.pl rename to t/maybe_delete/8001b_install_pgedge_node2.pl diff --git a/t/8002a_env_setup_pgedge_node3.pl b/t/maybe_delete/8002a_env_setup_pgedge_node3.pl similarity index 100% rename from t/8002a_env_setup_pgedge_node3.pl rename to t/maybe_delete/8002a_env_setup_pgedge_node3.pl diff --git a/t/8002b_install_pgedge_node3.pl b/t/maybe_delete/8002b_install_pgedge_node3.pl similarity index 100% rename from t/8002b_install_pgedge_node3.pl rename to t/maybe_delete/8002b_install_pgedge_node3.pl diff --git a/t/8999_env_remove_pgedge_node2.pl b/t/maybe_delete/8999_env_remove_pgedge_node2.pl similarity index 100% rename from t/8999_env_remove_pgedge_node2.pl rename to t/maybe_delete/8999_env_remove_pgedge_node2.pl diff --git a/t/8999b_env_remove_pgedge_node3.pl b/t/maybe_delete/8999b_env_remove_pgedge_node3.pl similarity index 100% rename from t/8999b_env_remove_pgedge_node3.pl rename to t/maybe_delete/8999b_env_remove_pgedge_node3.pl diff --git a/t/902_create_cluster-local_n1.pl b/t/maybe_delete/902_create_cluster-local_n1.pl similarity index 100% rename from t/902_create_cluster-local_n1.pl rename to t/maybe_delete/902_create_cluster-local_n1.pl diff --git a/t/903_create_cluster-local_n2.pl b/t/maybe_delete/903_create_cluster-local_n2.pl similarity index 100% rename from t/903_create_cluster-local_n2.pl rename to t/maybe_delete/903_create_cluster-local_n2.pl diff --git a/t/906_check_node_one.pl b/t/maybe_delete/906_check_node_one.pl similarity index 100% rename from t/906_check_node_one.pl rename to t/maybe_delete/906_check_node_one.pl diff --git a/t/998_breakdown_cluster.pl b/t/maybe_delete/998_breakdown_cluster.pl similarity index 100% rename from t/998_breakdown_cluster.pl rename to t/maybe_delete/998_breakdown_cluster.pl diff --git a/t/9998_remove_nc_and_pgpass_dirs.py b/t/maybe_delete/9998_remove_nc_and_pgpass_dirs.py similarity index 100% rename from t/9998_remove_nc_and_pgpass_dirs.py rename to t/maybe_delete/9998_remove_nc_and_pgpass_dirs.py diff --git a/t/999_gsg_breakdown.pl b/t/maybe_delete/999_gsg_breakdown.pl similarity index 100% rename from t/999_gsg_breakdown.pl rename to t/maybe_delete/999_gsg_breakdown.pl diff --git a/t/setup_02_setup.py b/t/setup_02_setup.py new file mode 100644 index 00000000..42b62a1b --- /dev/null +++ b/t/setup_02_setup.py @@ -0,0 +1,66 @@ +import sys, os, util_test, subprocess +import json + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() + +ncdir = os.getenv("NC_DIR") +homedir = os.getenv("EDGE_HOME_DIR") +clusterdir = os.getenv('EDGE_CLUSTER_DIR') 
+numnodes = int(os.getenv('EDGE_NODES')) +clicommand = os.getenv('EDGE_CLI') +pgusn = os.getenv('EDGE_USERNAME') +pgpsw = os.getenv('EDGE_PASSWORD') +dbname = os.getenv('EDGE_DB') +startport = int(os.getenv('EDGE_START_PORT')) +pgversion = os.getenv('EDGE_INST_VERSION') +pgname = os.getenv('EDGE_COMPONENT') +spockver = os.getenv('EDGE_SPOCK_VER') + +## Second Setup Script - Sets up a pgEdge single instance for testing + +os.chdir(os.path.join(f"nc", "pgedge")) + +# Deletes copydir +cmd_node = f"./{clicommand} setup -U {pgusn} -P {pgpsw} -d {dbname} -p {startport} --pg_ver {pgversion}" + +if spockver: + cmd_node = f"{cmd_node} --spock_ver \"{spockver}\"" +res=subprocess.run(cmd_node, shell=True, capture_output=True, text=True) +util_test.printres(res) +if res.returncode == 1: + util_test.exit_message(f"Failed {cmd_node}") +if "already installed" in res.stdout: + print("PG Already Running on Node") + +modules = { + pgname: False, + f"snowflake-{pgname}": False, + "spock": False +} + +cmd_node = f"./{clicommand} um list" +res=subprocess.run(cmd_node, shell=True, capture_output=True, text=True) +util_test.printres(res) + +for line in res.stdout.strip().split("\\n"): + for key in modules.keys(): + if key in line and "Installed" in line: + modules[key] = True + if key == "spock" and spockver: + if spockver in line: + print(f"Correct spock ver {spockver} is installed") + else: + util_test.exit_message(f"Failed, wrong spock ver {spockver} installed") + +for key in modules.keys(): + if modules[key]: + print(f"Module {key} is installed") + else: + util_test.exit_message(f"Failed, module {key} not installed") + +os.chdir("../..") +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) diff --git a/t/setup_02_nodecreate.py b/t/setup_03_node_install.py similarity index 100% rename from t/setup_02_nodecreate.py rename to t/setup_03_node_install.py diff --git a/t/setup_03_noderun.py b/t/setup_04_node_setup.py similarity index 78% rename from t/setup_03_noderun.py rename to t/setup_04_node_setup.py index 95f83081..1c4257f7 100644 --- a/t/setup_03_noderun.py +++ b/t/setup_04_node_setup.py @@ -18,7 +18,7 @@ startport = int(os.getenv('EDGE_START_PORT')) pgversion = os.getenv('EDGE_INST_VERSION') pgname = os.getenv('EDGE_COMPONENT') -spockver = os.getenv('EDGE_SPOCK_DEFAULT_VER') +spockver = os.getenv('EDGE_SPOCK_VER') ## Third Setup Script- Turns on Nodes for Testing @@ -27,7 +27,10 @@ os.chdir(os.path.join(f"n{n}", "pgedge")) # Deletes copydir - cmd_node = f"./{clicommand} setup -U {pgusn} -P {pgpsw} -d {dbname} -p {startport + n - 1} --pg_ver {pgversion} --spock_ver \"{spockver}\"" + cmd_node = f"./{clicommand} setup -U {pgusn} -P {pgpsw} -d {dbname} -p {startport + n - 1} --pg_ver {pgversion}" + if spockver: + cmd_node = f"{cmd_node} --spock_ver \"{spockver}\"" + res=subprocess.run(cmd_node, shell=True, capture_output=True, text=True) util_test.printres(res) if res.returncode == 1: @@ -39,7 +42,7 @@ modules = { pgname: False, f"snowflake-{pgname}": False, - f"spock33-{pgname}": False + "spock": False } cmd_node = f"./{clicommand} um list" @@ -50,6 +53,12 @@ for key in modules.keys(): if key in line and "Installed" in line: modules[key] = True + if key == "spock" and spockver: + if spockver in line: + print(f"Correct spock ver {spockver} is installed") + else: + util_test.exit_message(f"Failed, wrong spock ver {spockver} installed") + for key in modules.keys(): if modules[key]: diff --git a/t/spock_2_node_create.py b/t/spock_2_node_create.py index 5aa27465..adc32df7 100644 ---
a/t/spock_2_node_create.py +++ b/t/spock_2_node_create.py @@ -28,13 +28,6 @@ def run(): util_test.exit_message(f"Fail - {os.path.basename(__file__)} - Node Create", 1) port = port + 1 - ## Metrics Check Test - cmd_node = f"spock metrics-check {db}" - res=util_test.run_cmd("Metrics Check", cmd_node, f"{cluster_dir}/n1") - print(res) - if res.returncode == 1 or "mount_point" not in res.stdout: - util_test.exit_message(f"Fail - {os.path.basename(__file__)} - Metrics Check", 1) - if __name__ == "__main__": ## Print Script print(f"Starting - {os.path.basename(__file__)}") diff --git a/t/spock_3_sub_create_parallel.py b/t/spock_3_sub_create_parallel.py new file mode 100644 index 00000000..f191c165 --- /dev/null +++ b/t/spock_3_sub_create_parallel.py @@ -0,0 +1,70 @@ +# This test case (and the other spock_# tests) expect to be run against a two node cluster. +# If it fails with an error: pg_reload_conf \n----------------\n t\n(1 row)\n\nSet GUC snowflake.node to 1\n[\n {\n ... +# you are probably running against a 3 node cluster. +# Per conversation with Cady, we may want to use a new setup script written in .py that uses the same +# logic as 8000a/8000b, but that uses the environment variable values. + +import os, util_test, subprocess + +## Get Test Settings +util_test.set_env() + +def run(): + # Get environment variables + num_nodes = int(os.getenv("EDGE_NODES", 2)) + cluster_dir = os.getenv("EDGE_CLUSTER_DIR") + port=int(os.getenv("EDGE_START_PORT",6432)) + repuser=os.getenv("EDGE_REPUSER","pgedge") + pw=os.getenv("EDGE_PASSWORD","lcpasswd") + db=os.getenv("EDGE_DB","lcdb") + host=os.getenv("EDGE_HOST","localhost") + spock_delay=os.getenv("SPOCK_DELAY", None) + + parallel_array = [1,2] + + port_array = [] + for n in range(1,num_nodes+1): + port_array.append(port) + port = port + 1 + + for n in range(1,num_nodes+1): + for z in range(1,num_nodes+1): + for p in parallel_array: + if n!=z: + ## Create Subs + cmd_node = f"spock sub-create sub_n{n}n{z}_{p} 'host=127.0.0.1 port={port_array[z-1]} user={repuser} dbname={db}' {db}" + + if spock_delay is not None: + try: + spock_delay = int(spock_delay) + cmd_node += f" -a={spock_delay}" + + except Exception as e: + print(f"Error in getting spock_delay: {e}") + + res=util_test.run_cmd("Sub Create", cmd_node, f"{cluster_dir}/n{n}") + print(res) + if res.returncode == 1 or "sub_create" not in res.stdout: + util_test.exit_message(f"Fail - {os.path.basename(__file__)} - Sub Create", 1) + + print(f"Line 49, print db: {db}") + + ## Sub Show Status Test + cmd_node = (f"spock sub-show-status sub_n1n2_1 {db}") + res=util_test.run_cmd("Sub Show Status", cmd_node, f"{cluster_dir}/n1") + print(res) + if res.returncode == 1 or "replicating" not in res.stdout: + util_test.exit_message(f"Fail - {os.path.basename(__file__)} - Sub Show Status", 1) + + ## Node List Test + cmd_node = f"spock node-list {db}" + res=util_test.run_cmd("Node List", cmd_node, f"{cluster_dir}/n1") + print(res) + if res.returncode == 1 or "n2" not in res.stdout: + util_test.exit_message(f"Fail - {os.path.basename(__file__)} - Node List", 1) + +if __name__ == "__main__": + ## Print Script + print(f"Starting - {os.path.basename(__file__)}") + run() + util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) diff --git a/t/spock_create_sub_specify_repsets.py b/t/spock_create_sub_specify_repsets.py index fcb46ea8..49f98b32 100644 --- a/t/spock_create_sub_specify_repsets.py +++ b/t/spock_create_sub_specify_repsets.py @@ -19,14 +19,10 @@ dbname=os.getenv("EDGE_DB","lcdb") # # Create a 
subscription with an array of repsets; this is the 'happy path' testcase. -# First, we clean up the environment to remove the subscription. - -check_value = util_test.read_psql("select sub_name from spock.subscription;",host,dbname,port,pw,usr).strip("[]") -if "my_test_sub" in str(check_value): - drop_sub = f"spock sub-drop my_test_sub dbname={dbname}" - drop=util_test.run_cmd("Run spock sub-drop to prepare for test.", drop_sub, f"{cluster_dir}/n1") print("*"*100) +## The arguments for the following command have to be in the same order as documented: + command = f"spock sub-create my_test_sub 'host={host} port={port} user={repuser} dbname={dbname}' {dbname} -r 'this_repset,that_repset,the_other_repset'" res=util_test.run_cmd("Run spock sub-create -r.", command, f"{cluster_dir}/n1") print(f"Print our command here: {command}") diff --git a/t/spock_exception_table_case_discard1.py b/t/spock_exception_table_case_discard1.py new file mode 100644 index 00000000..53bcf939 --- /dev/null +++ b/t/spock_exception_table_case_discard1.py @@ -0,0 +1,186 @@ +import sys, os, util_test, subprocess, time + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") +home_dir=os.getenv("EDGE_HOME_DIR") +port2=port1+1 +nc_dir=os.getenv("NC_DIR","nc") +home_dir = os.getenv("EDGE_HOME_DIR") + + +## Check the information from cluster list-nodes. 
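+## (This is only a sanity check that the two-node demo cluster built by the setup
+## scripts is up before the exception GUCs are toggled below.)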
+res=util_test.run_nc_cmd("Check the cluster with the list-nodes command", (f"cluster list-nodes demo"), f"{home_dir}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Set the exception logging behaviors for the test: +for n in range(num_nodes): + n=n+1 + + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'discard', f"{cluster_dir}/n{n}") + print(f"Line 39 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + print(f"Line 42 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = f"service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 46 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 49 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 51 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 54 - res: {res.stdout}") + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case1 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case1 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case1 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Check replication + print(f"We're on node n{n} now:") + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print("Confirming the configuration") +## Confirm the configuration: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Confirm with SELECT * FROM spock.subscription.
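+ ## (Assumption: on a healthy cluster each spock.subscription row still shows the subscription enabled; with exception_behaviour=discard it should stay that way even after the bad row arrives later in this test.)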
+ result = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.subscriptions returns: {result}") + print("*"*100) + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Test Steps +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n1 that will not be replicated to n2 + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case1 VALUES (2, 70000, null); +END $$; +""" + +print(f"Executing the anonymous block: {anon_block}") +row = util_test.write_psql(f"{anon_block}",host,dbname,port1,pw,usr) +print(row) + +## Look for our row on n1 and n2: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT * FROM case1;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + port = port+1 + print("*"*100) + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## More test steps: +## Update the record that is out of sync, forcing a record into the exception table... +row = util_test.write_psql("UPDATE case1 SET filler = 'hi' WHERE bid = 2",host,dbname,port1,pw,usr) +print(f"TEST STEP: The update to bid 2 returns: {row}") +print("*"*100) + +## Demonstrate that replication continues on n1: +row = util_test.write_psql("UPDATE case1 SET filler = 'bye' WHERE bid = 1",host,dbname,port1,pw,usr) +print(f"TEST STEP: The update to bid 1 on n1 returns: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Check our table contents: + result = util_test.read_psql("SELECT * FROM case1;",host,dbname,port,pw,usr) + print(f"SELECT * from case1 on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Query the spock.exception_log; adding this command to cover error in 4.0.4 where a query on the wrong node caused a server crash. +row1 = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case1';",host,dbname,port1,pw,usr) +print(f"This command is the query that used to cause a server crash!
The result s/b []: {row1}") +print("*"*100) + +if '[]' not in str(row1): + util_test.EXIT_FAIL() + +## Confirm the test results from the spock.exception_log: +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case1';",host,dbname,port2,pw,usr) +print(f"TEST CONFIRMATION: SELECT * FROM spock.exception_log on n2 returns: {row}") +print("*"*100) + +if '"value": 2, "attname": "bid", "atttype": "int4"' in str(row): + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + diff --git a/t/spock_exception_table_case_discard2.py b/t/spock_exception_table_case_discard2.py new file mode 100644 index 00000000..caa9f2c6 --- /dev/null +++ b/t/spock_exception_table_case_discard2.py @@ -0,0 +1,221 @@ +import sys, os, util_test,subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") + +port2=port1+1 +print(port2) + +print("*"*100) +nc_dir=os.getenv("NC_DIR","nc") +print(nc_dir) +home_dir = os.getenv("EDGE_HOME_DIR") +print(home_dir) + +# Check the information from cluster list-nodes. +# +command = (f"cluster list-nodes demo") +res=util_test.run_nc_cmd("Exercise the list-nodes command", command, f"{home_dir}") +print(f"Command: {command}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Set the exception logging behaviors for the test: +for n in range(num_nodes): + n=n+1 + + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'discard', f"{cluster_dir}/n{n}") + print(f"Line 46 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + print(f"Line 49 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = f"service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 53 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 56 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 58 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 61 - res: {res.stdout}") + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case2 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case2 VALUES (1, 11111,
'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case2 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Check replication + print(f"{n} is the value in n") + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print("Confirming the configuration") +## Confirm the configuration: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Confirm with SELECT * FROM spock.subscription. + result = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.subscriptions returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n1 returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## Add two rows that should be replicated from n1 to n2: + +row = util_test.write_psql("INSERT INTO case2 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 11 on n1: {row}") +print("*"*100) + +row = util_test.write_psql("INSERT INTO case2 VALUES(22, 22000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 22 on n1: {row}") +print("*"*100) + + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case2;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n2 that will not be replicated to n1: + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case2 VALUES (33, 33000, null); +END $$; +""" + +print(anon_block) +row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) +print(row) + +## Check the rows on n1 and n2: + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM 
case2;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"TEST STEP: We're in repair mode - the table on n1 should contain 1/11/22, and n2 should contain 1/11/22/33") + +## Node n2 has three rows; bid 33 is not replicated to n1, so an update should end up in the exception log table: +row = util_test.write_psql("UPDATE case2 SET filler = 'trouble' WHERE bid = 33",host,dbname,port2,pw,usr) +print(f"TEST STEP: We're in repair mode - the update to bid 33 on n2 returns: {row}") +print("*"*100) + +## Demonstrate that replication continues +row = util_test.write_psql("UPDATE case2 SET filler = 'replication check' WHERE bid = 11",host,dbname,port2,pw,usr) +print(f"TEST STEP: The update to bid 11 on n1 returns: {row}") +print("*"*100) + +## Show that the row update made it to n2 without causing a death spiral: +row = util_test.read_psql("SELECT * FROM case2",host,dbname,port2,pw,usr).strip("[]") +print(f"TEST STEP: bid 11 should be updated on n2, case2 contains: {row}") +print("*"*100) + +## Check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + print("*"*100) + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## Read from the spock.exception_log on n1 (the update of bid3 should be here); +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case2';",host,dbname,port1,pw,usr) +print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") +print("*"*100) + +if '"value": 33, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + diff --git a/t/spock_exception_table_case_discard3.py b/t/spock_exception_table_case_discard3.py new file mode 100644 index 00000000..54cbd031 --- /dev/null +++ b/t/spock_exception_table_case_discard3.py @@ -0,0 +1,200 @@ +import sys, os, util_test,subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") + +port2=port1+1 +print(port2) + +print("*"*100) +nc_dir=os.getenv("NC_DIR","nc") +print(nc_dir) +home_dir = os.getenv("EDGE_HOME_DIR") +print(home_dir) + +# Check the information from cluster list-nodes. 
+# +res=util_test.run_nc_cmd("Exercise the list-nodes command", (f"cluster list-nodes demo"), f"{home_dir}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Set the exception logging behaviors for the test: +for n in range(num_nodes): + n=n+1 + + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'discard', f"{cluster_dir}/n{n}") + print(f"Line 44 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + print(f"Line 47 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = f"service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 51 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 54 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 56 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 59 - res: {res.stdout}") + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case3 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case3 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case3 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + print("*"*100) + ## Confirm with SELECT * FROM spock.tables.
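+ ## (spock.tables should list case3 once the repset-add-table call above has run; an empty result here would mean the table never joined the default repset.)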
+ result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + print("*"*100) + ## Check replication + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Add one row that should be replicated from n1 to n2: + +row = util_test.write_psql("INSERT INTO case3 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 11 on n1: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case3;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"Node n1 and n2 should both contain bid 1/11") +print("*"*100) + + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n2 that will not be replicated to n1: + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case3 VALUES (22, 22000, null); +END $$; +""" + +print(anon_block) +row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) +print(row) + +## Add a row to n1 that has the same bid as the row we added on n2; we're still in repair mode: + +row = util_test.write_psql("INSERT INTO case3 VALUES(22, 99000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We just tried to insert bid 22 on n1 - this should fail, but it doesn't: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case3;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"Node n1 should contain bid 1/11") +print(f"Node n2 should contain bid 1/11/22") + +## Check the results from the statement above, and you can see the duplicate primary key error +## is not being caught. Fix this when the patch is in. 
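+## (The needle/haystack check at the bottom of this script looks for the bid 22 tuple in
+## n2's spock.exception_log, which is where the replicated duplicate-key insert should land.)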
+ +## Read from the spock.exception_log on n1; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n1 returns an empty result set: {row}") +print("*"*100) + +## Read from the spock.exception_log on n2; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns the replication error: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state - specifically we don't want a death spiral here: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case3;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Read from the spock.exception_log on n2 for our needle/haystack step: +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case3';",host,dbname,port2,pw,usr) +print(f"TEST STEP: SELECT remote_new_tup FROM spock.exception_log on n2 returns: {row}") +print("*"*100) + +if '"value": 22, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + diff --git a/t/spock_exception_table_case_sub-disable1.py b/t/spock_exception_table_case_sub-disable1.py new file mode 100644 index 00000000..bfcdcd13 --- /dev/null +++ b/t/spock_exception_table_case_sub-disable1.py @@ -0,0 +1,188 @@ +import sys, os, util_test, subprocess, time + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") +home_dir=os.getenv("EDGE_HOME_DIR") +port2=port1+1 +nc_dir=os.getenv("NC_DIR","nc") +home_dir = os.getenv("EDGE_HOME_DIR") + + +## Check the information from cluster list-nodes. 
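+## (Unlike the discard variants above, spock.exception_behaviour=sub_disable -- set below --
+## is expected to disable the failing subscription instead of discarding the bad change.)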
+res=util_test.run_nc_cmd("Check the cluster with the list-nodes command", (f"cluster list-nodes demo"), f"{home_dir}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Set the exception logging behaviors for the test: +for n in range(num_nodes): + n=n+1 + + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'sub_disable', f"{cluster_dir}/n{n}") + print(f"Line 39 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + print(f"Line 42 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = f"service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 46 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 49 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 51 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 54 - res: {res.stdout}") + + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case11 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case11 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case11 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Check replication + print(f"We're on node n{n} now:") + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print("Confirming the configuration") +## Confirm the configuration: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Confirm with SELECT * FROM spock.subscription.
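+ ## (Worth noting while everything is still healthy: sub_enabled in spock.subscription is presumably the flag that the sub_disable behaviour flips off after an apply exception.)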
+ result = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.subscriptions returns: {result}") + print("*"*100) + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Test Steps +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n1 that will not be replicated to n2 + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case11 VALUES (2, 70000, null); +END $$; +""" + +print(f"Executing the anonymous block: anon_block") +row = util_test.write_psql(f"{anon_block}",host,dbname,port1,pw,usr) +print(row) + +## Look for our row on n1 and n2: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT * FROM case11;",host,dbname,port,pw,usr) + print(f"Line 120 - SELECT * from spock.tables on node n{n} returns: {result}") + port = port+1 + print("*"*100) + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## More test steps: +## Update the record that is out of sync, forcing a record into the exception table... +row = util_test.write_psql("UPDATE case11 SET filler = 'hi' WHERE bid = 2",host,dbname,port1,pw,usr) +print(f"TEST STEP: The update to bid 2 returns: {row}") +print("*"*100) + +## Demonstrate that replication continues on n1: +row = util_test.write_psql("UPDATE case11 SET filler = 'bye' WHERE bid = 1",host,dbname,port1,pw,usr) +print(f"TEST STEP: The update to bid 1 on n1 returns: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Check our table contents: + result = util_test.read_psql("SELECT * FROM case11;",host,dbname,port,pw,usr) + print(f"SELECT * from case11 on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"Line 150 - The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Query the spock.exception_log; adding this command to cover error in 4.0.4 where a query on the wrong node caused a server crash. +row1 = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case11';",host,dbname,port1,pw,usr) +print(f"This command is the query that used to cause a server crash! 
The result s/b []: {row1}") +print("*"*100) + +if '[]' not in str(row1): + util_test.EXIT_FAIL() + +## Confirm the test results from the spock.exception_log: +result = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case11';",host,dbname,port2,pw,usr) +print(f"Line 166 - TEST CONFIRMATION: SELECT remote_new_tup FROM spock.exception_log on n2 returns: {result}") +print("*"*100) + + +if '"value": 2, "attname": "bid", "atttype": "int4"' in str(result): + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + diff --git a/t/spock_exception_table_case_sub-disable2.py b/t/spock_exception_table_case_sub-disable2.py new file mode 100644 index 00000000..65ba2fc9 --- /dev/null +++ b/t/spock_exception_table_case_sub-disable2.py @@ -0,0 +1,221 @@ +import sys, os, util_test, subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") + +port2=port1+1 +print(port2) + +print("*"*100) +nc_dir=os.getenv("NC_DIR","nc") +print(nc_dir) +home_dir = os.getenv("EDGE_HOME_DIR") +print(home_dir) + +# Check the information from cluster list-nodes. +# +command = (f"cluster list-nodes demo") +res=util_test.run_nc_cmd("Exercise the list-nodes command", command, f"{home_dir}") +print(f"Command: {command}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Set the exception logging behaviors for the test: +for n in range(num_nodes): + n=n+1 + + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'sub_disable', f"{cluster_dir}/n{n}") + print(f"Line 47 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + print(f"Line 50 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = f"service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", command, f"{cluster_dir}/n{n}") + print(f"Line 54 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 57 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 59 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 62 - res: {res.stdout}") + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case22 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case22 VALUES
(1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case22 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Check replication + print(f"{n} is the value in n") + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"Line 72 - The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print("Confirming the configuration") +## Confirm the configuration: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Confirm with SELECT * FROM spock.subscription. + result = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.subscriptions returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n1 returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## Add two rows that should be replicated from n1 to n2: + +row = util_test.write_psql("INSERT INTO case22 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 11 on n1: {row}") +print("*"*100) + +row = util_test.write_psql("INSERT INTO case22 VALUES(22, 22000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 22 on n1: {row}") +print("*"*100) + + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case22;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"Line 124 - The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n2 that will not be replicated to n1: + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case22 VALUES (33, 33000, null); +END $$; +""" + +print(anon_block) +row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) +print(row) + +## Check the rows on n1 and n2: + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = 
util_test.read_psql("SELECT * FROM case22;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"TEST STEP: We're in repair mode - the table on n1 should contain 1/11/22, and n2 should contain 1/11/22/33") + +## Node n2 has three rows; bid 33 is not replicated to n1, so an update should end up in the exception log table: +row = util_test.write_psql("UPDATE case22 SET filler = 'trouble' WHERE bid = 33",host,dbname,port2,pw,usr) +print(f"TEST STEP: We're in repair mode - the update to bid 33 on n2 returns: {row}") +print("*"*100) + +## Demonstrate that replication continues +row = util_test.write_psql("UPDATE case22 SET filler = 'replication check' WHERE bid = 11",host,dbname,port2,pw,usr) +print(f"TEST STEP: The update to bid 11 on n1 returns: {row}") +print("*"*100) + +## Show that the row update made it to n2 without causing a death spiral: +row = util_test.read_psql("SELECT * FROM case22",host,dbname,port2,pw,usr).strip("[]") +print(f"TEST STEP: bid 11 should be updated on n2, case22 contains: {row}") +print("*"*100) + +## Check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"Line 191 - The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + print("*"*100) + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## Read from the spock.exception_log on n1 (the update of bid3 should be here); +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case22';",host,dbname,port1,pw,usr) +print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") +print("*"*100) + +if '"value": 33, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + diff --git a/t/spock_exception_table_case_sub-disable3.py b/t/spock_exception_table_case_sub-disable3.py new file mode 100644 index 00000000..5f2d0d40 --- /dev/null +++ b/t/spock_exception_table_case_sub-disable3.py @@ -0,0 +1,200 @@ +import sys, os, util_test,subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") + +port2=port1+1 +print(port2) + +print("*"*100) +nc_dir=os.getenv("NC_DIR","nc") +print(nc_dir) +home_dir = os.getenv("EDGE_HOME_DIR") +print(home_dir) + +# Check the information from 
cluster list-nodes. +# +res=util_test.run_nc_cmd("Exercise the list-nodes command", (f"cluster list-nodes demo"), f"{home_dir}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Set the exception logging behaviors for the test: +for n in range(num_nodes): + n=n+1 + + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'sub_disable', f"{cluster_dir}/n{n}") + print(f"Line 44 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'none', f"{cluster_dir}/n{n}") + print(f"Line 47 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = f"service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", command, f"{cluster_dir}/n{n}") + print(f"Line 51 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 54 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 56 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 59 - res: {res.stdout}") + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case33 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case33 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case33 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + print("*"*100) + ## Confirm with SELECT relname FROM spock.tables.
+ result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + print("*"*100) + ## Check replication + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Add one row that should be replicated from n1 to n2: + +row = util_test.write_psql("INSERT INTO case33 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 11 on n1: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case33;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"Node n1 and n2 should both contain bid 1/11") +print("*"*100) + + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n2 that will not be replicated to n1: + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case33 VALUES (22, 22000, null); +END $$; +""" + +print(anon_block) +row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) +print(row) + +## Add a row to n1 that has the same bid as the row we added on n2; we're still in repair mode: + +row = util_test.write_psql("INSERT INTO case33 VALUES(22, 99000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We just tried to insert bid 22 on n1 - this should fail, but it doesn't: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case33;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"Line 137 - The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"Node n1 should contain bid 1/11") +print(f"Node n2 should contain bid 1/11/22") + +## Check the results from the statement above, and you can see the duplicate primary key error +## is not being caught. Fix this when the patch is in. 
+ +## Read from the spock.exception_log on n1; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n1 returns an empty result set: {row}") +print("*"*100) + +## Read from the spock.exception_log on n2; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns the replication error: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state - specifically we don't want a death spiral here: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case33;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"Line 170 - The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Read from the spock.exception_log on n2 for our needle/haystack step: +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case33';",host,dbname,port2,pw,usr) +print(f"TEST STEP: SELECT remote_new_tup FROM spock.exception_log on n2 returns: {row}") +print("*"*100) + +if '"value": 22, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + diff --git a/t/spock_exception_table_case_transdiscard1.py b/t/spock_exception_table_case_transdiscard1.py new file mode 100644 index 00000000..eb6d9a34 --- /dev/null +++ b/t/spock_exception_table_case_transdiscard1.py @@ -0,0 +1,179 @@ +import sys, os, util_test, subprocess, time + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") +home_dir=os.getenv("EDGE_HOME_DIR") +port2=port1+1 +nc_dir=os.getenv("NC_DIR","nc") +home_dir = os.getenv("EDGE_HOME_DIR") + + +## Check the information from cluster list-nodes. 
+res=util_test.run_nc_cmd("Check the cluster with the list-nodes command", (f"cluster list-nodes demo"), f"{home_dir}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Set the exception logging behaviors for the test: +for n in range(num_nodes): + n=n+1 + + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + print(f"Line 39 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'all', f"{cluster_dir}/n{n}") + print(f"Line 42 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = "service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", (f"service restart pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 46 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 49 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 51 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 54 - res: {res.stdout}") + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case111 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case111 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case111 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Check replication + print(f"We're on node n{n} now:") + status=util_test.run_cmd("Checking spock sub-show-status", f"spock sub-show-status {sub} {dbname}", f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + print(f"The port before adding 1 is: {port}") + port = port + 1 + print(f"The port after adding 1 is: {port}") + +print("Confirming the configuration") +## Confirm the configuration: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Confirm with SELECT * FROM spock.subscription. 
+ result = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.subscriptions returns: {result}") + print("*"*100) + port = port + 1 + +## Test Steps +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n1 that will not be replicated to n2 + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case111 VALUES (2, 70000, null); +END $$; +""" + +print(f"Executing the anonymous block: anon_block") +row = util_test.write_psql(f"{anon_block}",host,dbname,port1,pw,usr) +print(row) + +## Look for our row on n1 and n2: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT * FROM case111;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + port = port+1 + print("*"*100) + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## More test steps: +## Update the record that is out of sync, forcing a record into the exception table... +row = util_test.write_psql("UPDATE case111 SET filler = 'hi' WHERE bid = 2",host,dbname,port1,pw,usr) +print(f"TEST STEP: The update to bid 2 returns: {row}") +print("*"*100) + +## Demonstrate that replication continues on n1: +row = util_test.write_psql("UPDATE case111 SET filler = 'bye' WHERE bid = 1",host,dbname,port1,pw,usr) +print(f"TEST STEP: The update to bid 1 on n1 returns: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Check our table contents: + result = util_test.read_psql("SELECT * FROM case111;",host,dbname,port,pw,usr) + print(f"SELECT * from case111 on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Query the spock.exception_log; adding this command to cover error in 4.0.4 where a query on the wrong node caused a server crash. +row1 = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case111';",host,dbname,port1,pw,usr) +print(f"This command is the query that used to cause a server crash! 
The result s/b []: {row1}") +print("*"*100) + +if '[]' not in str(row1): + util_test.EXIT_FAIL() + +## Confirm the test results from the spock.exception_log: +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case111';",host,dbname,port2,pw,usr) +print(f"TEST CONFIRMATION: SELECT remote_new_tup FROM spock.exception_log on n2 returns: {row}") +print("*"*100) + +if '"value": 2, "attname": "bid", "atttype": "int4"' in str(row): + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + diff --git a/t/spock_exception_table_case_transdiscard2.py b/t/spock_exception_table_case_transdiscard2.py new file mode 100644 index 00000000..5aa805d2 --- /dev/null +++ b/t/spock_exception_table_case_transdiscard2.py @@ -0,0 +1,214 @@ +import sys, os, util_test, subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") + +port2=port1+1 +print(port2) + +print("*"*100) +nc_dir=os.getenv("NC_DIR","nc") +print(nc_dir) +home_dir = os.getenv("EDGE_HOME_DIR") +print(home_dir) + +# Check the information from cluster list-nodes. +# +command = (f"cluster list-nodes demo") +res=util_test.run_nc_cmd("Exercise the list-nodes command", command, f"{home_dir}") +print(f"Command: {command}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Set the exception logging behaviors for the test: +for n in range(num_nodes): + n=n+1 + + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + print(f"Line 46 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'all', f"{cluster_dir}/n{n}") + print(f"Line 49 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = f"service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", command, f"{cluster_dir}/n{n}") + print(f"Line 53 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 56 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 58 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 61 - res: {res.stdout}") + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case222 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case222 VALUES (1, 11111,
'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case222 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + ## Confirm with SELECT relname FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + print("*"*100) + port = port + 1 + +print("Confirming the configuration") +## Confirm the configuration: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with SELECT * FROM spock.tables. + result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + ## Confirm with SELECT * FROM spock.subscription. + result = util_test.read_psql("SELECT * FROM spock.subscription;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.subscriptions returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n1 returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## Add two rows that should be replicated from n1 to n2: + +row = util_test.write_psql("INSERT INTO case222 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 11 on n1: {row}") +print("*"*100) + +row = util_test.write_psql("INSERT INTO case222 VALUES(22, 22000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 22 on n1: {row}") +print("*"*100) + + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case222;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n2 that will not be replicated to n1: + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case222 VALUES (33, 33000, null); +END $$; +""" + +print(anon_block) +row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) +print(row) + +## Check the rows on n1 and n2: + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case222;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} 
command on n{n} returns: {status.stdout}") + print("*"*100) + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"TEST STEP: We're in repair mode - the table on n1 should contain 1/11/22, and n2 should contain 1/11/22/33") + +## Node n2 has three rows; bid 33 is not replicated to n1, so an update should end up in the exception log table: +row = util_test.write_psql("UPDATE case222 SET filler = 'trouble' WHERE bid = 33",host,dbname,port2,pw,usr) +print(f"TEST STEP: We're in repair mode - the update to bid 33 on n2 returns: {row}") +print("*"*100) + +## Demonstrate that replication continues +row = util_test.write_psql("UPDATE case222 SET filler = 'replication check' WHERE bid = 11",host,dbname,port2,pw,usr) +print(f"TEST STEP: The update to bid 11 on n1 returns: {row}") +print("*"*100) + +## Show that the row update made it to n2 without causing a death spiral: +row = util_test.read_psql("SELECT * FROM case222",host,dbname,port2,pw,usr).strip("[]") +print(f"TEST STEP: bid 11 should be updated on n2, case222 contains: {row}") +print("*"*100) + +## Check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + print("*"*100) + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + + +## Read from the spock.exception_log on n1 (the update of bid3 should be here); +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case222';",host,dbname,port1,pw,usr) +print(f"SELECT * FROM spock.exception_log on n1 returns: {row}") +print("*"*100) + +if '"value": 33, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + diff --git a/t/spock_exception_table_case_transdiscard3.py b/t/spock_exception_table_case_transdiscard3.py new file mode 100644 index 00000000..7b4c4d16 --- /dev/null +++ b/t/spock_exception_table_case_transdiscard3.py @@ -0,0 +1,192 @@ +import sys, os, util_test,subprocess + +## Print Script +print(f"Starting - {os.path.basename(__file__)}") + +## Get Test Settings +util_test.set_env() +# +repo=os.getenv("EDGE_REPO") +num_nodes=int(os.getenv("EDGE_NODES",2)) +cluster_dir=os.getenv("EDGE_CLUSTER_DIR") +port1=int(os.getenv("EDGE_START_PORT",6432)) +usr=os.getenv("EDGE_USERNAME","admin") +pw=os.getenv("EDGE_PASSWORD","password1") +db=os.getenv("EDGE_DB","demo") +host=os.getenv("EDGE_HOST","localhost") +repuser=os.getenv("EDGE_REPUSER","pgedge") +repset=os.getenv("EDGE_REPSET","demo-repset") +spockpath=os.getenv("EDGE_SPOCK_PATH") +dbname=os.getenv("EDGE_DB","lcdb") +pgv=os.getenv("EDGE_INST_VERSION") + +port2=port1+1 +print(port2) + +print("*"*100) +nc_dir=os.getenv("NC_DIR","nc") +print(nc_dir) +home_dir = os.getenv("EDGE_HOME_DIR") +print(home_dir) + +# Check the information from cluster list-nodes. 
+# +res=util_test.run_nc_cmd("Exercise the list-nodes command", (f"cluster list-nodes demo"), f"{home_dir}") +print(f"The list-nodes command returns = {res}\n") +print("*"*100) + +## Set the exception logging behaviors for the test: +for n in range(num_nodes): + n=n+1 + + ## Set spock.exception_behaviour: + res=util_test.guc_set('spock.exception_behaviour', 'transdiscard', f"{cluster_dir}/n{n}") + print(f"Line 44 - res: {res.stdout}") + ## Set spock.exception_logging: + res=util_test.guc_set('spock.exception_logging', 'all', f"{cluster_dir}/n{n}") + print(f"Line 47 - SHOW spock.exception_logging: {res.stdout}") + ## Restart the service: + command = f"service restart pg{pgv}" + res=util_test.run_cmd("Restart the service", command, f"{cluster_dir}/n{n}") + print(f"Line 51 - res: {res.stdout}") + ## Check the GUC values: + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_behaviour",(f"{cluster_dir}/n{n}")) + print(f"Line 54 - SHOW spock.exception_behaviour: {res.stdout}") + res = util_test.run_cmd("Run db guc-show command", "db guc-show spock.exception_logging",(f"{cluster_dir}/n{n}")) + print(f"Line 56 - SHOW spock.exception_logging: {res.stdout}") + ## Check server status: + res=util_test.run_cmd("Check the service status", (f"service status pg{pgv}"), (f"{cluster_dir}/n{n}")) + print(f"Line 59 - res: {res.stdout}") + +print("Setup starts") +## Setup - on each node: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Create a table: + result = util_test.write_psql(f"CREATE TABLE case333 (bid integer PRIMARY KEY, bbalance integer, filler character(88))",host,dbname,port,pw,usr) + ## Add a row: + result = util_test.write_psql("INSERT INTO case333 VALUES (1, 11111, 'filler')",host,dbname,port,pw,usr) + ## Add it to the default repset: + result=util_test.run_cmd("comment", f"spock repset-add-table default case333 {dbname}", f"{cluster_dir}/n{n}") + print(f"The repset-add-table command on n{n} returns: {result.stdout}") + print("*"*100) + ## Confirm with SELECT relname FROM spock.tables.
+ result = util_test.read_psql("SELECT relname FROM spock.tables;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables returns: {result}") + print("*"*100) + +## Add one row that should be replicated from n1 to n2: + +row = util_test.write_psql("INSERT INTO case333 VALUES(11, 11000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We inserted bid 11 on n1: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case333;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"Node n1 and n2 should both contain bid 1/11") +print("*"*100) + + +## Create an anonymous block that puts the cluster in repair mode and does an insert statement that will +## add a row to n2 that will not be replicated to n1: + +anon_block = """ +DO $$ +BEGIN + PERFORM spock.repair_mode('True'); + INSERT INTO case333 VALUES (22, 22000, null); +END $$; +""" + +print(anon_block) +row = util_test.write_psql(f"{anon_block}",host,dbname,port2,pw,usr) +print(row) + +## Add a row to n1 that has the same bid as the row we added on n2; we're still in repair mode: + +row = util_test.write_psql("INSERT INTO case333 VALUES(22, 99000, null)",host,dbname,port1,pw,usr) +print(f"TEST STEP: We just tried to insert bid 22 on n1 - this should fail, but it doesn't: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case333;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +print(f"Node n1 should contain bid 1/11") +print(f"Node n2 should contain bid 1/11/22") + +## Check the results from the statement above, and you can see the duplicate primary key error +## is not being caught. Fix this when the patch is in. 
+ +## Read from the spock.exception_log on n1; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port1,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n1 returns an empty result set: {row}") +print("*"*100) + +## Read from the spock.exception_log on n2; +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log",host,dbname,port2,pw,usr).strip("[]") +print(f"SELECT remote_new_tup FROM spock.exception_log on n2 returns the replication error: {row}") +print("*"*100) + +## Look for our row on n1 and n2 and check the replication state - specifically we don't want a death spiral here: +port = port1 +subscriptions =["sub_n1n2","sub_n2n1"] +n = 0 +for sub in subscriptions: + n = n + 1 + ## Confirm table content: + result = util_test.read_psql("SELECT * FROM case333;",host,dbname,port,pw,usr) + print(f"SELECT * from spock.tables on node n{n} returns: {result}") + ## Confirm with spock sub-show-status + status=util_test.run_cmd("Checking spock sub-show-status", (f"spock sub-show-status {sub} {dbname}"), f"{cluster_dir}/n{n}") + print(f"The spock sub-show-status {sub} {dbname} command on n{n} returns: {status.stdout}") + port = port + 1 + + if "replicating" not in status.stdout: + util_test.EXIT_FAIL() + +## Read from the spock.exception_log on n2 for our needle/haystack step: +row = util_test.read_psql("SELECT remote_new_tup FROM spock.exception_log WHERE table_name = 'case333';",host,dbname,port2,pw,usr) +print(f"TEST STEP: SELECT remote_new_tup FROM spock.exception_log on n2 returns: {row}") +print("*"*100) + +if '"value": 22, "attname": "bid", "atttype": "int4"' in str(row): + + util_test.EXIT_PASS() +else: + util_test.EXIT_FAIL() + +util_test.exit_message(f"Pass - {os.path.basename(__file__)}", 0) + + diff --git a/t/spock_node_create_no_dbname.py b/t/spock_node_create_no_dbname.py index 04cbe475..90d532a8 100644 --- a/t/spock_node_create_no_dbname.py +++ b/t/spock_node_create_no_dbname.py @@ -20,17 +20,6 @@ spockpath=os.getenv("EDGE_SPOCK_PATH") dbname=os.getenv("EDGE_DB","lcdb") # -# Check for "n1", and drop it if it exists; then we'll use spock node-create to create errors. This way we can play the tests out of order. -# -check_value = util_test.read_psql("select * from spock.node;",host,dbname,port,pw,usr).strip("[]") -print(f"Check value is: {check_value}") - -if "n1" in str(check_value): - drop_node = f"spock node-drop n1 {dbname}" - drop=util_test.run_cmd("Run spock node-drop.", drop_node, f"{cluster_dir}/n1") - print(f"Print drop.stdout here: - {drop.stdout}") -print("*"*100) - # Invoke spock node-create, but don't specify a database name: command = f"spock node-create n1 'host={host} user={repuser} dbname={dbname}'" diff --git a/t/spock_node_create_no_dns.py b/t/spock_node_create_no_dns.py index 9240a8d5..98bd3015 100644 --- a/t/spock_node_create_no_dns.py +++ b/t/spock_node_create_no_dns.py @@ -20,17 +20,6 @@ spockpath=os.getenv("EDGE_SPOCK_PATH") dbname=os.getenv("EDGE_DB","lcdb") # -# Check for "n1", and drop it if it exists; then we'll use spock node-create to create errors. This way we can play the tests out of order. 
-# -check_value = util_test.read_psql("select * from spock.node;",host,dbname,port,pw,usr).strip("[]") -print(f"Check value is: {check_value}") - -if "n1" in str(check_value): - drop_node = f"spock node-drop n1 {dbname}" - drop=util_test.run_cmd("Run spock node-drop.", drop_node, f"{cluster_dir}/n1") - print(f"Print drop.stdout here: - {drop.stdout}") -print("*"*100) - # Invoke spock node-create, but don't specify a node name: command = f"spock node-create n1 {dbname}" diff --git a/t/spock_node_create_no_node_name.py b/t/spock_node_create_no_node_name.py index 55c0cdd0..d11815ba 100644 --- a/t/spock_node_create_no_node_name.py +++ b/t/spock_node_create_no_node_name.py @@ -19,17 +19,6 @@ repset=os.getenv("EDGE_REPSET","demo-repset") spockpath=os.getenv("EDGE_SPOCK_PATH") dbname=os.getenv("EDGE_DB","lcdb") -# -# Check for "n1", and drop it if it exists; then we'll use spock node-create to create errors. This way we can play the tests out of order. -# -check_value = util_test.read_psql("select * from spock.node;",host,dbname,port,pw,usr).strip("[]") -print(f"Check value is: {check_value}") - -if "n1" in str(check_value): - drop_node = f"spock node-drop n1 {dbname}" - drop=util_test.run_cmd("Run spock node-drop.", drop_node, f"{cluster_dir}/n1") - print(f"Print drop.stdout here: - {drop.stdout}") -print("*"*100) # Invoke spock node-create, but don't specify a node name: diff --git a/t/spock_node_create_no_repset_user.py b/t/spock_node_create_no_repset_user.py index b03cec88..d93da35c 100644 --- a/t/spock_node_create_no_repset_user.py +++ b/t/spock_node_create_no_repset_user.py @@ -20,17 +20,6 @@ spockpath=os.getenv("EDGE_SPOCK_PATH") dbname=os.getenv("EDGE_DB","lcdb") # -# Check for "n1", and drop it if it exists; then we'll use spock node-create to create errors. This way we can play the tests out of order. 
-# -check_value = util_test.read_psql("select * from spock.node;",host,dbname,port,pw,usr).strip("[]") -print(f"Check value is: {check_value}") - -if "n1" in str(check_value): - drop_node = f"spock node-drop n1 {dbname}" - drop=util_test.run_cmd("Run spock node-drop.", drop_node, f"{cluster_dir}/n1") - print(f"Print drop.stdout here: - {drop.stdout}") -print("*"*100) - # Invoke spock node-create, but don't specify a node name: command = f"spock node-create n1 'host={host} user={usr} dbname={dbname}' {dbname}" diff --git a/t/util_test.py b/t/util_test.py index 1b5eebb7..be1d10c6 100644 --- a/t/util_test.py +++ b/t/util_test.py @@ -1,5 +1,7 @@ -import sys, os, psycopg, json, subprocess, shutil, re, csv +import sys, os, psycopg, json, subprocess, shutil, re, csv, socket from dotenv import load_dotenv +from psycopg import sql + def EXIT_PASS(): print("pass") @@ -23,6 +25,57 @@ def exit_message(p_msg, p_rc=1): sys.exit(p_rc) + +# ************************************************************************************************************** +## Enable AutoDDL +# ************************************************************************************************************** +# To call this function, pass the connection parameters: +# command = util_test.enable_autoddl(host, dbname, port, pw, usr) + +## Get a connection - the connection sets autocommit to True; on failure, it prints the error and returns None + +def get_autoddl_conn(host,dbname,port,pw,usr): + try: + conn = psycopg.connect(dbname=dbname, user=usr, host=host, port=port, password=pw) + conn.autocommit = True + print("Your connection is established, with autocommit = True") + return conn + + except Exception as e: + conn = None + print(f"The connection attempt failed: {e}") + return conn + +############################## + +def enable_autoddl(host, dbname, port, pw, usr): + try: + # Connect to the PostgreSQL database + + conn = get_autoddl_conn(host,dbname,port,pw,usr) + cur = conn.cursor() + # We'll execute the following commands: + + cur.execute("ALTER SYSTEM SET spock.enable_ddl_replication = on") + cur.execute("ALTER SYSTEM SET spock.include_ddl_repset = on") + cur.execute("ALTER SYSTEM SET spock.allow_ddl_from_functions = on") + + # Then, reload the PostgreSQL configuration: + cur.execute("SELECT pg_reload_conf()") + print("PostgreSQL configuration reloaded.") + + # Close the cursor and connection + cur.close() + conn.close() + + except Exception as e: + print(f"An error occurred: {e}") + + + + + + + # ************************************************************************************************************** ## Run a pgEdge command # ************************************************************************************************************** @@ -263,7 +316,7 @@ def read_psql(cmd,host,dbname,port,pw,usr,indent=None): cur = con.cursor() cur.execute(cmd) print(cmd) - ret = json.dumps(cur.fetchall(), indent=indent) + ret = json.dumps(cur.fetchall(), indent=indent, default=str) cur.close() except Exception as e: exit_message(e) @@ -419,3 +472,78 @@ def printres(res: subprocess.CompletedProcess[str]) -> None: print("stderr:") for line in error.splitlines(): print(f"\t{line}") + +################################################################### +## Find an available port +################################################################### +def get_avail_ports(p_def_port): + def_port = int(p_def_port) + + # iterate to first non-busy port + while is_socket_busy(def_port): + def_port = def_port + 1 + continue + + err_msg = "Port must be between 1000
and 9999, try again." + + while True: + s_port = str(def_port) + + if s_port.isdigit() == False: + print(err_msg) + continue + + i_port = int(s_port) + + if (i_port < 1000) or (i_port > 9999): + print(err_msg) + continue + + if is_socket_busy(i_port): + ## Report the busy port and advance to the next candidate: + print("Port " + str(i_port) + " is in use.") + def_port = i_port + 1 + continue + + break + + return i_port + +## Required for get_avail_ports + +def is_socket_busy(p_port): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + result = s.connect_ex(("127.0.0.1",p_port)) + s.close() + print(result) + if result == 0: + return True + else: + return False + +###################################################################### +# Find the pg_versions available to um +###################################################################### + +def find_pg_versions(home_dir): + components = [] + ## Pass in the value of {home_dir}; this returns the available Postgres versions (with the leading pg stripped) along with the raw component names. + ## Use um to find all of the available versions of Postgres. + res=run_nc_cmd("Getting list of available versions of Postgres", "um list --json", f"{home_dir}") + print(f"{res}") + + ## Break the returned json string into a list: + res = json.loads(res.stdout) + ## Go through the json, find the available PG versions, and append each one to the components list: + for i in res: + comp=(i.get("component")) + print(comp) + ## Append the component name to the components list: + components.append(comp) + print(components) + ## Remove the first two letters from in front of the component name (pgXX) to make it just the version (XX): + versions = [item[2:] for item in components] + print(versions) + return versions, components + +######################################################################
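+ +###################################################################### +## Hedged usage sketch for the helpers above; illustrative only, not +## wired into any schedule file, and the literal argument values are +## assumptions taken from this suite's defaults: +## +##   port = get_avail_ports(6432)   # first free port at or above 6432 +##   versions, components = find_pg_versions(os.getenv("EDGE_HOME_DIR")) +##   enable_autoddl("localhost", "lcdb", port, "password1", "admin") +######################################################################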