From 024723a87c8366e9bde3148eb26ab3448714dfdb Mon Sep 17 00:00:00 2001
From: vshepard
Date: Wed, 4 Sep 2024 10:01:31 +0200
Subject: [PATCH] tests 2.8.2

---
 .env                            |  26 ++
 tests/archive_test.py           |   1 +
 tests/auth_test.py              | 356 ++++++++++++++++-
 tests/backup_test.py            |  14 +-
 tests/catchup_test.py           |  43 ++-
 tests/delete_test.py            |   7 +-
 tests/helpers/ptrack_helpers.py |   7 +-
 tests/logging_test.py           |   6 +-
 tests/option_test.py            |   2 +-
 tests/page_test.py              |  22 +-
 tests/pbckp1242_test.py         | 662 ++++++++++++++++++++++++++++++++
 tests/replica_test.py           |   4 +-
 tests/requirements.txt          |   4 +-
 tests/s3_auth_test.py           |  20 +
 tests/time_consuming_test.py    |   2 +
 tests/validate_test.py          |   3 +
 16 files changed, 1135 insertions(+), 44 deletions(-)
 create mode 100644 .env
 create mode 100644 tests/pbckp1242_test.py
 create mode 100644 tests/s3_auth_test.py

diff --git a/.env b/.env
new file mode 100644
index 00000000..54cf812b
--- /dev/null
+++ b/.env
@@ -0,0 +1,26 @@
+TARGET_OS=ubuntu
+TARGET_OS_VERSION=22.04
+PG_PRODUCT=enterprise
+PG_REPO=postgrespro
+PG_SRCDIR=./postgrespro
+PG_PRODUCT_SUFFIX=-ent
+PG_VERSION=15
+PG_VERSION_SUFFIX=-15
+PTRACK=ON
+PG_PROBACKUP_PTRACK=ON
+PGPROBACKUPBIN=/home/vshepard/pbckp/ent-15/bin/pg_probackup
+PG_CONFIG=/home/vshepard/pbckp/ent-15/bin/pg_config
+PGPROBACKUPBIN3=/home/vshepard/workspace/work/pg_probackup/dev-ee-probackup/pg_probackup3/builddir/src/pg_probackup3
+LANG=C.UTF-8
+LC_ALL=C
+
+PG_PROBACKUP_S3_HOST=10.5.52.86
+PG_PROBACKUP_S3_PORT=9000
+PG_PROBACKUP_S3_REGION=us-east-1
+PG_PROBACKUP_S3_BUCKET_NAME=test1
+PG_PROBACKUP_S3_ACCESS_KEY=minioadmin
+PG_PROBACKUP_S3_SECRET_ACCESS_KEY=minioadmin
+PG_PROBACKUP_S3_HTTPS=OFF
+PG_PROBACKUP_S3_TEST=minio
+PG_PROBACKUP_S3_BUFFER_SIZE=64
+PG_PROBACKUP_S3_RETRIES=10
diff --git a/tests/archive_test.py b/tests/archive_test.py
index 034223aa..5cc744ea 100644
--- a/tests/archive_test.py
+++ b/tests/archive_test.py
@@ -7,6 +7,7 @@
 import subprocess
 from sys import exit
 from time import sleep
+from pathlib import PurePath
 
 from testgres import ProcessType
 
diff --git a/tests/auth_test.py b/tests/auth_test.py
index d1a7c707..0a8ee590 100644
--- a/tests/auth_test.py
+++ b/tests/auth_test.py
@@ -1,20 +1,348 @@
+"""
+This test suite checks the behavior of the pg_probackup utility when a
+password is required to connect to the PostgreSQL instance.
+ - https://confluence.postgrespro.ru/pages/viewpage.action?pageId=16777522
+"""
+
+import os
+import unittest
+import signal
+import time
+
 from .helpers.ptrack_helpers import ProbackupTest
+from testgres import StartNodeException
+
+skip_test = False
+
+try:
+    from pexpect import *
+except ImportError:
+    skip_test = True
+
+
+class SimpleAuthTest(ProbackupTest):
+
+    # @unittest.skip("skip")
+    def test_backup_via_unprivileged_user(self):
+        """
+        Make a node, create an unprivileged user, try to
+        run backups without EXECUTE rights on
+        certain functions
+        """
+        node = self.pg_node.make_simple('node',
+                                        set_replication=True,
+                                        ptrack_enable=self.ptrack)
+
+        self.pb.init()
+        self.pb.add_instance('node', node)
+        self.pb.set_archiving('node', node)
+        node.slow_start()
+
+        if self.ptrack:
+            node.safe_psql(
+                "postgres",
+                "CREATE EXTENSION ptrack")
+
+        node.safe_psql("postgres", "CREATE ROLE backup with LOGIN")
+
+        self.pb.backup_node('node', node, options=['-U', 'backup'],
+                            expect_error='due to missing grant on EXECUTE')
+        if self.pg_config_version < 150000:
+            self.assertMessage(contains=
+                               "ERROR: Query failed: ERROR: permission denied "
+                               "for function pg_start_backup")
+        else:
+            self.assertMessage(contains=
+                               "ERROR: Query failed: ERROR: permission denied "
+                               "for function pg_backup_start")
+
+        if self.pg_config_version < 150000:
+            node.safe_psql(
+                "postgres",
+                "GRANT EXECUTE ON FUNCTION"
+                " pg_start_backup(text, boolean, boolean) TO backup;")
+        else:
+            node.safe_psql(
+                "postgres",
+                "GRANT EXECUTE ON FUNCTION"
+                " pg_backup_start(text, boolean) TO backup;")
+
+        node.safe_psql(
+            'postgres',
+            "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup")
+
+        self.pb.backup_node('node', node,
+                            options=['-U', 'backup'],
+                            expect_error='due to missing grant on EXECUTE')
+        self.assertMessage(contains=
+                           "ERROR: Query failed: ERROR: permission denied for function "
+                           "pg_create_restore_point\nquery was: "
+                           "SELECT pg_catalog.pg_create_restore_point($1)")
+
+        node.safe_psql(
+            "postgres",
+            "GRANT EXECUTE ON FUNCTION"
+            " pg_create_restore_point(text) TO backup;")
+
+        self.pb.backup_node('node', node,
+                            options=['-U', 'backup'],
+                            expect_error='due to missing grant on EXECUTE')
+        if self.pg_config_version < 150000:
+            self.assertMessage(contains=
+                               "ERROR: Query failed: ERROR: permission denied "
+                               "for function pg_stop_backup")
+        else:
+            self.assertMessage(contains=
+                               "ERROR: Query failed: ERROR: permission denied "
+                               "for function pg_backup_stop")
+
+        if self.pg_config_version < self.version_to_num('15.0'):
+            node.safe_psql(
+                "postgres",
+                "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; "
+                "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean, boolean) TO backup;")
+        else:
+            node.safe_psql(
+                "postgres",
+                "GRANT EXECUTE ON FUNCTION pg_backup_stop(boolean) TO backup;")
+
+        self.pb.backup_node('node', node, options=['-U', 'backup'])
+
+        node.safe_psql("postgres", "CREATE DATABASE test1")
+
+        self.pb.backup_node('node', node, options=['-U', 'backup'])
+
+        node.safe_psql(
+            "test1", "create table t1 as select generate_series(0,100)")
+
+        node.stop()
+        node.slow_start()
+
+        node.safe_psql(
+            "postgres",
+            "ALTER ROLE backup REPLICATION")
+
+        # FULL
+        self.pb.backup_node('node', node, options=['-U', 'backup'])
+
+        # PTRACK
+        if self.ptrack:
+            self.pb.backup_node('node', node,
+                                backup_type='ptrack', options=['-U', 'backup'])
+
+
+class AuthTest(ProbackupTest):
+    pb = None
+    node = None
+
+    # TODO move to object scope, replace module_name
+    @unittest.skipIf(skip_test, "Module pexpect isn't installed. You need to install it.")
+    def setUp(self):
+
+        super().setUp()
+
+        self.node = self.pg_node.make_simple("node",
+                                             set_replication=True,
+                                             initdb_params=['--auth-host=md5'],
+                                             pg_options={'archive_timeout': '5s'},
+                                             )
+
+        self.modify_pg_hba(self.node)
+
+        self.pb.init()
+        self.pb.add_instance(self.node.name, self.node)
+        self.pb.set_archiving(self.node.name, self.node)
+        try:
+            self.node.slow_start()
+        except StartNodeException:
+            raise unittest.SkipTest("Node hasn't started")
+
+        version = self.pg_config_version
+        if version < 150000:
+            self.node.safe_psql(
+                "postgres",
+                "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; "
+                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
+                "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; "
+                "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; "
+                "GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; "
+                "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; "
+                "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean, boolean) TO backup; "
+                "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; "
+                "GRANT EXECUTE ON FUNCTION pg_switch_wal() TO backup; "
+                "GRANT EXECUTE ON FUNCTION txid_current() TO backup; "
+                "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; "
+                "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;")
+        else:
+            self.node.safe_psql(
+                "postgres",
+                "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; "
+                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
+                "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; "
+                "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; "
+                "GRANT EXECUTE ON FUNCTION pg_backup_start(text, boolean) TO backup; "
+                "GRANT EXECUTE ON FUNCTION pg_backup_stop(boolean) TO backup; "
+                "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; "
+                "GRANT EXECUTE ON FUNCTION pg_switch_wal() TO backup; "
+                "GRANT EXECUTE ON FUNCTION txid_current() TO backup; "
+                "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; "
+                "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;")
+
+        if version >= 150000:
+            home_dir = os.path.join(self.test_path, "home")
+            os.makedirs(home_dir, exist_ok=True)
+            self.test_env['HOME'] = home_dir
+            self.pgpass_file = os.path.join(home_dir, '.pgpass')
+            self.pgpass_file_lock = None
+        else:
+            # Before PG v15 only the real home dir was inspected.
+            # Since we can't have a separate file per test, we have to
+            # serialize the tests.
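+            # (The lock below relies on open(..., "x"): exclusive creation
+            # fails with FileExistsError while another test holds the lock,
+            # so only one test at a time touches the shared ~/.pgpass.)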
+            self.pgpass_file = os.path.join(os.path.expanduser('~'), '.pgpass')
+            self.pgpass_file_lock = self.pgpass_file + '~probackup_test_lock'
+            # have to lock pgpass by creating the lock file in exclusive mode
+            for i in range(120):
+                try:
+                    open(self.pgpass_file_lock, "x").close()
+                except FileExistsError:
+                    time.sleep(1)
+                else:
+                    break
+            else:
+                raise TimeoutError("could not create ~/.pgpass~probackup_test_lock within 120 seconds")
+
+        self.pb_cmd = ['backup',
+                       '--instance', self.node.name,
+                       '-h', '127.0.0.1',
+                       '-p', str(self.node.port),
+                       '-U', 'backup',
+                       '-d', 'postgres',
+                       '-b', 'FULL',
+                       '--no-sync'
+                       ]
+
+    def tearDown(self):
+        super().tearDown()
+        if not self.pgpass_file_lock:
+            return
+        if hasattr(self, "pgpass_line") and os.path.exists(self.pgpass_file):
+            with open(self.pgpass_file, 'r') as fl:
+                lines = fl.readlines()
+            if self.pgpass_line in lines:
+                lines.remove(self.pgpass_line)
+                if len(lines) == 0:
+                    os.remove(self.pgpass_file)
+                else:
+                    with open(self.pgpass_file, 'w') as fl:
+                        fl.writelines(lines)
+        os.remove(self.pgpass_file_lock)
+
+    def test_empty_password(self):
+        """ Test case: PGPB_AUTH03 - zero password length """
+        try:
+            self.assertIn("ERROR: no password supplied",
+                          self.run_pb_with_auth('\0\r\n'))
+        except (TIMEOUT, ExceptionPexpect) as e:
+            self.fail(e.value)
+
+    def test_wrong_password(self):
+        """ Test case: PGPB_AUTH04 - incorrect password """
+        self.assertIn("password authentication failed",
+                      self.run_pb_with_auth('wrong_password\r\n'))
+
+    def test_right_password(self):
+        """ Test case: PGPB_AUTH01 - correct password """
+        self.assertIn("completed",
+                      self.run_pb_with_auth('password\r\n'))
+
+    def test_right_password_and_wrong_pgpass(self):
+        """ Test case: PGPB_AUTH05 - correct password and incorrect .pgpass (-W) """
+        line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'wrong_password'])
+        self.create_pgpass(self.pgpass_file, line)
+        self.assertIn("completed",
+                      self.run_pb_with_auth('password\r\n', add_args=["-W"]))
+
+    def test_ctrl_c_event(self):
+        """ Test case: PGPB_AUTH02 - send interrupt signal """
+        try:
+            self.run_pb_with_auth(kill=True)
+        except TIMEOUT:
+            self.fail("Error: CTRL+C event ignored")
+
+    def test_pgpassfile_env(self):
+        """ Test case: PGPB_AUTH06 - set environment var PGPASSFILE """
+        path = os.path.join(self.test_path, 'pgpass.conf')
+        line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'password'])
+        self.create_pgpass(path, line)
+        self.test_env["PGPASSFILE"] = path
+        self.assertEqual(
+            "OK",
+            self.pb.show(self.node.name, self.pb.run(self.pb_cmd + ['-w']))["status"],
+            "ERROR: Full backup status is not valid."
+        )
+
+    def test_pgpass(self):
+        """ Test case: PGPB_AUTH07 - create the .pgpass file in the home dir """
+        line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'password'])
+        self.create_pgpass(self.pgpass_file, line)
+        self.assertEqual(
+            "OK",
+            self.pb.show(self.node.name, self.pb.run(self.pb_cmd + ['-w']))["status"],
+            "ERROR: Full backup status is not valid."
+        )
+
+    def test_pgpassword(self):
+        """ Test case: PGPB_AUTH08 - set environment var PGPASSWORD """
+        self.test_env["PGPASSWORD"] = "password"
+        self.assertEqual(
+            "OK",
+            self.pb.show(self.node.name, self.pb.run(self.pb_cmd + ['-w']))["status"],
+            "ERROR: Full backup status is not valid."
+        )
+
+    def test_pgpassword_and_wrong_pgpass(self):
+        """ Test case: PGPB_AUTH09 - check priority between PGPASSWORD and the .pgpass file """
+        line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'wrong_password'])
+        self.create_pgpass(self.pgpass_file, line)
+        self.test_env["PGPASSWORD"] = "password"
+        self.assertEqual(
+            "OK",
+            self.pb.show(self.node.name, self.pb.run(self.pb_cmd + ['-w']))["status"],
+            "ERROR: Full backup status is not valid."
+        )
+
+    def run_pb_with_auth(self, password=None, add_args=[], kill=False):
+        cmd = [*self.pb_cmd, *add_args, *self.backup_dir.pb_args]
+        with spawn(self.probackup_path, cmd,
+                   encoding='utf-8', timeout=60, env=self.test_env) as probackup:
+            result = probackup.expect(u"Password for user .*:", 10)
+            if kill:
+                probackup.kill(signal.SIGINT)
+            elif result == 0:
+                probackup.sendline(password)
+                probackup.expect(EOF)
+                return str(probackup.before)
+            else:
+                raise ExceptionPexpect("Other pexpect errors.")
 
-class AuthorizationTest(ProbackupTest):
-    """
-    Check connect to S3 via pre_start_checks() function
-    calling pg_probackup init --s3
 
-    test that s3 keys allow to connect to all types of storages
-    """
+    def modify_pg_hba(self, node):
+        """
+        Add trust authentication for the current user to pg_hba.conf.
+        Needed to create the new role and grant it privileges.
+        :param node:
+        :return None:
+        """
+        hba_conf = os.path.join(node.data_dir, "pg_hba.conf")
+        with open(hba_conf, 'r+') as fio:
+            data = fio.read()
+            fio.seek(0)
+            fio.write('host\tall\t%s\t127.0.0.1/0\ttrust\n%s' % (self.username, data))
 
-    def test_s3_auth_test(self):
-        console_output = self.pb.init(options=["--log-level-console=VERBOSE"])
-        self.assertNotIn(': 403', console_output)  # Because we can have just '403' substring in timestamp
-        self.assertMessage(console_output, contains='S3_pre_start_check successful')
-        self.assertMessage(console_output, contains='HTTP response: 200')
-        self.assertIn(
-            f"INFO: Backup catalog '{self.backup_dir}' successfully initialized",
-            console_output)
+    def create_pgpass(self, path, line):
+        self.pgpass_line = line + "\n"
+        with open(path, 'a') as passfile:
+            # host:port:db:username:password
+            passfile.write(self.pgpass_line)
+        os.chmod(path, 0o600)
diff --git a/tests/backup_test.py b/tests/backup_test.py
index 2e0695b6..bc90636a 100644
--- a/tests/backup_test.py
+++ b/tests/backup_test.py
@@ -933,19 +933,19 @@ def test_persistent_slot_for_stream_backup(self):
             "postgres",
             "SELECT pg_create_physical_replication_slot('slot_1')")
 
-        # FULL backup. By default, --temp-slot=true.
+        # FULL backup
         self.pb.backup_node('node', node,
-                            options=['--stream', '--slot=slot_1'],
-                            expect_error="because replication slot already exist")
+                            options=['--stream', '--slot=slot_1', '--temp-slot'],
+                            expect_error="because a replication slot with this name already exists")
         self.assertMessage(contains='ERROR: replication slot "slot_1" already exists')
 
         # FULL backup
         self.pb.backup_node('node', node,
-                            options=['--stream', '--slot=slot_1', '--temp-slot=false'])
+                            options=['--stream', '--slot=slot_1'])
 
         # FULL backup
         self.pb.backup_node('node', node,
-                            options=['--stream', '--slot=slot_1', '--temp-slot=false'])
+                            options=['--stream', '--slot=slot_1'])
 
     # @unittest.skip("skip")
     def test_basic_temp_slot_for_stream_backup(self):
@@ -964,9 +964,9 @@ def test_basic_temp_slot_for_stream_backup(self):
         self.pb.backup_node('node', node,
                             options=['--stream', '--temp-slot'])
 
-        # FULL backup. By default, --temp-slot=true.
+        # FULL backup
         self.pb.backup_node('node', node,
-                            options=['--stream', '--slot=slot_1'])
+                            options=['--stream', '--slot=slot_1', '--temp-slot=on'])
 
         # FULL backup
         self.pb.backup_node('node', node,
diff --git a/tests/catchup_test.py b/tests/catchup_test.py
index 117ac040..148a670f 100644
--- a/tests/catchup_test.py
+++ b/tests/catchup_test.py
@@ -1201,7 +1201,7 @@ def test_catchup_with_replication_slot(self):
             destination_node = dst_pg,
             options = [
                 '-d', 'postgres', '-p', str(src_pg.port), '--stream',
-                '--slot=nonexistentslot_1a', '--temp-slot=false'
+                '--slot=nonexistentslot_1a'
                 ],
             expect_error="because replication slot does not exist"
             )
@@ -1216,7 +1216,7 @@ def test_catchup_with_replication_slot(self):
             destination_node = dst_pg,
             options = [
                 '-d', 'postgres', '-p', str(src_pg.port), '--stream',
-                '--slot=existentslot_1b', '--temp-slot=false'
+                '--slot=existentslot_1b'
                 ]
             )
@@ -2060,3 +2060,42 @@ def test_waldir_dry_run_catchup_full(self):
 
         # Cleanup
         src_pg.stop()
+
+    def test_custom_replication_slot(self):
+        # preparation
+        my_slot = "my_slot"
+
+        src_pg = self.pg_node.make_simple('src',
+                                          set_replication=True
+                                          )
+        src_pg.slow_start()
+        src_pg.safe_psql(
+            "postgres",
+            "CREATE TABLE ultimate_question AS SELECT 42 AS answer")
+        src_pg.safe_psql("postgres", f"SELECT * FROM pg_create_physical_replication_slot('{my_slot}');")
+
+        src_query_result = src_pg.table_checksum("ultimate_question")
+
+        # do full catchup
+        dst_pg = self.pg_node.make_empty('dst')
+        self.pb.catchup_node(
+            backup_mode='FULL',
+            source_pgdata=src_pg.data_dir,
+            destination_node=dst_pg,
+            options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '-S', my_slot]
+        )
+
+        # 1st check: compare data directories
+        self.compare_pgdata(
+            self.pgdata_content(src_pg.data_dir, exclude_dirs=['pg_replslot']),
+            self.pgdata_content(dst_pg.data_dir, exclude_dirs=['pg_replslot']),
+        )
+
+        # run & recover the caught-up instance
+        src_pg.stop()
+        dst_options = {'port': str(dst_pg.port)}
+        dst_pg.set_auto_conf(dst_options)
+        dst_pg.slow_start()
+
+        # 2nd check: run verification query
+        dst_query_result = dst_pg.table_checksum("ultimate_question")
+        self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
diff --git a/tests/delete_test.py b/tests/delete_test.py
index 761aa36f..111a48d6 100644
--- a/tests/delete_test.py
+++ b/tests/delete_test.py
@@ -778,15 +778,10 @@ def test_basic_dry_run_del_instance(self):
 
         self.pb.init()
         self.pb.add_instance('node', node)
-        self.pb.set_archiving('node', node)
         node.slow_start()
 
         # full backup
-        self.pb.backup_node('node', node)
-        # restore
-        node.cleanup()
-        self.pb.restore_node('node', node=node)
-        node.slow_start()
+        self.pb.backup_node('node', node, options=['--stream'])
 
         content_before = self.pgdata_content(self.backup_dir)
         # Delete instance
diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py
index f0662901..f9695d3f 100644
--- a/tests/helpers/ptrack_helpers.py
+++ b/tests/helpers/ptrack_helpers.py
@@ -875,8 +875,11 @@ def __init__(self, data):
             self.data = data
 
     @contextlib.contextmanager
-    def modify_backup_control(self, backup_dir, instance, backup_id):
-        path = os.path.join('backups', instance, backup_id, 'backup.control')
+    def modify_backup_control(self, backup_dir, instance, backup_id, content=False):
+        file = 'backup.control'
+        if content:
+            file = 'backup_content.control'
+        path = os.path.join('backups', instance, backup_id, file)
         control_file = backup_dir.read_file(path)
         cf = ProbackupTest.ControlFileContainer(control_file)
         yield cf
diff --git a/tests/logging_test.py b/tests/logging_test.py
index 85e646c1..e2767d92 100644
--- a/tests/logging_test.py
+++ b/tests/logging_test.py
@@ -92,7 +92,7 @@ def test_truncate_rotation_file(self):
         output = self.pb.backup_node('node', node,
                                      options=[
                                          '--stream',
-                                         '--log-level-file=LOG'],
+                                         '--log-level-file=INFO'],
                                      return_id=False)
 
         # check that log file wasn`t rotated
@@ -152,7 +152,7 @@ def test_unlink_rotation_file(self):
         output = self.pb.backup_node('node', node,
                                      options=[
                                          '--stream',
-                                         '--log-level-file=LOG'],
+                                         '--log-level-file=INFO'],
                                      return_id=False)
 
         # check that log file wasn`t rotated
@@ -211,7 +211,7 @@ def test_garbage_in_rotation_file(self):
         output = self.pb.backup_node('node', node,
                                      options=[
                                          '--stream',
-                                         '--log-level-file=LOG'],
+                                         '--log-level-file=INFO'],
                                      return_id=False)
 
         # check that log file wasn`t rotated
diff --git a/tests/option_test.py b/tests/option_test.py
index 89c5c52e..a6633182 100644
--- a/tests/option_test.py
+++ b/tests/option_test.py
@@ -21,7 +21,7 @@ def test_without_backup_path_3(self):
         self.pb.run(["backup", "-b", "full"],
                     expect_error="because '-B' parameter is not specified",
                     use_backup_dir=None)
         self.assertMessage(contains="No backup catalog path specified.\n"
-                                    "Please specify it either using environment variable BACKUP_DIR or\n"
+                                    "Please specify it either using environment variable BACKUP_PATH or\n"
                                     "command line option --backup-path (-B)")
 
     def test_options_4(self):
diff --git a/tests/page_test.py b/tests/page_test.py
index 10959bd8..78046668 100644
--- a/tests/page_test.py
+++ b/tests/page_test.py
@@ -731,22 +731,32 @@ def test_page_backup_with_alien_wal_segment(self):
             "md5(repeat(i::text,10))::tsvector as tsvector "
             "from generate_series(0,10000) i;")
 
-        alien_node.safe_psql(
+        alien_node.execute(
             "postgres",
             "create database alien")
 
-        alien_node.safe_psql(
+        wal_file = alien_node.execute(
+            "alien",
+            "SELECT pg_walfile_name(pg_current_wal_lsn());"
+        )
+        filename = wal_file[0][0] + self.compress_suffix
+
+        alien_node.execute(
             "alien",
             "create sequence t_seq; "
             "create table t_heap_alien as select i as id, "
             "md5(i::text) as text, "
             "md5(repeat(i::text,10))::tsvector as tsvector "
             "from generate_series(0,10000) i;")
+        alien_node.execute(
+            "alien",
+            "select pg_switch_wal()")
+        node.execute(
+            "postgres",
+            "select pg_switch_wal()")
 
-        # copy latest wal segment
-        wals = self.get_instance_wal_list(backup_dir, 'alien_node')
-        filename = max(wals)
-        # wait `node` did archived same file
+        # wait until both nodes have archived the same file
+        self.wait_instance_wal_exists(backup_dir, 'alien_node', filename)
         self.wait_instance_wal_exists(backup_dir, 'node', filename)
         file_content = self.read_instance_wal(backup_dir, 'alien_node', filename)
         self.write_instance_wal(backup_dir, 'node', filename, file_content)
diff --git a/tests/pbckp1242_test.py b/tests/pbckp1242_test.py
new file mode 100644
index 00000000..7e55f460
--- /dev/null
+++ b/tests/pbckp1242_test.py
@@ -0,0 +1,662 @@
+import unittest
+import os
+import re
+from time import sleep, time
+from datetime import datetime
+
+from pg_probackup2.gdb import needs_gdb
+
+from .helpers.ptrack_helpers import base36enc, ProbackupTest
+from .helpers.ptrack_helpers import fs_backup_class
+import subprocess
+
+tblspace_name = 'some_tblspace'
+
+class Pbckp1242Test(ProbackupTest):
+
+    def setup_node(self):
+        node = self.pg_node.make_simple(
+            "node",
+            set_replication=True,
+            initdb_params=['--data-checksums']
+        )
+        self.pb.init()
+        self.pb.add_instance('node', node)
+        node.slow_start()
+        return node
+
+    def jump_the_oid(self, node):
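+        # Push the cluster's OID counter past 2^31 so that objects created
+        # afterwards get "giga" OIDs, by patching the running backend's
+        # ShmemVariableCache through gdb. A sketch of a manual sanity check
+        # (assuming a psql session after the tables below exist) would be:
+        #   SELECT oid FROM pg_class WHERE relname = 't1';  -- expect > 2147483648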
+        pg_connect = node.connect("postgres", autocommit=True)
+        gdb = self.gdb_attach(pg_connect.pid)
+        gdb._execute('set ShmemVariableCache->nextOid=1<<31')
+        gdb._execute('set ShmemVariableCache->oidCount=0')
+        gdb.detach()
+
+    @needs_gdb
+    def test_table_with_giga_oid(self):
+        node = self.setup_node()
+        self.jump_the_oid(node)
+
+        node.execute('CREATE TABLE t1 (i int)')
+        node.execute('INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+        node.execute('CHECKPOINT')
+
+        table1_checksum = node.table_checksum('t1')
+
+        backup_id = self.pb.backup_node('node', node, backup_type='full',
+                                        options=['--stream'])
+
+        node.stop()
+        node.cleanup()
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id)
+
+        node.slow_start()
+
+        new_table1_checksum = node.table_checksum('t1')
+
+        self.assertEqual(new_table1_checksum, table1_checksum, "table checksums don't match")
+
+    @needs_gdb
+    def test_database_with_giga_oid(self):
+        node = self.setup_node()
+        self.jump_the_oid(node)
+
+        node.execute('CREATE DATABASE db2')
+
+        node.execute('db2', 'CREATE TABLE t1 (i int)')
+        node.execute('db2', 'INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+        node.execute('CHECKPOINT')
+
+        table1_checksum = node.table_checksum('t1', 'db2')
+
+        backup_id = self.pb.backup_node('node', node, backup_type='full',
+                                        options=['--stream'])
+
+        node.stop()
+        node.cleanup()
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id)
+
+        node.slow_start()
+
+        new_table1_checksum = node.table_checksum('t1', 'db2')
+
+        self.assertEqual(new_table1_checksum, table1_checksum, "table checksums don't match")
+
+    @needs_gdb
+    def test_table_with_giga_oid_in_tablespace(self):
+        node = self.setup_node()
+        self.create_tblspace_in_node(node, tblspace_name)
+
+        self.jump_the_oid(node)
+
+        node.execute(f'CREATE TABLE t1 (i int) TABLESPACE {tblspace_name}')
+        node.execute('INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+        node.execute('CHECKPOINT')
+
+        table1_checksum = node.table_checksum('t1')
+
+        backup_id = self.pb.backup_node('node', node, backup_type='full',
+                                        options=['--stream'])
+
+        node.stop()
+        node.cleanup()
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id)
+
+        node.slow_start()
+
+        new_table1_checksum = node.table_checksum('t1')
+
+        self.assertEqual(new_table1_checksum, table1_checksum, "table checksums don't match")
+
+    @needs_gdb
+    def test_database_with_giga_oid_in_tablespace(self):
+        node = self.setup_node()
+        self.create_tblspace_in_node(node, tblspace_name)
+
+        self.jump_the_oid(node)
+
+        node.execute(f'CREATE DATABASE db2 TABLESPACE {tblspace_name}')
+
+        node.execute('db2', 'CREATE TABLE t1 (i int)')
+        node.execute('db2', 'INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+        node.execute('CHECKPOINT')
+
+        table1_checksum = node.table_checksum('t1', 'db2')
+
+        backup_id = self.pb.backup_node('node', node, backup_type='full',
+                                        options=['--stream'])
+
+        node.stop()
+        node.cleanup()
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id)
+
+        node.slow_start()
+
+        new_table1_checksum = node.table_checksum('t1', 'db2')
+
+        self.assertEqual(new_table1_checksum, table1_checksum, "table checksums don't match")
+
+    @needs_gdb
+    def test_database_with_giga_oid_in_tablespace_2(self):
+        node = self.setup_node()
+        self.create_tblspace_in_node(node, tblspace_name)
+
+        self.jump_the_oid(node)
+
+        node.execute('CREATE DATABASE db2')
+
+        node.execute('db2', f'CREATE TABLE t1 (i int) TABLESPACE {tblspace_name}')
+        node.execute('db2', 'INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
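+        # flush the freshly written rows to disk before taking the backup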
+        node.execute('CHECKPOINT')
+
+        table1_checksum = node.table_checksum('t1', 'db2')
+
+        backup_id = self.pb.backup_node('node', node, backup_type='full',
+                                        options=['--stream'])
+
+        node.stop()
+        node.cleanup()
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id)
+
+        node.slow_start()
+
+        new_table1_checksum = node.table_checksum('t1', 'db2')
+
+        self.assertEqual(new_table1_checksum, table1_checksum, "table checksums don't match")
+
+    @needs_gdb
+    def test_detect_database_with_giga_oid_in_tablespace(self):
+        node = self.setup_node()
+        self.create_tblspace_in_node(node, tblspace_name)
+
+        self.jump_the_oid(node)
+
+        node.execute(f'CREATE DATABASE db2 TABLESPACE {tblspace_name}')
+
+        node.execute('db2', 'CREATE TABLE t1 (i int)')
+        node.execute('db2', 'INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+        node.execute('CHECKPOINT')
+
+        backup_id = self.pb.backup_node('node', node, backup_type='full',
+                                        options=['--stream'])
+
+        node.stop()
+        node.cleanup()
+
+        self.prepare_backup_for_detect_missed_database(backup_id)
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id,
+                             expect_error="database with giga oid")
+        self.assertMessage(contains="probably has missing files in")
+        self.assertMessage(contains="were created by misbehaving")
+
+    def test_nodetect_database_without_giga_oid_in_tablespace(self):
+        node = self.setup_node()
+        self.create_tblspace_in_node(node, tblspace_name)
+
+        node.execute(f'CREATE DATABASE db2 TABLESPACE {tblspace_name}')
+
+        node.execute('db2', 'CREATE TABLE t1 (i int)')
+        node.execute('db2', 'INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+        node.execute('CHECKPOINT')
+
+        table1_checksum = node.table_checksum('t1', 'db2')
+
+        backup_id = self.pb.backup_node('node', node, backup_type='full',
+                                        options=['--stream'])
+
+        node.stop()
+        node.cleanup()
+
+        self.prepare_backup_for_detect_missed_database(backup_id)
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id)
+
+        node.slow_start()
+
+        new_table1_checksum = node.table_checksum('t1', 'db2')
+
+        self.assertEqual(new_table1_checksum, table1_checksum, "table checksums don't match")
+
+    @needs_gdb
+    def test_tablespace_with_giga_oid(self):
+        node = self.setup_node()
+        node.execute('CREATE DATABASE db2')
+
+        node.execute('db2', 'CREATE TABLE t1 (i int)')
+        node.execute('db2', 'INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+
+        table1_checksum = node.table_checksum('t1', 'db2')
+
+        self.jump_the_oid(node)
+
+        self.create_tblspace_in_node(node, tblspace_name)
+
+        node.execute(f'ALTER DATABASE db2 SET TABLESPACE {tblspace_name}')
+        node.execute('CHECKPOINT')
+
+        backup_id = self.pb.backup_node('node', node, backup_type='full',
+                                        options=['--stream'])
+
+        node.stop()
+        node.cleanup()
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id)
+
+        node.slow_start()
+
+        new_table1_checksum = node.table_checksum('t1', 'db2')
+
+        self.assertEqual(new_table1_checksum, table1_checksum, "table checksums don't match")
+
+    @needs_gdb
+    def test_detect_tablespace_with_giga_oid(self):
+        node = self.setup_node()
+        node.execute('CREATE DATABASE db2')
+
+        node.execute('db2', 'CREATE TABLE t1 (i int)')
+        node.execute('db2', 'INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+
+        table1_checksum = node.table_checksum('t1', 'db2')
+
+        self.jump_the_oid(node)
+
+        self.create_tblspace_in_node(node, tblspace_name)
+
+        node.execute(f'ALTER DATABASE db2 SET TABLESPACE {tblspace_name}')
+        node.execute('CHECKPOINT')
+
+        backup_id = self.pb.backup_node('node', node, backup_type='full',
+                                        options=['--stream'])
+
+        node.stop()
+        node.cleanup()
+
+        self.prepare_backup_for_detect_missed_tablespace(backup_id)
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id,
+                             expect_error='tablespace with gigaoid')
+
+        self.assertMessage(contains="has missing tablespace")
+        self.assertMessage(contains="were created by misbehaving")
+
+    def test_nodetect_tablespace_without_giga_oid(self):
+        node = self.setup_node()
+        node.execute('CREATE DATABASE db2')
+
+        node.execute('db2', 'CREATE TABLE t1 (i int)')
+        node.execute('db2', 'INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+
+        table1_checksum = node.table_checksum('t1', 'db2')
+
+        self.create_tblspace_in_node(node, tblspace_name)
+
+        node.execute(f'ALTER DATABASE db2 SET TABLESPACE {tblspace_name}')
+        node.execute('CHECKPOINT')
+
+        backup_id = self.pb.backup_node('node', node, backup_type='full',
+                                        options=['--stream'])
+
+        node.stop()
+        node.cleanup()
+
+        self.prepare_backup_for_detect_missed_tablespace(backup_id)
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id)
+
+        node.slow_start()
+
+        new_table1_checksum = node.table_checksum('t1', 'db2')
+
+        self.assertEqual(new_table1_checksum, table1_checksum, "table checksums don't match")
+
+    @needs_gdb
+    def test_detect_giga_oid_table(self):
+        """Detect that we can't take an increment based on a backup with a misdetected file type"""
+        node = self.setup_node()
+        self.jump_the_oid(node)
+
+        node.execute('CREATE TABLE t1 (i int)')
+        node.execute('INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+        node.execute('CHECKPOINT')
+
+        backup_id = self.pb.backup_node('node', node, backup_type='full',
+                                        options=['--stream'])
+
+        self.prepare_backup_for_detect_nondatafile_relation(backup_id)
+
+        self.pb.backup_node('node', node, backup_type='delta',
+                            options=['--stream'],
+                            expect_error="relation is mistakenly marked as non-datafile")
+        self.assertMessage(contains="were created by misbehaving")
+        self.assertMessage(contains="Could not use it as a parent for increment")
+
+    def test_nodetect_giga_oid_table(self):
+        """Check that we can take an increment based on a backup without a misdetected file type"""
+        node = self.setup_node()
+
+        node.execute('CREATE TABLE t1 (i int)')
+        node.execute('INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+        node.execute('CHECKPOINT')
+
+        backup_id = self.pb.backup_node('node', node, backup_type='full',
+                                        options=['--stream'])
+
+        self.prepare_backup_for_detect_nondatafile_relation(backup_id)
+
+        node.execute('INSERT INTO t1 (i) SELECT generate_series(2000, 3000)')
+
+        table1_checksum = node.table_checksum('t1')
+
+        backup_id2 = self.pb.backup_node('node', node, backup_type='delta',
+                                         options=['--stream'])
+
+        node.stop()
+        node.cleanup()
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id2)
+
+        node.slow_start()
+
+        new_table1_checksum = node.table_checksum('t1')
+
+        self.assertEqual(new_table1_checksum, table1_checksum, "table checksums don't match")
+
+    @needs_gdb
+    def test_detect_giga_oid_table_in_merge_restore(self):
+        """Detect that we can't merge/restore a mixed increment chain with a misdetected file type"""
+        node = self.setup_node()
+        self.jump_the_oid(node)
+
+        node.execute('CREATE TABLE t1 (i int)')
+        node.execute('INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+        node.execute('CHECKPOINT')
+
+        backup_id1 = self.pb.backup_node('node', node, backup_type='full',
+                                         options=['--stream'])
+
+        backup_id2 = self.pb.backup_node('node', node, backup_type='delta',
+                                         options=['--stream'])
+        self.backup_control_version_to_2_7_3(backup_id1)
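+        # fake a mixed chain: the FULL was just stamped as pre-bug 2.7.3,
+        # while the DELTA below is stamped as buggy 2.8.1 with a relation
+        # misdetected as a non-datafile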
+        self.prepare_backup_for_detect_nondatafile_relation(backup_id2)
+
+        self.pb.merge_backup('node', backup_id2,
+                             options=['--no-validate'],
+                             expect_error="due to chain of mixed bug/nobug backups")
+
+        self.assertMessage(contains="kind reg detected is_datafile=0 stored=1")
+
+        node.stop()
+        node.cleanup()
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id2,
+                             options=['--no-validate'],
+                             expect_error="due to chain of mixed bug/nobug backups")
+
+        self.assertMessage(contains="kind reg detected is_datafile=0 stored=1")
+
+        self.pb.merge_backup('node', backup_id2,
+                             options=['--no-validate'],
+                             expect_error="due to chain of mixed bug/nobug backups")
+
+        self.assertMessage(contains="kind reg detected is_datafile=0 stored=1")
+
+    @needs_gdb
+    def test_allow_giga_oid_table_in_restore(self):
+        """Check that we can restore a uniform increment chain with a misdetected file type"""
+        node = self.setup_node()
+        self.jump_the_oid(node)
+
+        node.execute('CREATE TABLE t1 (i int)')
+        node.execute('INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+        node.execute('CHECKPOINT')
+
+        backup_id1 = self.pb.backup_node('node', node, backup_type='full',
+                                         options=['--stream'])
+
+        backup_id2 = self.pb.backup_node('node', node, backup_type='delta',
+                                         options=['--stream'])
+        self.prepare_backup_for_detect_nondatafile_relation(backup_id1)
+        self.prepare_backup_for_detect_nondatafile_relation(backup_id2)
+
+        node.stop()
+        node.cleanup()
+
+        self.pb.restore_node('node', node, backup_id=backup_id2)
+        # although we did restore, we cannot check the table checksum,
+        # because we backed up relations as datafiles
+        # (since the backup was made with fixed pbckp1242)
+        # and restored the relation as a non-datafile (i.e. with probackup's headers)
+
+        self.pb.merge_backup('node', backup_id2,
+                             expect_error="because of backups with bug")
+        self.assertMessage(contains='backups with 2.8.0/2.8.1')
+        self.assertMessage(contains="Could not merge them.")
+
+    @needs_gdb
+    def test_nodetect_giga_oid_table_in_merge_restore(self):
+        """Check that we can merge/restore a mixed increment chain without a misdetected file type"""
+        node = self.setup_node()
+
+        node.execute('CREATE TABLE t1 (i int)')
+        node.execute('INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+        node.execute('CHECKPOINT')
+
+        backup_id1 = self.pb.backup_node('node', node, backup_type='full',
+                                         options=['--stream'])
+
+        node.execute('INSERT INTO t1 (i) SELECT generate_series(2000, 3000)')
+        node.execute('CHECKPOINT')
+
+        table1_checksum = node.table_checksum('t1')
+
+        backup_id2 = self.pb.backup_node('node', node, backup_type='delta',
+                                         options=['--stream'])
+        self.backup_control_version_to_2_7_3(backup_id1)
+        self.prepare_backup_for_detect_nondatafile_relation(backup_id2)
+
+        node.stop()
+        node.cleanup()
+
+        self.pb.restore_node('node', node, backup_id=backup_id2)
+
+        node.slow_start()
+
+        new_table1_checksum = node.table_checksum('t1')
+
+        self.assertEqual(new_table1_checksum, table1_checksum, "table checksums don't match")
+
+        self.pb.merge_backup('node', backup_id2)
+
+    @needs_gdb
+    def test_detect_giga_oid_database_in_merge_restore(self):
+        """Detect that we can't merge/restore a mixed increment chain with a misdetected file type"""
+        node = self.setup_node()
+        self.jump_the_oid(node)
+
+        node.execute('CREATE DATABASE db2')
+
+        node.execute('db2', 'CREATE TABLE t1 (i int)')
+        node.execute('db2', 'INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+        node.execute('CHECKPOINT')
+
+        backup_id1 = self.pb.backup_node('node', node, backup_type='full',
+                                         options=['--stream'])
+
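+        # grow db2 between the backups so the DELTA contains changed datafiles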
+        node.execute('db2', 'CREATE TABLE t2 (i int)')
+        node.execute('db2', 'INSERT INTO t1 (i) SELECT generate_series(2000, 3000)')
+        node.execute('db2', 'INSERT INTO t2 (i) SELECT generate_series(2000, 3000)')
+        node.execute('CHECKPOINT')
+
+        backup_id2 = self.pb.backup_node('node', node, backup_type='delta',
+                                         options=['--stream'])
+        self.backup_control_version_to_2_7_3(backup_id1)
+        self.prepare_backup_for_detect_gigaoid_database(backup_id2)
+
+        self.pb.merge_backup('node', backup_id2,
+                             options=['--no-validate'],
+                             expect_error="due to chain of mixed bug/nobug backups")
+
+        self.assertMessage(contains="kind reg detected is_datafile=0 stored=1")
+
+        node.stop()
+        node.cleanup()
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id2,
+                             options=['--no-validate'],
+                             expect_error="due to chain of mixed bug/nobug backups")
+
+        self.assertMessage(contains="kind reg detected is_datafile=0 stored=1")
+
+        self.pb.merge_backup('node', backup_id2,
+                             options=['--no-validate'],
+                             expect_error="due to chain of mixed bug/nobug backups")
+
+        self.assertMessage(contains="kind reg detected is_datafile=0 stored=1")
+
+    @needs_gdb
+    def test_allow_giga_oid_database_in_restore(self):
+        """Check that we can restore a uniform increment chain with a misdetected file type"""
+        node = self.setup_node()
+        self.jump_the_oid(node)
+
+        node.execute('CREATE DATABASE db2')
+
+        node.execute('db2', 'CREATE TABLE t1 (i int)')
+        node.execute('db2', 'INSERT INTO t1 (i) SELECT generate_series(1, 1000)')
+        node.execute('CHECKPOINT')
+
+        backup_id1 = self.pb.backup_node('node', node, backup_type='full',
+                                         options=['--stream'])
+
+        node.execute('db2', 'CREATE TABLE t2 (i int)')
+        node.execute('db2', 'INSERT INTO t1 (i) SELECT generate_series(2000, 3000)')
+        node.execute('db2', 'INSERT INTO t2 (i) SELECT generate_series(2000, 3000)')
+        node.execute('CHECKPOINT')
+
+        backup_id2 = self.pb.backup_node('node', node, backup_type='delta',
+                                         options=['--stream'])
+        self.prepare_backup_for_detect_gigaoid_database(backup_id1)
+        self.prepare_backup_for_detect_gigaoid_database(backup_id2)
+
+        node.stop()
+        node.cleanup()
+
+        self.pb.restore_node('node', node,
+                             backup_id=backup_id2)
+        # although we did restore, we cannot check the table checksum,
+        # because we backed up relations as datafiles
+        # (since the backup was made with fixed pbckp1242)
+        # and restored the relation as a non-datafile (i.e. with probackup's headers)
+
+        self.pb.merge_backup('node', backup_id2,
+                             expect_error="because of backups with bug")
+        self.assertMessage(contains='backups with 2.8.0/2.8.1')
+        self.assertMessage(contains="Could not merge them.")
+
+    def backup_control_version_to(self, version, backup_id):
+        with self.modify_backup_control(self.backup_dir, 'node', backup_id) as control:
+            new = []
+            for line in control.data.splitlines(True):
+                if line.startswith('program-version'):
+                    line = f'program-version = {version}\n'
+                elif line.startswith('content-crc'):
+                    line = 'content-crc = 0\n'
+                new.append(line)
+            control.data = "".join(new)
+
+    def backup_control_version_to_2_8_1(self, backup_id):
+        self.backup_control_version_to('2.8.1', backup_id)
+
+    def backup_control_version_to_2_7_3(self, backup_id):
+        self.backup_control_version_to('2.7.3', backup_id)
+
+    def prepare_backup_for_detect_missed_database(self, backup_id):
+        self.backup_control_version_to_2_8_1(backup_id)
+
+        with self.modify_backup_control(self.backup_dir, 'node', backup_id, content=True) as content:
+            new = []
+            for line in content.data.splitlines(True):
+                if 'pg_tblspc' in line:
+                    st = line.index('pg_tblspc')
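+                    # cut the path out between 'pg_tblspc' and the closing quote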
+                    en = line.index('"', st)
+                    path = line[st:en]
+                    elems = path.split('/')
+                    if len(elems) > 4 and len(elems[3]) >= 10:
+                        # delete all files in database folder with giga-oid
+                        continue
+                new.append(line)
+            content.data = "".join(new)
+
+    def prepare_backup_for_detect_missed_tablespace(self, backup_id):
+        self.backup_control_version_to_2_8_1(backup_id)
+
+        with self.modify_backup_control(self.backup_dir, 'node', backup_id, content=True) as content:
+            new = []
+            for line in content.data.splitlines(True):
+                if 'pg_tblspc' in line:
+                    st = line.index('pg_tblspc')
+                    en = line.index('"', st)
+                    path = line[st:en]
+                    elems = path.split('/')
+                    if len(elems) >= 2 and len(elems[1]) >= 10:
+                        # delete giga-oid tablespace completely
+                        continue
+                new.append(line)
+            content.data = "".join(new)
+
+    def prepare_backup_for_detect_nondatafile_relation(self, backup_id):
+        self.backup_control_version_to_2_8_1(backup_id)
+
+        with self.modify_backup_control(self.backup_dir, 'node', backup_id, content=True) as content:
+            new = []
+            for line in content.data.splitlines(True):
+                if 'base/' in line:
+                    st = line.index('base/')
+                    en = line.index('"', st)
+                    path = line[st:en]
+                    elems = path.split('/')
+                    if len(elems) == 3 and len(elems[2]) >= 10 and elems[2].isdecimal():
+                        # pretend it is not a datafile
+                        line = line.replace('"is_datafile":"1"', '"is_datafile":"0"')
+                new.append(line)
+            content.data = "".join(new)
+
+    def prepare_backup_for_detect_gigaoid_database(self, backup_id):
+        self.backup_control_version_to_2_8_1(backup_id)
+
+        with self.modify_backup_control(self.backup_dir, 'node', backup_id, content=True) as content:
+            new = []
+            for line in content.data.splitlines(True):
+                if 'base/' in line:
+                    st = line.index('base/')
+                    en = line.index('"', st)
+                    path = line[st:en]
+                    elems = path.split('/')
+                    if len(elems) == 3 and len(elems[1]) >= 10 and elems[2].isdecimal():
+                        # 1. change dbOid to dbOid / 10
+                        # 2. pretend it is not a datafile
+                        line = line.replace('"is_datafile":"1"', '"is_datafile":"0"')
+                        line = line.replace(f'"dbOid":"{elems[1]}"', f'"dbOid":"{int(elems[1])//10}"')
+                new.append(line)
+            content.data = "".join(new)
diff --git a/tests/replica_test.py b/tests/replica_test.py
index 7ff539b3..afadab17 100644
--- a/tests/replica_test.py
+++ b/tests/replica_test.py
@@ -989,7 +989,9 @@ def test_replica_promote_2(self):
         master.safe_psql(
             'postgres',
             'CREATE TABLE t1 AS '
-            'SELECT i, repeat(md5(i::text),5006056) AS fat_attr '
+            'SELECT i,'
+            ' (select string_agg(md5((i^j)::text), \',\')'
+            ' from generate_series(1,5006056) j) AS fat_attr '
             'FROM generate_series(0,1) i')
 
         self.wait_until_replica_catch_with_master(master, replica)
diff --git a/tests/requirements.txt b/tests/requirements.txt
index 32910a3b..eea7b8c4 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -6,10 +6,10 @@
 # 3. From a local directory
 #    /path/to/local/directory/testgres
 testgres==1.10.0
-git+https://github.com/postgrespro/testgres.git@fix-json-parse-in-show#egg=testgres_pg_probackup2&subdirectory=testgres/plugins/pg_probackup2
+testgres-pg-probackup2==0.0.2
 allure-pytest
 deprecation
-minio==7.2.5
+minio
 pexpect
 pytest==7.4.3
 pytest-xdist
diff --git a/tests/s3_auth_test.py b/tests/s3_auth_test.py
new file mode 100644
index 00000000..d1a7c707
--- /dev/null
+++ b/tests/s3_auth_test.py
@@ -0,0 +1,20 @@
+from .helpers.ptrack_helpers import ProbackupTest
+
+
+class AuthorizationTest(ProbackupTest):
+    """
+    Check the connection to S3 via the pre_start_checks() function
+    by calling pg_probackup init --s3
+
+    test that the S3 keys allow connecting to all types of storage
+    """
+
+    def test_s3_auth_test(self):
+        console_output = self.pb.init(options=["--log-level-console=VERBOSE"])
+
+        self.assertNotIn(': 403', console_output)  # a plain '403' substring could appear in a timestamp
+        self.assertMessage(console_output, contains='S3_pre_start_check successful')
+        self.assertMessage(console_output, contains='HTTP response: 200')
+        self.assertIn(
+            f"INFO: Backup catalog '{self.backup_dir}' successfully initialized",
+            console_output)
diff --git a/tests/time_consuming_test.py b/tests/time_consuming_test.py
index 3da2208d..4fe77e56 100644
--- a/tests/time_consuming_test.py
+++ b/tests/time_consuming_test.py
@@ -68,6 +68,8 @@ def test_pbckp150(self):
         pgbenchval.kill()
         pgbench.wait()
         pgbenchval.wait()
+        pgbench.stdout.close()
+        pgbenchval.stdout.close()
 
         backups = self.pb.show('node')
         for b in backups:
diff --git a/tests/validate_test.py b/tests/validate_test.py
index 3b97171d..bf608ad2 100644
--- a/tests/validate_test.py
+++ b/tests/validate_test.py
@@ -2305,6 +2305,9 @@ def test_corrupt_pg_control_via_resetxlog(self):
         os.mkdir(
             os.path.join(
                 self.backup_dir, 'backups', 'node', backup_id, 'database', wal_dir, 'archive_status'))
+        os.mkdir(
+            os.path.join(
+                self.backup_dir, 'backups', 'node', backup_id, 'database', wal_dir, 'summaries'))
 
         pg_control_path = os.path.join(
             self.backup_dir, 'backups', 'node',