Skip to content

Commit

Permalink
test: Use get_test_messages_ctx in place of get_test_messages
Browse files Browse the repository at this point in the history
  • Loading branch information
nj1973 committed Jul 18, 2024
1 parent 031ad97 commit e88150f
Show file tree
Hide file tree
Showing 12 changed files with 2,727 additions and 2,880 deletions.
1 change: 0 additions & 1 deletion tests/integration/scenarios/test_offload_basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -897,7 +897,6 @@ def test_offload_log_path_gcs(config, schema, data_db):
config.offload_fs_scheme != OFFLOAD_FS_SCHEME_GS
or config.log_path.startswith("gs:")
):
messages.log(f"Skipping {id} because it is unnecessary for current config")
pytest.skip(f"Skipping {id} because it is unnecessary for current config")

backend_api = get_backend_testing_api(config, messages)
Expand Down
331 changes: 164 additions & 167 deletions tests/integration/scenarios/test_offload_misc.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,8 +46,8 @@
from tests.testlib.test_framework import test_constants
from tests.testlib.test_framework.test_functions import (
    get_backend_testing_api,
    get_frontend_testing_api,
    get_frontend_testing_api_ctx,
    get_test_messages,
    get_test_messages_ctx,
)


Expand Down Expand Up @@ -78,178 +78,175 @@ def log_test_marker(messages, test_id):

def test_offload_misc_verification_parallel(config, schema, data_db):
    """Check that offload verification parallelism produces the expected hints.

    Offloads a standard dimension several times with differing
    ``verify_parallelism`` settings (3, 1, 0 and 4-with-aggregation) and
    asserts, via ``hint_text_in_log``, that the expected parallel value was
    used for the frontend verification query each time.

    Oracle only: the parallel-hint check is frontend specific, other systems
    are skipped.
    """
    id = "test_offload_misc_verification_parallel"

    if config.db_type != offload_constants.DBTYPE_ORACLE:
        pytest.skip(f"Skipping {id} for system: {config.db_type}")

    # messages is bound by the first context manager before the second
    # context expression is evaluated, so it is safe to pass it along.
    with get_test_messages_ctx(config, id) as messages, get_frontend_testing_api_ctx(
        config, messages, trace_action=id
    ) as frontend_api:
        backend_api = get_backend_testing_api(config, messages)

        # Setup: create the frontend dimension and drop any stale backend copy.
        run_setup(
            frontend_api,
            backend_api,
            config,
            messages,
            frontend_sqls=frontend_api.standard_dimension_frontend_ddl(
                schema, PARALLEL_V_DIM
            ),
            python_fns=[
                lambda: drop_backend_test_table(
                    config, backend_api, messages, data_db, PARALLEL_V_DIM
                ),
            ],
        )

        # Offload with count verification parallelism=3.
        # Disables data sampling to minimize risk of other hints being matched.
        options = {
            "owner_table": schema + "." + PARALLEL_V_DIM,
            "verify_parallelism": 3,
            "data_sample_pct": 0,
            "reset_backend_table": True,
            "execute": False,
        }
        log_test_marker(messages, f"{id}1")
        run_offload(options, config, messages)
        assert hint_text_in_log(messages, config, 3, f"{id}1")

        # Offload with verification parallelism=1.
        options = {
            "owner_table": schema + "." + PARALLEL_V_DIM,
            "verify_parallelism": 1,
            "data_sample_pct": 0,
            "reset_backend_table": True,
            "execute": False,
        }
        log_test_marker(messages, f"{id}2")
        run_offload(options, config, messages)
        assert hint_text_in_log(messages, config, 1, f"{id}2")

        # Offload with verification parallelism=0.
        options = {
            "owner_table": schema + "." + PARALLEL_V_DIM,
            "verify_parallelism": 0,
            "data_sample_pct": 0,
            "reset_backend_table": True,
            "execute": False,
        }
        log_test_marker(messages, f"{id}3")
        run_offload(options, config, messages)
        assert hint_text_in_log(messages, config, 0, f"{id}3")

        # Offload with aggregation verification parallelism=4.
        # This one runs in execute mode and creates the backend db if needed.
        options = {
            "owner_table": schema + "." + PARALLEL_V_DIM,
            "verify_parallelism": 4,
            "verify_row_count": "aggregate",
            "data_sample_pct": 0,
            "reset_backend_table": True,
            "create_backend_db": True,
            "execute": True,
        }
        log_test_marker(messages, f"{id}4")
        run_offload(options, config, messages)
        assert hint_text_in_log(messages, config, 4, f"{id}4")


def test_offload_misc_maxvalue_partition(config, schema, data_db):
    """Check offload behaviour for a fact table with a MAXVALUE partition.

    Three offload passes over a sales-based fact created with a MAXVALUE
    partition:

    1. 90/10 offload of the early partitions (older than HV_2).
    2. 90/10 offload of the remainder: the MAXVALUE partition must be
       skipped, signalled by ``NO_MAXVALUE_PARTITION_NOTICE_TEXT`` in the
       offload messages.
    3. 100/0 (``OFFLOAD_TYPE_FULL``) offload: the MAXVALUE partition is
       included and backend row counts are verified.

    Oracle only: other systems are skipped.
    """
    id = "test_offload_misc_maxvalue_partition"

    if config.db_type != offload_constants.DBTYPE_ORACLE:
        pytest.skip(f"Skipping {id} for system: {config.db_type}")

    # messages is bound by the first context manager before the second
    # context expression is evaluated, so it is safe to pass it along.
    with get_test_messages_ctx(config, id) as messages, get_frontend_testing_api_ctx(
        config, messages, trace_action=id
    ) as frontend_api:
        backend_api = get_backend_testing_api(config, messages)
        repo_client = orchestration_repo_client_factory(
            config, messages, trace_action=f"repo_client({id})"
        )

        # Setup: create the MAXVALUE-partitioned fact and drop any stale
        # backend copy.
        run_setup(
            frontend_api,
            backend_api,
            config,
            messages,
            frontend_sqls=frontend_api.sales_based_fact_create_ddl(
                schema, MAXVAL_FACT, maxval_partition=True
            ),
            python_fns=[
                lambda: drop_backend_test_table(
                    config, backend_api, messages, data_db, MAXVAL_FACT
                ),
            ],
        )

        # 90/10 Offload of Fact Ready to Convert.
        # Offloads first partitions from a fact table ready for subsequent tests.
        options = {
            "owner_table": schema + "." + MAXVAL_FACT,
            "older_than_date": test_constants.SALES_BASED_FACT_HV_2,
            "reset_backend_table": True,
            "create_backend_db": True,
            "execute": True,
        }
        run_offload(options, config, messages)
        assert sales_based_fact_assertion(
            config,
            backend_api,
            frontend_api,
            messages,
            repo_client,
            schema,
            data_db,
            MAXVAL_FACT,
            test_constants.SALES_BASED_FACT_HV_2,
            offload_pattern=scenario_constants.OFFLOAD_PATTERN_90_10,
        )

        # 90/10 Offload of Fact with MAXVALUE Partition.
        # Offloads all partitions from a MAXVALUE fact table but in 90/10, the
        # MAXVALUE partition should be skipped.
        options = {
            "owner_table": schema + "." + MAXVAL_FACT,
            "execute": True,
        }
        offload_messages = run_offload(options, config, messages)
        assert sales_based_fact_assertion(
            config,
            backend_api,
            frontend_api,
            messages,
            repo_client,
            schema,
            data_db,
            MAXVAL_FACT,
            test_constants.SALES_BASED_FACT_HV_6,
            offload_pattern=scenario_constants.OFFLOAD_PATTERN_90_10,
        )
        assert text_in_messages(offload_messages, NO_MAXVALUE_PARTITION_NOTICE_TEXT)

        # Offload 90/10 fact to 100/0.
        # Offloads all partitions from a fact table including MAXVALUE partition.
        options = {
            "owner_table": schema + "." + MAXVAL_FACT,
            "offload_type": OFFLOAD_TYPE_FULL,
            "execute": True,
        }
        offload_messages = run_offload(options, config, messages)
        assert sales_based_fact_assertion(
            config,
            backend_api,
            frontend_api,
            messages,
            repo_client,
            schema,
            data_db,
            MAXVAL_FACT,
            None,
            offload_pattern=scenario_constants.OFFLOAD_PATTERN_100_0,
            check_backend_rowcount=True,
        )
Loading

0 comments on commit e88150f

Please sign in to comment.