fix: improve qgetdata/qdata tests (#5744)
## Issue being fixed or feature implemented
The test is a bit broken and incomplete; some testing scenarios aren't
realistic.

## What was done?
Please see the individual commits.
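
For illustration, here is a sketch of the central refactor (paraphrased from the diff below, not additional code in the PR): the separate `wait_for_qgetdata()`/`wait_for_qdata()` waiters are consolidated into a single `wait_for_qmessage()` helper. `wait_until`, `mininode_lock` and `message_count` are existing test-framework names referenced by the diff, not new ones:

```python
def wait_for_qmessage(self, message=None, timeout=3, message_expected=True):
    # Wait until at least one message of the given type has been counted;
    # with do_assert=False the timeout passes silently when no message is expected.
    wait_until(lambda: self.message_count[message] > 0,
               timeout=timeout, lock=mininode_lock, do_assert=message_expected)
    if not message_expected:
        # If the message was not expected, make sure none actually arrived.
        assert self.message_count[message] == 0
    # Reset the counter so the next wait starts from a clean state.
    self.message_count[message] = 0
```

Callers then name the message they are waiting for explicitly, e.g. `p2p_mn1.wait_for_qmessage("qgetdata")` or `self.wait_for_qmessage("qdata", message_expected=response_expected)`, instead of using the old per-message helpers.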

## How Has This Been Tested?
run it

## Breaking Changes
n/a, tests only

## Checklist:
- [x] I have performed a self-review of my own code
- [x] I have commented my code, particularly in hard-to-understand areas
- [ ] I have added or updated relevant unit/integration/functional/e2e
tests
- [ ] I have made corresponding changes to the documentation
- [x] I have assigned this pull request to a milestone _(for repository
code-owners and collaborators only)_
UdjinM6 authored Dec 6, 2023
1 parent ca4490a commit 8a888fb
115 changes: 59 additions & 56 deletions test/functional/p2p_quorum_data.py
@@ -58,6 +58,11 @@ def wait_for_banscore(node, peer_id, expected_score):
def get_score():
for peer in node.getpeerinfo():
if peer["id"] == peer_id:
if (peer["banscore"] == expected_score):
# The score matches the one we expected.
# Wait a bit to make sure it won't change
# to avoid false positives.
time.sleep(1)
return peer["banscore"]
return None
wait_until(lambda: get_score() == expected_score, timeout=6)
@@ -96,29 +101,19 @@ def __init__(self):

def test_qgetdata(self, qgetdata, expected_error=0, len_vvec=0, len_contributions=0, response_expected=True):
self.send_message(qgetdata)
self.wait_for_qdata(message_expected=response_expected)
self.wait_for_qmessage("qdata", message_expected=response_expected)
if response_expected:
assert_qdata(self.get_qdata(), qgetdata, expected_error, len_vvec, len_contributions)

def wait_for_qgetdata(self, timeout=3, message_expected=True):
def test_function():
return self.message_count["qgetdata"]
wait_until(test_function, timeout=timeout, lock=mininode_lock, do_assert=message_expected)
self.message_count["qgetdata"] = 0
def wait_for_qmessage(self, message=None, timeout=3, message_expected=True):
wait_until(lambda: self.message_count[message] > 0, timeout=timeout, lock=mininode_lock, do_assert=message_expected)
if not message_expected:
assert not self.message_count["qgetdata"]
assert self.message_count[message] == 0
self.message_count[message] = 0

def get_qdata(self):
return self.last_message["qdata"]

def wait_for_qdata(self, timeout=10, message_expected=True):
def test_function():
return self.message_count["qdata"]
wait_until(test_function, timeout=timeout, lock=mininode_lock, do_assert=message_expected)
self.message_count["qdata"] = 0
if not message_expected:
assert not self.message_count["qdata"]


class QuorumDataMessagesTest(DashTestFramework):
def set_test_params(self):
@@ -139,52 +134,54 @@ def run_test(self):
def force_request_expire(bump_seconds=self.quorum_data_request_expiration_timeout + 1):
self.bump_mocktime(bump_seconds)
# Test with/without expired request cleanup
if node0.getblockcount() % 2:
if self.cleanup:
node0.generate(1)
self.sync_blocks()

def test_basics():
self.log.info("Testing basics of QGETDATA/QDATA")
force_request_expire()
p2p_node0 = p2p_connection(node0)
p2p_mn1 = p2p_connection(mn1.node)
p2p_mn2 = p2p_connection(mn2.node)
id_p2p_node0 = get_mininode_id(node0)
id_p2p_mn1 = get_mininode_id(mn1.node)
id_p2p_mn2 = get_mininode_id(mn2.node)

# Ensure that both nodes start with zero ban score
wait_for_banscore(node0, id_p2p_node0, 0)
wait_for_banscore(mn1.node, id_p2p_mn1, 0)
wait_for_banscore(mn2.node, id_p2p_mn2, 0)

self.log.info("Check that normal node doesn't respond to qgetdata "
"and does bump our score")
p2p_node0.test_qgetdata(qgetdata_all, response_expected=False)
wait_for_banscore(node0, id_p2p_node0, 10)
# The masternode should not respond to qgetdata for non-masternode connections
self.log.info("Check that masternode doesn't respond to "
"non-masternode connection. Doesn't bump score.")
p2p_mn1.test_qgetdata(qgetdata_all, response_expected=False)
wait_for_banscore(mn1.node, id_p2p_mn1, 10)
"non-masternode connection and does bump our score")
p2p_mn2.test_qgetdata(qgetdata_all, response_expected=False)
wait_for_banscore(mn2.node, id_p2p_mn2, 10)
# Open a fake MNAUTH authenticated P2P connection to the masternode to allow qgetdata
node0.disconnect_p2ps()
mn1.node.disconnect_p2ps()
p2p_mn1 = p2p_connection(mn1.node)
id_p2p_mn1 = get_mininode_id(mn1.node)
mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
mn2.node.disconnect_p2ps()
p2p_mn2 = p2p_connection(mn2.node)
id_p2p_mn2 = get_mininode_id(mn2.node)
mnauth(mn2.node, id_p2p_mn2, fake_mnauth_2[0], fake_mnauth_2[1])
# The masternode should now respond to qgetdata requests
self.log.info("Request verification vector")
p2p_mn1.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
wait_for_banscore(mn1.node, id_p2p_mn1, 0)
p2p_mn2.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
wait_for_banscore(mn2.node, id_p2p_mn2, 0)
# Note: our banscore is bumped as we are requesting too rapidly,
# however the node still returns the data
self.log.info("Request encrypted contributions")
p2p_mn1.test_qgetdata(qgetdata_contributions, 0, 0, self.llmq_size)
wait_for_banscore(mn1.node, id_p2p_mn1, 25)
p2p_mn2.test_qgetdata(qgetdata_contributions, 0, 0, self.llmq_size)
wait_for_banscore(mn2.node, id_p2p_mn2, 25)
# Request both
# Note: our banscore is bumped as we are requesting too rapidly,
# however the node still returns the data
self.log.info("Request both")
p2p_mn1.test_qgetdata(qgetdata_all, 0, self.llmq_threshold, self.llmq_size)
wait_for_banscore(mn1.node, id_p2p_mn1, 50)
mn1.node.disconnect_p2ps()
p2p_mn2.test_qgetdata(qgetdata_all, 0, self.llmq_threshold, self.llmq_size)
wait_for_banscore(mn2.node, id_p2p_mn2, 50)
mn2.node.disconnect_p2ps()

self.log.info("Test ban score increase for invalid / unexpected QDATA")
p2p_mn1 = p2p_connection(mn1.node)
p2p_mn2 = p2p_connection(mn2.node)
@@ -202,51 +199,52 @@ def test_basics():
# - Already received
force_request_expire()
assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
p2p_mn1.wait_for_qgetdata()
p2p_mn1.wait_for_qmessage("qgetdata")
p2p_mn1.send_message(qdata_valid)
time.sleep(1)
p2p_mn1.send_message(qdata_valid)
wait_for_banscore(mn1.node, id_p2p_mn1, 20)
# - Not like requested
force_request_expire()
assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
p2p_mn1.wait_for_qgetdata()
p2p_mn1.wait_for_qmessage("qgetdata")
qdata_invalid_request = qdata_valid
qdata_invalid_request.data_mask = 2
p2p_mn1.send_message(qdata_invalid_request)
wait_for_banscore(mn1.node, id_p2p_mn1, 30)
# - Invalid verification vector
force_request_expire()
assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
p2p_mn1.wait_for_qgetdata()
p2p_mn1.wait_for_qmessage("qgetdata")
qdata_invalid_vvec = qdata_valid
qdata_invalid_vvec.quorum_vvec.pop()
p2p_mn1.send_message(qdata_invalid_vvec)
wait_for_banscore(mn1.node, id_p2p_mn1, 40)
# - Invalid contributions
force_request_expire()
assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
p2p_mn1.wait_for_qgetdata()
p2p_mn1.wait_for_qmessage("qgetdata")
qdata_invalid_contribution = qdata_valid
qdata_invalid_contribution.enc_contributions.pop()
p2p_mn1.send_message(qdata_invalid_contribution)
wait_for_banscore(mn1.node, id_p2p_mn1, 50)
mn1.node.disconnect_p2ps()
mn2.node.disconnect_p2ps()

self.log.info("Test all available error codes")
p2p_mn1 = p2p_connection(mn1.node)
id_p2p_mn1 = get_mininode_id(mn1.node)
mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
p2p_mn2 = p2p_connection(mn2.node)
id_p2p_mn2 = get_mininode_id(mn2.node)
mnauth(mn2.node, id_p2p_mn2, fake_mnauth_2[0], fake_mnauth_2[1])
qgetdata_invalid_type = msg_qgetdata(quorum_hash_int, 105, 0x01, protx_hash_int)
qgetdata_invalid_block = msg_qgetdata(protx_hash_int, 100, 0x01, protx_hash_int)
qgetdata_invalid_quorum = msg_qgetdata(int(mn1.node.getblockhash(0), 16), 100, 0x01, protx_hash_int)
qgetdata_invalid_quorum = msg_qgetdata(int(mn2.node.getblockhash(0), 16), 100, 0x01, protx_hash_int)
qgetdata_invalid_no_member = msg_qgetdata(quorum_hash_int, 100, 0x02, quorum_hash_int)
p2p_mn1.test_qgetdata(qgetdata_invalid_type, QUORUM_TYPE_INVALID)
p2p_mn1.test_qgetdata(qgetdata_invalid_block, QUORUM_BLOCK_NOT_FOUND)
p2p_mn1.test_qgetdata(qgetdata_invalid_quorum, QUORUM_NOT_FOUND)
p2p_mn1.test_qgetdata(qgetdata_invalid_no_member, MASTERNODE_IS_NO_MEMBER)
p2p_mn2.test_qgetdata(qgetdata_invalid_type, QUORUM_TYPE_INVALID)
p2p_mn2.test_qgetdata(qgetdata_invalid_block, QUORUM_BLOCK_NOT_FOUND)
p2p_mn2.test_qgetdata(qgetdata_invalid_quorum, QUORUM_NOT_FOUND)
p2p_mn2.test_qgetdata(qgetdata_invalid_no_member, MASTERNODE_IS_NO_MEMBER)
# The last two error cases require the node to miss its DKG data, so we just reindex the node.
mn1.node.disconnect_p2ps()
mn2.node.disconnect_p2ps()
self.restart_mn(mn1, reindex=True)
# Re-connect to the masternode
p2p_mn1 = p2p_connection(mn1.node)
@@ -260,6 +258,8 @@ def test_basics():
# Validate the DKG data is missing
p2p_mn1.test_qgetdata(qgetdata_vvec, QUORUM_VERIFICATION_VECTOR_MISSING)
p2p_mn1.test_qgetdata(qgetdata_contributions, ENCRYPTED_CONTRIBUTIONS_MISSING)
self.test_mn_quorum_data(mn1, 100, quorum_hash, expect_secret=False)

self.log.info("Test DKG data recovery with QDATA")
# Now that mn1 is missing its DKG data try to recover it by querying the data from mn2 and then sending it
# to mn1 with a direct QDATA message.
@@ -274,7 +274,7 @@ def test_basics():
# Trigger mn1 - QGETDATA -> p2p_mn1
assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
# Wait until mn1 sent the QGETDATA to p2p_mn1
p2p_mn1.wait_for_qgetdata()
p2p_mn1.wait_for_qmessage("qgetdata")
# Send the QDATA received from mn2 to mn1
p2p_mn1.send_message(p2p_mn2.get_qdata())
# Now mn1 should have its data back!
@@ -368,6 +368,7 @@ def test_qwatch_connections():
def test_watchquorums():
self.log.info("Test -watchquorums support")
for extra_args in [[], ["-watchquorums"]]:
force_request_expire()
self.restart_node(0, self.extra_args[0] + extra_args)
for i in range(self.num_nodes - 1):
self.connect_nodes(0, i + 1)
@@ -377,9 +378,9 @@ def test_watchquorums():
id_p2p_mn2 = get_mininode_id(mn2.node)
mnauth(node0, id_p2p_node0, fake_mnauth_1[0], fake_mnauth_1[1])
mnauth(mn2.node, id_p2p_mn2, fake_mnauth_2[0], fake_mnauth_2[1])
p2p_mn2.test_qgetdata(qgetdata_all, 0, self.llmq_threshold, self.llmq_size)
assert node0.quorum("getdata", id_p2p_node0, 100, quorum_hash, 0x03, mn1.proTxHash)
p2p_node0.wait_for_qgetdata()
p2p_mn2.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold)
assert node0.quorum("getdata", id_p2p_node0, 100, quorum_hash, 0x01)
p2p_node0.wait_for_qmessage("qgetdata")
p2p_node0.send_message(p2p_mn2.get_qdata())
wait_for_banscore(node0, id_p2p_node0, (1 - len(extra_args)) * 10)
node0.disconnect_p2ps()
@@ -410,15 +411,17 @@ def test_rpc_quorum_getdata_protx_hash():
protx_hash_int = int(mn1.proTxHash, 16)

# Valid requests
qgetdata_vvec = msg_qgetdata(quorum_hash_int, 100, 0x01, protx_hash_int)
qgetdata_vvec = msg_qgetdata(quorum_hash_int, 100, 0x01)
qgetdata_contributions = msg_qgetdata(quorum_hash_int, 100, 0x02, protx_hash_int)
qgetdata_all = msg_qgetdata(quorum_hash_int, 100, 0x03, protx_hash_int)

test_basics()
test_request_limit()
test_qwatch_connections()
test_watchquorums()
test_rpc_quorum_getdata_protx_hash()
# Test with/without expired request cleanup
for self.cleanup in [True, False]:
test_basics()
test_request_limit()
test_qwatch_connections()
test_watchquorums()
test_rpc_quorum_getdata_protx_hash()


if __name__ == '__main__':
