refactor: Network test sync logic #2748
Merged

nasdf merged 19 commits into sourcenetwork:develop from nasdf:nasdf/refactor/network-test-actions on Jul 18, 2024
Changes from 12 commits

Commits (19)
e25b68a  refactor network test actions (nasdf)
d832d32  fix node restart subscription (nasdf)
b299568  fix peer collection expected docs (nasdf)
489dc47  Merge branch 'develop' into nasdf/refactor/network-test-actions (nasdf)
ba973ee  Merge branch 'develop' into nasdf/refactor/network-test-actions (nasdf)
997148a  Merge branch 'develop' into nasdf/refactor/network-test-actions (nasdf)
8fb16cf  add test event state (nasdf)
6e6e3f5  move all event test logic to events file (nasdf)
f3540b0  move p2p state to struct (nasdf)
4b88f6b  simplify p2p test state (nasdf)
6e136a1  make peer connections deterministic. remove p2p sleeps (nasdf)
25ee3a4  handle closed subscriptions (nasdf)
301445c  add required sleeps to p2p test actions (nasdf)
666dbd2  fix acp tests (nasdf)
6e8fc67  notify exchange of new block before push log (nasdf)
d036cb6  fix linter error (nasdf)
85410d3  Merge branch 'develop' into nasdf/refactor/network-test-actions (nasdf)
5596026  rename SkipUpdateEvent to SkipLocalUpdateEvent (nasdf)
b4fbe86  Merge branch 'develop' into nasdf/refactor/network-test-actions (nasdf)
@@ -0,0 +1,241 @@
// Copyright 2024 Democratized Data Foundation
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package tests

import (
    "context"
    "time"

    "github.com/sourcenetwork/immutable"
    "github.com/stretchr/testify/require"

    "github.com/sourcenetwork/defradb/event"
)

// eventTimeout is the amount of time to wait
// for an event before timing out
const eventTimeout = 1 * time.Second

// waitForNetworkSetupEvents waits for p2p topic completed and
// replicator completed events to be published on the local node event bus.
func waitForNetworkSetupEvents(s *state, nodeID int) {
    cols, err := s.nodes[nodeID].GetAllP2PCollections(s.ctx)
    require.NoError(s.t, err)

    reps, err := s.nodes[nodeID].GetAllReplicators(s.ctx)
    require.NoError(s.t, err)

    p2pTopicEvents := 0
    replicatorEvents := len(reps)

    // there is only one message for loading of P2P collections
    if len(cols) > 0 {
        p2pTopicEvents = 1
    }

    for p2pTopicEvents > 0 || replicatorEvents > 0 {
        select {
        case _, ok := <-s.nodeEvents[nodeID].replicator.Message():
            if !ok {
                require.Fail(s.t, "subscription closed waiting for network setup events")
            }
            replicatorEvents--

        case _, ok := <-s.nodeEvents[nodeID].p2pTopic.Message():
            if !ok {
                require.Fail(s.t, "subscription closed waiting for network setup events")
            }
            p2pTopicEvents--

        case <-time.After(eventTimeout):
            s.t.Fatalf("timeout waiting for network setup events")
        }
    }
}
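As a usage illustration, a hypothetical caller might look like the sketch below. The setupNetwork wrapper is invented for illustration and is not part of this diff; only the state type and waitForNetworkSetupEvents come from this file:

    func setupNetwork(s *state, nodeID int) {
        // ... configure replicators and P2P collections on s.nodes[nodeID] ...

        // block until the node confirms the configuration on its event bus,
        // instead of sleeping for an arbitrary duration
        waitForNetworkSetupEvents(s, nodeID)
    }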

// waitForReplicatorConfigureEvent waits for a node to publish a
// replicator completed event on the local event bus.
//
// Expected document heads will be updated for the targeted node.
func waitForReplicatorConfigureEvent(s *state, cfg ConfigureReplicator) {
    select {
    case _, ok := <-s.nodeEvents[cfg.SourceNodeID].replicator.Message():
        if !ok {
            require.Fail(s.t, "subscription closed waiting for replicator event")
        }

    case <-time.After(eventTimeout):
        require.Fail(s.t, "timeout waiting for replicator event")
    }

    // all previous documents should be merged on the subscriber node
    for key, val := range s.nodeP2P[cfg.SourceNodeID].actualDocHeads {
        s.nodeP2P[cfg.TargetNodeID].expectedDocHeads[key] = val
    }

    // update node connections and replicators
    s.nodeP2P[cfg.TargetNodeID].connections[cfg.SourceNodeID] = struct{}{}
    s.nodeP2P[cfg.SourceNodeID].connections[cfg.TargetNodeID] = struct{}{}
    s.nodeP2P[cfg.SourceNodeID].replicators[cfg.TargetNodeID] = struct{}{}
}

// waitForReplicatorDeleteEvent waits for a node to publish a
// replicator completed event on the local event bus.
func waitForReplicatorDeleteEvent(s *state, cfg DeleteReplicator) {
    select {
    case _, ok := <-s.nodeEvents[cfg.SourceNodeID].replicator.Message():
        if !ok {
            require.Fail(s.t, "subscription closed waiting for replicator event")
        }

    case <-time.After(eventTimeout):
        require.Fail(s.t, "timeout waiting for replicator event")
    }

    delete(s.nodeP2P[cfg.SourceNodeID].replicators, cfg.TargetNodeID)
}

// waitForSubscribeToCollectionEvent waits for a node to publish a
// p2p topic completed event on the local event bus.
//
// Expected document heads will be updated for the subscriber node.
func waitForSubscribeToCollectionEvent(s *state, action SubscribeToCollection) {
    select {
    case _, ok := <-s.nodeEvents[action.NodeID].p2pTopic.Message():
        if !ok {
            require.Fail(s.t, "subscription closed waiting for p2p topic event")
        }

    case <-time.After(eventTimeout):
        require.Fail(s.t, "timeout waiting for p2p topic event")
    }

    // update peer collections of target node
    for _, collectionIndex := range action.CollectionIDs {
        if collectionIndex == NonExistentCollectionID {
            continue // don't track non existent collections
        }
        s.nodeP2P[action.NodeID].peerCollections[collectionIndex] = struct{}{}
    }
}

// waitForUnsubscribeToCollectionEvent waits for a node to publish a
// p2p topic completed event on the local event bus.
func waitForUnsubscribeToCollectionEvent(s *state, action UnsubscribeToCollection) {
    select {
    case _, ok := <-s.nodeEvents[action.NodeID].p2pTopic.Message():
        if !ok {
            require.Fail(s.t, "subscription closed waiting for p2p topic event")
        }

    case <-time.After(eventTimeout):
        require.Fail(s.t, "timeout waiting for p2p topic event")
    }

    for _, collectionIndex := range action.CollectionIDs {
        if collectionIndex == NonExistentCollectionID {
            continue // don't track non existent collections
        }
        delete(s.nodeP2P[action.NodeID].peerCollections, collectionIndex)
    }
}

// waitForUpdateEvents waits for all selected nodes to publish an
// update event to the local event bus.
//
// Expected document heads will be updated for any connected nodes.
func waitForUpdateEvents(s *state, nodeID immutable.Option[int], collectionID int) {
    for i := 0; i < len(s.nodes); i++ {
        if nodeID.HasValue() && nodeID.Value() != i {
            continue // node is not selected
        }

        var evt event.Update
        select {
        case msg, ok := <-s.nodeEvents[i].update.Message():
            if !ok {
                require.Fail(s.t, "subscription closed waiting for update event")
            }
            evt = msg.Data.(event.Update)

        case <-time.After(eventTimeout):
            require.Fail(s.t, "timeout waiting for update event")
        }

        // update the actual document head on the node that updated it
        s.nodeP2P[i].actualDocHeads[evt.DocID] = evt.Cid

        // update the expected document heads of replicator targets
        for id := range s.nodeP2P[i].replicators {
            // replicator source nodes push updates to target nodes
            s.nodeP2P[id].expectedDocHeads[evt.DocID] = evt.Cid
        }

        // update the expected document heads of connected nodes
        for id := range s.nodeP2P[i].connections {
            // connected nodes share updates of documents they have in common
            if _, ok := s.nodeP2P[id].actualDocHeads[evt.DocID]; ok {
                s.nodeP2P[id].expectedDocHeads[evt.DocID] = evt.Cid
            }
            // peer collection subscribers receive updates from any other subscriber node
            if _, ok := s.nodeP2P[id].peerCollections[collectionID]; ok {
                s.nodeP2P[id].expectedDocHeads[evt.DocID] = evt.Cid
            }
        }
    }
}

// waitForMergeEvents waits for all expected document heads to be merged to all nodes.
//
// Will fail the test if an event is not received within the expected time interval to prevent tests
// from running forever.
func waitForMergeEvents(s *state, action WaitForSync) {
    // use a longer timeout since the sync process can take a while
    ctx, cancel := context.WithTimeout(s.ctx, 60*time.Second)
    defer cancel()

    for nodeID := 0; nodeID < len(s.nodes); nodeID++ {
        expect := s.nodeP2P[nodeID].expectedDocHeads

        // remove any docs that are already merged
        // up to the expected document head
        for key, val := range s.nodeP2P[nodeID].actualDocHeads {
            if head, ok := expect[key]; ok && head.String() == val.String() {
                delete(expect, key)
            }
        }

        // wait for all expected doc heads to be merged
        //
        // the order of merges does not matter as we only
        // expect the latest head to eventually be merged
        //
        // unexpected merge events are ignored
        for len(expect) > 0 {
            var evt event.Merge
            select {
            case msg, ok := <-s.nodeEvents[nodeID].merge.Message():
                if !ok {
                    require.Fail(s.t, "subscription closed waiting for merge complete event")
                }
                evt = msg.Data.(event.Merge)

            case <-ctx.Done():
                require.Fail(s.t, "timeout waiting for merge complete event")
            }

            head, ok := expect[evt.DocID]
            if ok && head.String() == evt.Cid.String() {
                delete(expect, evt.DocID)
            }
        }
    }
}
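A minimal sketch of a call site, assuming the test executor dispatches actions by type. The performAction dispatcher shown here is hypothetical; only the state type, WaitForSync, and waitForMergeEvents come from this diff:

    // hypothetical action dispatch inside the test executor
    func performAction(s *state, a any) {
        switch action := a.(type) {
        case WaitForSync:
            // block until every node has merged the document heads it is
            // expected to hold, replacing the previous fixed sleeps
            waitForMergeEvents(s, action)
        }
    }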
question: It looks like p2pTopicEvents is only ever 0 or 1, why is it not a boolean? Is this a bug/wip?

Changed to bool
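A minimal sketch of what the "changed to bool" follow-up could look like for waitForNetworkSetupEvents; the exact committed code may differ:

    func waitForNetworkSetupEvents(s *state, nodeID int) {
        cols, err := s.nodes[nodeID].GetAllP2PCollections(s.ctx)
        require.NoError(s.t, err)

        reps, err := s.nodes[nodeID].GetAllReplicators(s.ctx)
        require.NoError(s.t, err)

        // only one message is published for loading of P2P collections
        p2pTopicEvent := len(cols) > 0
        replicatorEvents := len(reps)

        for p2pTopicEvent || replicatorEvents > 0 {
            select {
            case _, ok := <-s.nodeEvents[nodeID].replicator.Message():
                if !ok {
                    require.Fail(s.t, "subscription closed waiting for network setup events")
                }
                replicatorEvents--

            case _, ok := <-s.nodeEvents[nodeID].p2pTopic.Message():
                if !ok {
                    require.Fail(s.t, "subscription closed waiting for network setup events")
                }
                p2pTopicEvent = false

            case <-time.After(eventTimeout):
                s.t.Fatalf("timeout waiting for network setup events")
            }
        }
    }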