Skip to content

Commit

Permalink
merge main
Browse files Browse the repository at this point in the history
  • Loading branch information
danwt committed May 15, 2024
2 parents 9606d91 + bd3c97d commit df608b5
Show file tree
Hide file tree
Showing 25 changed files with 281 additions and 270 deletions.
60 changes: 42 additions & 18 deletions .github/workflows/changelog.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,42 +12,66 @@ jobs:
- name: Checkout the repository
uses: actions/checkout@v3
with:
fetch-depth: 0
fetch-depth: 1

- name: Setup Node.js
uses: actions/setup-node@v3
with:
node-version: '18'

- name: Generate Changelog Update
- name: Configure Git
run: |
npm install -g conventional-changelog-cli
conventional-changelog -p angular -i CHANGELOG.md -s
git config --global user.email "github-actions[bot]@users.noreply.github.com"
git config --global user.name "github-actions[bot]"
- name: Check if "auto-changelog-update" branch exists and update if needed
- name: Check if "auto-changelog-update-do-not-create-manually" branch exists
id: check_branch
run: |
git fetch origin auto-changelog-update:auto-changelog-update --no-tags
if git rev-parse --verify --quiet auto-changelog-update; then
echo "::set-output name=branch_exists::true"
git checkout auto-changelog-update
git merge main --no-edit
if [ -n "$(git ls-remote --heads origin auto-changelog-update-do-not-create-manually)" ]; then
git fetch origin auto-changelog-update-do-not-create-manually
echo "branch_exists=true" >> $GITHUB_ENV
else
echo "::set-output name=branch_exists::false"
git checkout -b auto-changelog-update
echo "branch_exists=false" >> $GITHUB_ENV
fi
- name: Create or Update Pull Request
- name: Generate Changelog Update and update if branch exists
run: |
npm install -g conventional-changelog-cli
if [ "$branch_exists" == "true" ]; then
git checkout auto-changelog-update-do-not-create-manually
git merge main --strategy-option theirs --allow-unrelated-histories --no-edit
conventional-changelog -p angular -i CHANGELOG.md -s -r 0 --append
git add CHANGELOG.md
git commit -m "Update CHANGELOG.md [skip ci]"
git push origin auto-changelog-update-do-not-create-manually
exit 0
else
git checkout main
git checkout -b auto-changelog-update-do-not-create-manually
git push origin auto-changelog-update-do-not-create-manually
conventional-changelog -p angular -i CHANGELOG.md -s -r 0 --append
fi
- name: Create Pull Request
if: env.branch_exists == 'false'
id: cpr
uses: peter-evans/create-pull-request@v4
uses: peter-evans/create-pull-request@v6
with:
token: ${{ secrets.GITHUB_TOKEN }}
commit-message: "Update CHANGELOG.md [skip ci]"
title: "Automated Changelog Update"
title: "Chore(changelog): Automated Changelog Update [skip ci]"
body: "Update the CHANGELOG.md with recent pushes to branch main."
branch: "auto-changelog-update" # Static branch name
base: "main"
branch: "auto-changelog-update-do-not-create-manually"
delete-branch: true
branch-suffix: none
- name: Check if PR needs to be updated

- name: Check outputs
if: env.branch_exists == 'false'
run: |
echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}"
echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}"
- name: Log if PR updated
if: steps.cpr.outputs.pull-request-operation == 'updated'
run: |
echo "Changelog PR updated due to new commit to main."
2 changes: 1 addition & 1 deletion block/executor.go
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ func (e *Executor) CreateBlock(height uint64, lastCommit *types.Commit, lastHead
}
copy(block.Header.LastCommitHash[:], e.getLastCommitHash(lastCommit, &block.Header))
copy(block.Header.DataHash[:], e.getDataHash(block))
copy(block.Header.AggregatorsHash[:], state.Validators.Hash())
copy(block.Header.SequencersHash[:], state.Validators.Hash())

return block
}
Expand Down
25 changes: 16 additions & 9 deletions block/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -133,17 +133,17 @@ func (m *Manager) Start(ctx context.Context) error {
m.logger.Info("Starting the block manager")

// Check if proposer key matches to the one in the settlement layer
var isAggregator bool
var isSequencer bool
slProposerKey := m.SLClient.GetProposer().PublicKey.Bytes()
localProposerKey, err := m.ProposerKey.GetPublic().Raw()
if err != nil {
return fmt.Errorf("get local node public key: %w", err)
}
if bytes.Equal(slProposerKey, localProposerKey) {
m.logger.Info("Starting in aggregator mode")
isAggregator = true
m.logger.Info("Starting in sequencer mode")
isSequencer = true
} else {
m.logger.Info("Starting in non-aggregator mode")
m.logger.Info("Starting in non-sequencer mode")
}

// Check if InitChain flow is needed
Expand All @@ -156,7 +156,7 @@ func (m *Manager) Start(ctx context.Context) error {
}
}

if !isAggregator {
if !isSequencer {
go uevent.MustSubscribe(ctx, m.Pubsub, "applyGossipedBlocksLoop", p2p.EventQueryNewNewGossipedBlock, m.onNewGossipedBlock, m.logger)
}

Expand All @@ -165,7 +165,7 @@ func (m *Manager) Start(ctx context.Context) error {
return fmt.Errorf("sync block manager: %w", err)
}

if isAggregator {
if isSequencer {
// TODO: populate the accumulatedSize on startup
go m.ProduceBlockLoop(ctx)
go m.SubmitLoop(ctx)
Expand Down Expand Up @@ -215,21 +215,28 @@ func getAddress(key crypto.PrivKey) ([]byte, error) {
// TODO: move to gossip.go
// onNewGossippedBlock will take a block and apply it
func (m *Manager) onNewGossipedBlock(event pubsub.Message) {
m.retrieverMutex.Lock() // needed to protect blockCache access
eventData := event.Data().(p2p.GossipedBlock)
block := eventData.Block
commit := eventData.Commit
m.logger.Debug("Received new block via gossip", "height", block.Header.Height, "n cachedBlocks", len(m.blockCache))
m.retrieverMutex.Lock() // needed to protect blockCache access
_, found := m.blockCache[block.Header.Height]
// It is not strictly necessary to return early for correctness, but doing so helps us avoid mutex pressure and unnecessary repeated attempts to apply cached blocks
if found {
m.retrieverMutex.Unlock()
return
}

m.logger.Debug("Received new block via gossip", "block height", block.Header.Height, "store height", m.Store.Height(), "n cachedBlocks", len(m.blockCache))

nextHeight := m.Store.NextHeight()
if block.Header.Height >= nextHeight {
m.blockCache[block.Header.Height] = CachedBlock{
Block: &block,
Commit: &commit,
}
m.logger.Debug("caching block", "block height", block.Header.Height, "store height", m.Store.Height())
}
m.retrieverMutex.Unlock() // have to give this up as it's locked again in attempt apply, and we're not re-entrant

err := m.attemptApplyCachedBlocks()
if err != nil {
m.logger.Error("applying cached blocks", "err", err)
Expand Down
8 changes: 3 additions & 5 deletions block/submit.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,11 +46,9 @@ func (m *Manager) SubmitLoop(ctx context.Context) {
}

/*
Note: since we dont explicitly coordinate changes to the accumulated size with actual batch creation
we don't have a guarantee that the accumulated size is the same as the actual batch size that will be made.
See https://github.com/dymensionxyz/dymint/issues/828
Until that is fixed, it's technically possibly to undercount, by having a some blocks be produced in between
setting the counter to 0, and actually producing the batch.
Note: since we don't explicitly coordinate changes to the accumulated size with actual batch creation
we don't have a guarantee that the accumulated size is the same as the actual batch size that will be made.
But the batch creation step will also check the size is OK, so it's not a problem.
*/
m.AccumulatedBatchSize.Store(0)

Expand Down
2 changes: 1 addition & 1 deletion block/synctarget.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ func (m *Manager) SyncToTargetHeightLoop(ctx context.Context) {
m.targetSyncHeight.Set(diodes.GenericDataType(&h))
m.logger.Info("Set new target sync height", "height", h)
case <-subscription.Cancelled():
m.logger.Info("syncTargetLoop subscription canceled")
m.logger.Error("syncTargetLoop subscription canceled")
return
}
}
Expand Down
6 changes: 3 additions & 3 deletions config/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,9 +39,9 @@ func AddNodeFlags(cmd *cobra.Command) {

cmd.Flags().String(FlagDALayer, def.DALayer, "Data Availability Layer Client name (mock or grpc)")
cmd.Flags().String(FlagDAConfig, def.DAConfig, "Data Availability Layer Client config")
cmd.Flags().Duration(FlagBlockTime, def.BlockTime, "block time (for aggregator mode)")
cmd.Flags().Duration(FlagMaxIdleTime, def.MaxIdleTime, "max time for empty blocks (for aggregator mode)")
cmd.Flags().Duration(FlagBatchSubmitMaxTime, def.BatchSubmitMaxTime, "max time for batch submit (for aggregator mode)")
cmd.Flags().Duration(FlagBlockTime, def.BlockTime, "block time (for sequencer mode)")
cmd.Flags().Duration(FlagMaxIdleTime, def.MaxIdleTime, "max time for empty blocks (for sequencer mode)")
cmd.Flags().Duration(FlagBatchSubmitMaxTime, def.BatchSubmitMaxTime, "max time for batch submit (for sequencer mode)")
cmd.Flags().String(FlagNamespaceID, def.NamespaceID, "namespace identifies (8 bytes in hex)")
cmd.Flags().Uint64(FlagBlockBatchMaxSizeBytes, def.BlockBatchMaxSizeBytes, "block batch size in bytes")

Expand Down
4 changes: 4 additions & 0 deletions da/celestia/celestia.go
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,10 @@ func (c *DataAvailabilityLayerClient) Start() (err error) {
}
return nil
case <-ticker.C:
state, err := rpc.Header.SyncState(c.ctx)
if err != nil {
return err
}
c.logger.Info("celestia-node still syncing", "height", state.Height, "target", state.ToHeight)
}
}
Expand Down
Loading

0 comments on commit df608b5

Please sign in to comment.