
Commit

Merge branch 'main' into mtsitrin/634-refactor-use-state-as-single-source-of-truth
mtsitrin committed May 15, 2024
2 parents 576325e + bd3c97d commit 403ec9e
Showing 28 changed files with 315 additions and 271 deletions.
60 changes: 42 additions & 18 deletions .github/workflows/changelog.yml
@@ -12,42 +12,66 @@ jobs:
- name: Checkout the repository
uses: actions/checkout@v3
with:
fetch-depth: 0
fetch-depth: 1

- name: Setup Node.js
uses: actions/setup-node@v3
with:
node-version: '18'

- name: Generate Changelog Update
- name: Configure Git
run: |
npm install -g conventional-changelog-cli
conventional-changelog -p angular -i CHANGELOG.md -s
git config --global user.email "github-actions[bot]@users.noreply.github.com"
git config --global user.name "github-actions[bot]"
- name: Check if "auto-changelog-update" branch exists and update if needed
- name: Check if "auto-changelog-update-do-not-create-manually" branch exists
id: check_branch
run: |
git fetch origin auto-changelog-update:auto-changelog-update --no-tags
if git rev-parse --verify --quiet auto-changelog-update; then
echo "::set-output name=branch_exists::true"
git checkout auto-changelog-update
git merge main --no-edit
if [ -n "$(git ls-remote --heads origin auto-changelog-update-do-not-create-manually)" ]; then
git fetch origin auto-changelog-update-do-not-create-manually
echo "branch_exists=true" >> $GITHUB_ENV
else
echo "::set-output name=branch_exists::false"
git checkout -b auto-changelog-update
echo "branch_exists=false" >> $GITHUB_ENV
fi
- name: Create or Update Pull Request
- name: Generate Changelog Update and update if branch exists
run: |
npm install -g conventional-changelog-cli
if [ "$branch_exists" == "true" ]; then
git checkout auto-changelog-update-do-not-create-manually
git merge main --strategy-option theirs --allow-unrelated-histories --no-edit
conventional-changelog -p angular -i CHANGELOG.md -s -r 0 --append
git add CHANGELOG.md
git commit -m "Update CHANGELOG.md [skip ci]"
git push origin auto-changelog-update-do-not-create-manually
exit 0
else
git checkout main
git checkout -b auto-changelog-update-do-not-create-manually
git push origin auto-changelog-update-do-not-create-manually
conventional-changelog -p angular -i CHANGELOG.md -s -r 0 --append
fi
- name: Create Pull Request
if: env.branch_exists == 'false'
id: cpr
uses: peter-evans/create-pull-request@v4
uses: peter-evans/create-pull-request@v6
with:
token: ${{ secrets.GITHUB_TOKEN }}
commit-message: "Update CHANGELOG.md [skip ci]"
title: "Automated Changelog Update"
title: "Chore(changelog): Automated Changelog Update [skip ci]"
body: "Update the CHANGELOG.md with recent pushes to branch main."
branch: "auto-changelog-update" # Static branch name
base: "main"
branch: "auto-changelog-update-do-not-create-manually"
delete-branch: true
branch-suffix: none
- name: Check if PR needs to be updated

- name: Check outputs
if: env.branch_exists == 'false'
run: |
echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}"
echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}"
- name: Log if PR updated
if: steps.cpr.outputs.pull-request-operation == 'updated'
run: |
echo "Changelog PR updated due to new commit to main."
33 changes: 33 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,36 @@
# [](https://github.com/dymensionxyz/dymint/compare/v1.1.0-rc02...v) (2024-05-07)


### Bug Fixes

* **bug:** sync from da and p2p when starting a node ([#763](https://github.com/dymensionxyz/dymint/issues/763)) ([68ffd05](https://github.com/dymensionxyz/dymint/commit/68ffd05794949ddc42df1c132d1fde5f21b505f4))
* **celestia test:** fix race in test ([#755](https://github.com/dymensionxyz/dymint/issues/755)) ([0b36781](https://github.com/dymensionxyz/dymint/commit/0b367818bf6aa8da4a4fd8e4e5c78223b60b44e0))
* **celestia:** impl retry on submit ([#748](https://github.com/dymensionxyz/dymint/issues/748)) ([61630eb](https://github.com/dymensionxyz/dymint/commit/61630eb458197abe2440a81426210000dff25d40))
* **celestia:** use fixed delay in repeat attempts ([#753](https://github.com/dymensionxyz/dymint/issues/753)) ([53002b0](https://github.com/dymensionxyz/dymint/commit/53002b0a070743811295a98580ba038cac40cc7d))
* **code standards:** renames error -> err in celestia ([#768](https://github.com/dymensionxyz/dymint/issues/768)) ([1189384](https://github.com/dymensionxyz/dymint/commit/1189384d1225b3dd65481c9dedbae423e4f8ac04))
* **da:** fixed da path seperator and encoding issue ([#731](https://github.com/dymensionxyz/dymint/issues/731)) ([3a3b219](https://github.com/dymensionxyz/dymint/commit/3a3b21932750fee7eaaa9c186f78e36e3e597746))
* **DA:** use expo backoff in retries ([#739](https://github.com/dymensionxyz/dymint/issues/739)) ([848085f](https://github.com/dymensionxyz/dymint/commit/848085f70bcaae81fb80da3ab78c4d8b399e13b1))
* **doc:** manager cache comment ([#767](https://github.com/dymensionxyz/dymint/issues/767)) ([b88bf6e](https://github.com/dymensionxyz/dymint/commit/b88bf6e72820c944b290147724255cc8466ada50))
* **logging:** added reason for websocket closed debug msg ([#746](https://github.com/dymensionxyz/dymint/issues/746)) ([3aa7d80](https://github.com/dymensionxyz/dymint/commit/3aa7d80ace92b3b0f79e4f338f10bb94c96ab6dd))
* **logs:** make logs more readable in a couple places, fix race cond ([#749](https://github.com/dymensionxyz/dymint/issues/749)) ([f05ef39](https://github.com/dymensionxyz/dymint/commit/f05ef3957b754c05fbc90aa39eabce80bbe65933))
* **manager:** get fresh height in loop ([#781](https://github.com/dymensionxyz/dymint/issues/781)) ([e4df480](https://github.com/dymensionxyz/dymint/commit/e4df48037a78965dbac9e747dd296f39360e396c))
* **p2p:** validate block before applying and not before caching in p2p gossiping ([#723](https://github.com/dymensionxyz/dymint/issues/723)) ([98371b5](https://github.com/dymensionxyz/dymint/commit/98371b5220613e70f3274fab5593e02ba532f7db))
* **p2p:** validating gossiped block is created by the proposer ([#737](https://github.com/dymensionxyz/dymint/issues/737)) ([851b312](https://github.com/dymensionxyz/dymint/commit/851b312620233a9fb1abe55214a678322e7b0c68))
* **produce loop:** handle unauthenticated error in settlement layer ([#726](https://github.com/dymensionxyz/dymint/issues/726)) ([33e78d1](https://github.com/dymensionxyz/dymint/commit/33e78d116b5f14b91b8b3bda2b6cbfee9040e2d3))
* **rpc:** nil panic in rpc/json/handler.go WriteError ([#750](https://github.com/dymensionxyz/dymint/issues/750)) ([e09709b](https://github.com/dymensionxyz/dymint/commit/e09709b428a33da002defb9f13178fa19b81a69b))
* **settlement:** remove state index from proto ([#777](https://github.com/dymensionxyz/dymint/issues/777)) ([767b8fd](https://github.com/dymensionxyz/dymint/commit/767b8fdb490c37deee43ac023688410bbb98ccb0))
* **sync:** make sure we use a latest state index as a start point ([#760](https://github.com/dymensionxyz/dymint/issues/760)) ([43e2d96](https://github.com/dymensionxyz/dymint/commit/43e2d965f2b505751f8e5260549e909c976141ee))
* **sync:** removing height condition for applying cached blocks from p2p ([#787](https://github.com/dymensionxyz/dymint/issues/787)) ([b97299c](https://github.com/dymensionxyz/dymint/commit/b97299ce7f78168863c5e1c2d7fc479aed2ae6da))
* **tests:** fix unit tests, mocks, cleanup/dry hub queries ([#782](https://github.com/dymensionxyz/dymint/issues/782)) ([c276aea](https://github.com/dymensionxyz/dymint/commit/c276aea12c9cd37f62fcf9d684c4efe901a510bf))


### Features

* **p2p:** header gossiper removed ([#813](https://github.com/dymensionxyz/dymint/issues/813)) ([737b412](https://github.com/dymensionxyz/dymint/commit/737b4126c59846a2be57049a249843de5648dde8))
* **produce:** limiting block size by maxBatchSize ([#784](https://github.com/dymensionxyz/dymint/issues/784)) ([f90042c](https://github.com/dymensionxyz/dymint/commit/f90042cd61fc6b60093478cd65491f8aa1106457))



# [](https://github.com/dymensionxyz/dymint/compare/v1.1.0-rc02...v) (2024-05-05)


2 changes: 1 addition & 1 deletion block/executor.go
@@ -128,7 +128,7 @@ func (e *Executor) CreateBlock(height uint64, lastCommit *types.Commit, lastHead
}
copy(block.Header.LastCommitHash[:], e.getLastCommitHash(lastCommit, &block.Header))
copy(block.Header.DataHash[:], e.getDataHash(block))
copy(block.Header.AggregatorsHash[:], state.Validators.Hash())
copy(block.Header.SequencersHash[:], state.Validators.Hash())

return block
}
13 changes: 10 additions & 3 deletions block/gossip.go
@@ -11,21 +11,28 @@ import (

// onNewGossippedBlock will take a block and apply it
func (m *Manager) onNewGossipedBlock(event pubsub.Message) {
m.retrieverMutex.Lock() // needed to protect blockCache access
eventData := event.Data().(p2p.GossipedBlock)
block := eventData.Block
commit := eventData.Commit
m.logger.Debug("Received new block via gossip", "height", block.Header.Height, "n cachedBlocks", len(m.blockCache))
m.retrieverMutex.Lock() // needed to protect blockCache access
_, found := m.blockCache[block.Header.Height]
// It is not strictly necessary to return early, for correctness, but doing so helps us avoid mutex pressure and unnecessary repeated attempts to apply cached blocks
if found {
m.retrieverMutex.Unlock()
return
}

m.logger.Debug("Received new block via gossip", "block height", block.Header.Height, "store height", m.State.Height(), "n cachedBlocks", len(m.blockCache))

nextHeight := m.State.NextHeight()
if block.Header.Height >= nextHeight {
m.blockCache[block.Header.Height] = CachedBlock{
Block: &block,
Commit: &commit,
}
m.logger.Debug("caching block", "block height", block.Header.Height, "store height", m.State.Height())
}
m.retrieverMutex.Unlock() // have to give this up as it's locked again in attempt apply, and we're not re-entrant

err := m.attemptApplyCachedBlocks()
if err != nil {
m.logger.Error("applying cached blocks", "err", err)
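
The reordering in onNewGossipedBlock takes the retriever mutex before touching the block cache, returns early for heights that are already cached, and releases the lock before attempting to apply cached blocks because that path takes the same lock. A condensed sketch of the pattern, with simplified stand-in types rather than the real dymint ones:

```go
package gossipsketch

import "sync"

// Block is a stand-in for the gossiped block type; only the height matters here.
type Block struct{ Height uint64 }

// cache condenses the pattern above: cache under a mutex, skip duplicates early,
// and release the lock before applying, because the apply path takes the same
// lock and is not re-entrant.
type cache struct {
	mu     sync.Mutex
	blocks map[uint64]*Block // keyed by block height
}

func (c *cache) onGossipedBlock(b *Block, nextHeight uint64, apply func() error) error {
	c.mu.Lock()
	if _, found := c.blocks[b.Height]; found {
		c.mu.Unlock() // already cached: avoid repeated apply attempts for the same height
		return nil
	}
	if b.Height >= nextHeight {
		c.blocks[b.Height] = b // only cache blocks that have not been applied yet
	}
	c.mu.Unlock()

	return apply() // e.g. attemptApplyCachedBlocks in the real manager
}
```
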
12 changes: 6 additions & 6 deletions block/manager.go
@@ -126,17 +126,17 @@ func (m *Manager) Start(ctx context.Context) error {
m.logger.Info("Starting the block manager")

// Check if proposer key matches to the one in the settlement layer
var isAggregator bool
var isSequencer bool
slProposerKey := m.SLClient.GetProposer().PublicKey.Bytes()
localProposerKey, err := m.ProposerKey.GetPublic().Raw()
if err != nil {
return fmt.Errorf("get local node public key: %w", err)
}
if bytes.Equal(slProposerKey, localProposerKey) {
m.logger.Info("Starting in aggregator mode")
isAggregator = true
m.logger.Info("Starting in sequencer mode")
isSequencer = true
} else {
m.logger.Info("Starting in non-aggregator mode")
m.logger.Info("Starting in non-sequencer mode")
}

// Check if InitChain flow is needed
@@ -149,7 +149,7 @@ func (m *Manager) Start(ctx context.Context) error {
}
}

if !isAggregator {
if !isSequencer {
go uevent.MustSubscribe(ctx, m.Pubsub, "applyGossipedBlocksLoop", p2p.EventQueryNewNewGossipedBlock, m.onNewGossipedBlock, m.logger)
}

@@ -158,7 +158,7 @@ func (m *Manager) Start(ctx context.Context) error {
return fmt.Errorf("sync block manager: %w", err)
}

if isAggregator {
if isSequencer {
// TODO: populate the accumulatedSize on startup
go m.ProduceBlockLoop(ctx)
go m.SubmitLoop(ctx)
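
The aggregator-to-sequencer rename above is mechanical, but the underlying decision is: run the produce and submit loops only when the local proposer key matches the proposer registered on the settlement layer, otherwise subscribe to gossiped blocks. A hedged sketch of that branch, with the loops passed in as placeholder functions:

```go
package modesketch

import "bytes"

// startLoops condenses the mode decision in Manager.Start: run the produce and
// submit loops only when the local proposer key equals the settlement-layer
// proposer key, otherwise apply blocks gossiped by the sequencer. The three
// function parameters are placeholders for the real loops.
func startLoops(slProposerKey, localProposerKey []byte, produce, submit, applyGossiped func()) {
	if bytes.Equal(slProposerKey, localProposerKey) {
		// sequencer mode
		go produce()
		go submit()
		return
	}
	// non-sequencer (full node) mode
	go applyGossiped()
}
```
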
8 changes: 3 additions & 5 deletions block/submit.go
@@ -45,11 +45,9 @@ func (m *Manager) SubmitLoop(ctx context.Context) {
}

/*
Note: since we dont explicitly coordinate changes to the accumulated size with actual batch creation
we don't have a guarantee that the accumulated size is the same as the actual batch size that will be made.
See https://github.com/dymensionxyz/dymint/issues/828
Until that is fixed, it's technically possibly to undercount, by having a some blocks be produced in between
setting the counter to 0, and actually producing the batch.
Note: since we dont explicitly coordinate changes to the accumulated size with actual batch creation
we don't have a guarantee that the accumulated size is the same as the actual batch size that will be made.
But the batch creation step will also check the size is OK, so it's not a problem.
*/
m.AccumulatedBatchSize.Store(0)

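
The reworded note explains that the accumulated size counter is only a submission trigger: it is not coordinated with actual batch creation, and the batch creation step re-checks the size limit. A small sketch of that counter pattern using sync/atomic, with names chosen for illustration rather than taken from the dymint API:

```go
package submitsketch

import "sync/atomic"

// accumulated approximates the bytes produced since the last batch submission.
// As the note above says, it is only a trigger: blocks produced between the
// reset and the actual batch creation are not coordinated with the counter,
// and the batch creation step re-checks the real size limit.
var accumulated atomic.Uint64

// onBlockProduced is called from the produce loop with each new block's size.
func onBlockProduced(blockSizeBytes, maxBatchSizeBytes uint64, triggerSubmit func()) {
	if accumulated.Add(blockSizeBytes) >= maxBatchSizeBytes {
		triggerSubmit() // wake the submit loop; it rebuilds and re-validates the batch
	}
}

// onBatchSubmitTriggered resets the counter, mirroring AccumulatedBatchSize.Store(0).
func onBatchSubmitTriggered() {
	accumulated.Store(0)
}
```
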
4 changes: 2 additions & 2 deletions block/synctarget.go
@@ -8,7 +8,7 @@ import (
)

// SyncTargetLoop is responsible for getting real time updates about settlement batch submissions.
// For non aggregator: updating the sync target which will be used by retrieveLoop to sync until this target.
// For non sequencer: updating the sync target which will be used by retrieveLoop to sync until this target.
// It publishes new sync height targets which will then be synced by another process.
func (m *Manager) SyncTargetLoop(ctx context.Context) {
m.logger.Info("Started sync target loop")
@@ -38,7 +38,7 @@ func (m *Manager) SyncTargetLoop(ctx context.Context) {
m.UpdateSyncParams(eventData.EndHeight)
m.SyncTargetDiode.Set(diodes.GenericDataType(&eventData.EndHeight))
case <-subscription.Cancelled():
m.logger.Info("syncTargetLoop subscription canceled")
m.logger.Error("syncTargetLoop subscription canceled")
return
}
}
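
For context, the loop above consumes settlement batch-acceptance events and publishes new sync height targets until the subscription is cancelled, which is why the cancellation message is promoted from Info to Error. A stripped-down sketch of that select loop, with plain channels standing in for the pubsub subscription and diode:

```go
package syncsketch

import "context"

// syncTargetLoop condenses the structure above: consume batch-acceptance events
// and publish new sync height targets until the context or the subscription is
// cancelled. Plain channels stand in for the pubsub subscription and the diode.
func syncTargetLoop(ctx context.Context, endHeights <-chan uint64, cancelled <-chan struct{}, setTarget func(uint64)) {
	for {
		select {
		case <-ctx.Done():
			return
		case h := <-endHeights:
			setTarget(h) // e.g. UpdateSyncParams + SyncTargetDiode.Set in the real loop
		case <-cancelled:
			// unexpected loss of the subscription; the diff logs this at Error level
			return
		}
	}
}
```
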
6 changes: 3 additions & 3 deletions config/flags.go
@@ -39,9 +39,9 @@ func AddNodeFlags(cmd *cobra.Command) {

cmd.Flags().String(FlagDALayer, def.DALayer, "Data Availability Layer Client name (mock or grpc")
cmd.Flags().String(FlagDAConfig, def.DAConfig, "Data Availability Layer Client config")
cmd.Flags().Duration(FlagBlockTime, def.BlockTime, "block time (for aggregator mode)")
cmd.Flags().Duration(FlagMaxIdleTime, def.MaxIdleTime, "max time for empty blocks (for aggregator mode)")
cmd.Flags().Duration(FlagBatchSubmitMaxTime, def.BatchSubmitMaxTime, "max time for batch submit (for aggregator mode)")
cmd.Flags().Duration(FlagBlockTime, def.BlockTime, "block time (for sequencer mode)")
cmd.Flags().Duration(FlagMaxIdleTime, def.MaxIdleTime, "max time for empty blocks (for sequencer mode)")
cmd.Flags().Duration(FlagBatchSubmitMaxTime, def.BatchSubmitMaxTime, "max time for batch submit (for sequencer mode)")
cmd.Flags().String(FlagNamespaceID, def.NamespaceID, "namespace identifies (8 bytes in hex)")
cmd.Flags().Uint64(FlagBlockBatchMaxSizeBytes, def.BlockBatchMaxSizeBytes, "block batch size in bytes")

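
These are help-text renames only, but for readers unfamiliar with the flag wiring, here is a minimal cobra sketch of how such a duration flag is registered and read back; the flag name and default below are placeholders, and the real names come from the Flag* constants used in AddNodeFlags:

```go
package flagsketch

import (
	"fmt"
	"time"

	"github.com/spf13/cobra"
)

// newStartCmd sketches how a duration flag like the renamed ones is registered
// and read back. The flag name and default are placeholders; the real names
// come from the Flag* constants in config/flags.go.
func newStartCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use: "start",
		RunE: func(cmd *cobra.Command, _ []string) error {
			blockTime, err := cmd.Flags().GetDuration("block-time")
			if err != nil {
				return err
			}
			fmt.Println("block time (sequencer mode):", blockTime)
			return nil
		},
	}
	cmd.Flags().Duration("block-time", 200*time.Millisecond, "block time (for sequencer mode)")
	return cmd
}
```
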
4 changes: 4 additions & 0 deletions da/celestia/celestia.go
@@ -162,6 +162,10 @@ func (c *DataAvailabilityLayerClient) Start() (err error) {
}
return nil
case <-ticker.C:
state, err := rpc.Header.SyncState(c.ctx)
if err != nil {
return err
}
c.logger.Info("celestia-node still syncing", "height", state.Height, "target", state.ToHeight)
}
}
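
The added lines fetch the celestia node's header sync state on every tick and return the RPC error instead of logging stale values. A compact sketch of that polling loop, with a placeholder interval and a simplified state struct holding only the two logged fields:

```go
package celestiasketch

import (
	"context"
	"fmt"
	"time"
)

// syncState is a simplified stand-in for the header sync state returned by the
// celestia node RPC; only the two fields that are logged are kept.
type syncState struct {
	Height   uint64
	ToHeight uint64
}

// waitForSync condenses the added logic: poll the sync state on every tick,
// return RPC errors instead of logging stale values, and log progress until a
// done signal fires. The ticker interval is a placeholder, not dymint's.
func waitForSync(ctx context.Context, done <-chan struct{}, getState func(context.Context) (syncState, error)) error {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return nil
		case <-ticker.C:
			state, err := getState(ctx)
			if err != nil {
				return err
			}
			fmt.Printf("celestia-node still syncing: height=%d target=%d\n", state.Height, state.ToHeight)
		}
	}
}
```
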
