[WIP] add more broadcast logs to scroll-v5.7.11
#1075
Changes from 3 commits
@@ -783,13 +783,13 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
-		log.Trace("Discarding already known transaction", "hash", hash)
+		log.Error("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, ErrAlreadyKnown
	}

	if pool.IsMiner() && rawdb.IsSkippedTransaction(pool.chain.Database(), hash) {
-		log.Trace("Discarding already known skipped transaction", "hash", hash)
+		log.Error("Discarding already known skipped transaction", "hash", hash)
		knownSkippedTxMeter.Mark(1)
		return false, ErrAlreadyKnown
Comment on lines +793 to 795

🛠️ Refactor suggestion: Adjust log level and add context for known skipped transactions.

The current use of log.Error for transactions the miner has already skipped is too severe: these discards are expected behaviour, not failures. Recommendations: log at Debug level and include why the transaction is being discarded.

Example:

if pool.IsMiner() && rawdb.IsSkippedTransaction(pool.chain.Database(), hash) {
	log.Debug("Discarding known skipped transaction", "hash", hash, "reason", "previously skipped by miner")
	knownSkippedTxMeter.Mark(1)
	return false, ErrAlreadyKnown
}
	}

@@ -800,15 +800,15 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e

	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, isLocal); err != nil {
-		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
+		log.Error("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
Comment on lines +804 to 806

🛠️ Refactor suggestion: Reconsider log level for invalid transactions and add rate limiting.

Changing the log level for invalid transactions from Trace to Error means every rejected transaction produces an error entry, which can flood the logs of a busy node. While invalid transactions are more concerning than known transactions, not all of them may warrant an error log. Consider the following improvements:

- log.Error("Discarding invalid transaction", "hash", hash, "err", err)
+ if isSevereError(err) {
+     log.Error("Discarding invalid transaction", "hash", hash, "err", err)
+ } else {
+     log.Warn("Discarding invalid transaction", "hash", hash, "err", err)
+ }
+ logInvalidTxCounter.Inc(1)

Also, consider adding a rate limiter to this logging call and a function isSevereError that decides which validation failures warrant the error level; a sketch of both follows.
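A minimal sketch, outside this PR, of what the isSevereError helper and a rate-limited log call could look like. The error classification, the once-per-interval policy, and all names here are illustrative assumptions, and the Go standard library stands in for geth's log package.

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// Illustrative validation errors; the real pool defines its own error values.
var (
	errUnderpriced  = errors.New("transaction underpriced")
	errMalformedSig = errors.New("invalid transaction signature")
)

// isSevereError reports whether a rejection deserves error-level logging;
// routine economic rejections stay at a lower level.
func isSevereError(err error) bool {
	return !errors.Is(err, errUnderpriced)
}

// throttledLogger drops messages that arrive faster than once per interval.
type throttledLogger struct {
	mu       sync.Mutex
	last     time.Time
	interval time.Duration
}

func (t *throttledLogger) logf(format string, args ...interface{}) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if time.Since(t.last) < t.interval {
		return // rate-limited: drop this entry
	}
	t.last = time.Now()
	fmt.Printf(format+"\n", args...)
}

func main() {
	tl := &throttledLogger{interval: time.Second}
	for _, err := range []error{errMalformedSig, errUnderpriced} {
		if isSevereError(err) {
			tl.logf("ERROR discarding invalid transaction: %v", err)
		} else {
			tl.logf("WARN  discarding invalid transaction: %v", err)
		}
	}
}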
	}
	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it
		if !isLocal && pool.priced.Underpriced(tx) {
-			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+			log.Error("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)
			return false, ErrUnderpriced
Comment on lines +812 to 814

🛠️ Refactor suggestion: Adjust log level for underpriced transactions and consider aggregating logs.

The change raises the visibility of underpriced discards, but these are routine events on a busy network, so Error is too severe and logging each discard individually risks flooding the logs. Suggestions:

- log.Error("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+ log.Debug("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+ if metrics.Enabled {
+     underpricedTxCounter.Inc(1)
+ }

Also, consider adding a periodic log that summarizes the number of underpriced transactions discarded in the last interval, rather than logging each one individually; a sketch of that pattern follows.
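A minimal sketch of the summarize-per-interval pattern suggested here (the same idea applies to the similar comments below). The counter, interval, and log wording are assumptions, and the standard library logger stands in for geth's.

package main

import (
	"log"
	"sync/atomic"
	"time"
)

// underpricedDiscards counts discards between summary reports.
var underpricedDiscards atomic.Uint64

// recordUnderpricedDiscard stands in for the per-transaction log line.
func recordUnderpricedDiscard() { underpricedDiscards.Add(1) }

// reportDiscards emits one summary line per interval and resets the counter.
func reportDiscards(interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for range ticker.C {
		if n := underpricedDiscards.Swap(0); n > 0 {
			log.Printf("discarded underpriced transactions: count=%d interval=%s", n, interval)
		}
	}
}

func main() {
	go reportDiscards(2 * time.Second)
	for i := 0; i < 5; i++ {
		recordUnderpricedDiscard() // call sites replace the individual per-hash logs
	}
	time.Sleep(3 * time.Second) // allow one summary to fire before exiting
}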
		}

@@ -817,6 +817,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
		// do too many replacements between reorg-runs, so we cap the number of
		// replacements to 25% of the slots
		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
+			log.Error("Discarding transaction due to too many changes since reorg", "hash", hash)
			throttleTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
Comment on lines +821 to 823

🛠️ Refactor suggestion: Adjust log level and add context for transactions discarded due to reorg frequency.

The change adds an Error-level log for a throttle that is working as designed, which is too severe, and the message gives no sense of how far over the threshold the pool is. Consider Warn with the relevant numbers:

- log.Error("Discarding transaction due to too many changes since reorg", "hash", hash)
+ log.Warn("Discarding transaction due to high reorg frequency", "hash", hash, "changesSinceReorg", pool.changesSinceReorg, "threshold", int(pool.config.GlobalSlots/4))

Consider also adding a metric to track how often this situation occurs, as it might indicate network instability or other issues that require attention; a sketch follows.
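A minimal sketch of how such a metric could be registered, assuming this fork keeps go-ethereum's metrics package (github.com/ethereum/go-ethereum/metrics); the metric names and the helper function are hypothetical and not part of the PR.

package txpoolmetrics // hypothetical; in practice this would sit next to the existing meters in tx_pool.go

import "github.com/ethereum/go-ethereum/metrics"

// Hypothetical metrics, registered the same way the pool's existing meters
// (e.g. throttleTxMeter) are.
var (
	reorgThrottleCounter   = metrics.NewRegisteredCounter("txpool/reorgthrottle/discards", nil)
	changesSinceReorgGauge = metrics.NewRegisteredGauge("txpool/reorgthrottle/changes", nil)
)

// RecordReorgThrottle would be called where the pool returns ErrTxPoolOverflow
// because changesSinceReorg exceeded the threshold.
func RecordReorgThrottle(changesSinceReorg int) {
	reorgThrottleCounter.Inc(1)
	changesSinceReorgGauge.Update(int64(changesSinceReorg))
}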
		}

@@ -828,52 +829,67 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e

		// Special case, we still can't make the room for the new remote one.
		if !isLocal && !success {
-			log.Trace("Discarding overflown transaction", "hash", hash)
+			log.Error("Discarding overflown transaction", "hash", hash)
			overflowedTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
Comment on lines +833 to 835

🛠️ Refactor suggestion: Refine logging for overflown transactions with appropriate level and additional context.

The change from Trace to Error is too aggressive for a pool that is simply full, and the message does not say how full it is. Consider Warn with the pool occupancy, plus a counter:

- log.Error("Discarding overflown transaction", "hash", hash)
+ log.Warn("Discarding transaction due to full pool", "hash", hash, "poolSize", pool.all.Count(), "capacity", pool.config.GlobalSlots+pool.config.GlobalQueue)
+ if metrics.Enabled {
+     overflownTxCounter.Inc(1)
+ }

Also, consider adding a periodic summary log that provides an overview of the pool's state and the number of overflown transactions in the last interval (the same pattern as the sketch above). This would provide valuable insights without the risk of log flooding.
		}
		// Bump the counter of rejections-since-reorg
		pool.changesSinceReorg += len(drop)
		// Kick out the underpriced remote transactions.
		for _, tx := range drop {
-			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+			log.Error("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)
			pool.removeTx(tx.Hash(), false)
		}
Comment on lines +841 to 844

🛠️ Refactor suggestion: Harmonize logging for underpriced transactions and consider aggregation.

This log statement is very similar to the one we reviewed earlier for underpriced transactions. For consistency and improved logging practices, consider the following:

- log.Error("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+ log.Debug("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap(), "minTip", pool.gasPrice, "minFeeCap", pool.priced.urgent.baseFee)
+ if metrics.Enabled {
+     underpricedTxCounter.Inc(1)
+ }

Additionally, implement a periodic summary log that provides an overview of underpriced transactions discarded in the last interval, including both fresh and existing ones. This would offer valuable insights without risking log saturation.
	}
	// Try to replace an existing transaction in the pending pool
	from, _ := types.Sender(pool.signer, tx) // already validated

+	log.Info("already validated", "hash", hash, "from", from, "to", tx.To())

Comment on lines +848 to +850

🛠️ Refactor suggestion: Reconsider logging level and content for transaction validation.

While adding more visibility to the transaction validation process can be beneficial, the current implementation logs at Info level for every transaction that reaches this point, which can inflate log volume and add overhead on busy nodes. Recommendations: drop the level to Debug, or gate the extra logging behind a verbosity flag.

Example:

// logVerbose forwards key-value context to log.Debug only when verbose
// logging is enabled.
func logVerbose(msg string, ctx ...interface{}) {
	if verboseLogging {
		log.Debug(msg, ctx...)
	}
}

// Usage
logVerbose("Transaction validated", "hash", hash)
	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {

+		log.Info("already pending", "hash", hash, "from", from, "to", tx.To())

		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.currentState, pool.config.PriceBump, pool.chainconfig, pool.currentHead)

+		log.Info("already pending", "hash", hash, "inserted", inserted, "old", old.Hash().Hex())

		if !inserted {
+			log.Error("already pending, ErrReplaceUnderpriced", "hash", hash)
			pendingDiscardMeter.Mark(1)
			return false, ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
+			log.Info("already pending, new transaction is better, replace old one", "hash", hash, "old", old.Hash().Hex())
			pool.all.Remove(old.Hash())
			pool.calculateTxsLifecycle(types.Transactions{old}, time.Now())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
+		log.Info("already pending 1", "hash", hash)
		pool.all.Add(tx, isLocal)
		pool.priced.Put(tx, isLocal)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
-		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
+		log.Info("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())

		// Successful promotion, bump the heartbeat
		pool.beats[from] = time.Now()
		return old != nil, nil
	}
	// New transaction isn't replacing a pending one, push into queue
+	log.Info("new transaction isn't replacing a pending one, push into queue", "hash", hash, "from", from, "to", tx.To())
	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
	if err != nil {
		return false, err
	}
	// Mark local addresses and journal local transactions
+	log.Info("mark local addresses and journal local transactions", "hash", hash, "from", from, "to", tx.To())
	if local && !pool.locals.contains(from) {
-		log.Info("Setting new local account", "hash", hash, "from", from, "to", tx.To())
+		log.Info("Setting new local account", "address", from)
		pool.locals.add(from)
		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
@@ -883,7 +899,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
	}
	pool.journalTx(from, tx)

-	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
+	log.Info("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
	return replaced, nil
}

@@ -1084,6 +1100,7 @@ func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error,
		replaced, err := pool.add(tx, local)
		errs[i] = err
		if err == nil && !replaced {
+			log.Info("dirty.addTx", "hash", tx.Hash())
			dirty.addTx(tx)
		}
	}

@@ -1262,6 +1279,7 @@ func (pool *TxPool) scheduleReorgLoop() {
			} else {
				dirtyAccounts.merge(req)
			}
+			// TODO: print dirtyAccounts related in reorg
			launchNextRun = true
			pool.reorgDoneCh <- nextDone
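One possible way to resolve the TODO, sketched as a suggestion diff; it assumes dirtyAccounts is the pool's accountSet and that upstream go-ethereum's flatten() helper (which returns the tracked addresses) is still available in this fork, which would need to be verified.

-			// TODO: print dirtyAccounts related in reorg
+			if dirtyAccounts != nil {
+				log.Debug("Scheduling tx pool reorg", "dirtyAccounts", len(dirtyAccounts.flatten()))
+			}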
@@ -795,6 +795,10 @@ func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{},
			})

			log.Debug("Scheduling transaction retrieval", "peer", peer, "len(f.announces[peer])", len(f.announces[peer]), "len(hashes)", len(hashes))
+			for _, hash := range hashes {
+				log.Info("Scheduling transaction retrieval", "peer", peer, "len(f.announces[peer])", len(f.announces[peer]), "len(hashes)", len(hashes), "hash", hash)
+			}

Comment on lines +798 to +801

🛠️ Refactor suggestion: Consider adjusting log levels and reducing log verbosity.

The added logging statements provide valuable information for debugging the transaction retrieval process. However, logging every hash at Info level can overwhelm the logs and add overhead when many transactions are being scheduled. Consider the following improvements:

-log.Info("Scheduling transaction retrieval", "peer", peer, "len(f.announces[peer])", len(f.announces[peer]), "len(hashes)", len(hashes), "hash", hash)
+log.Debug("Scheduling transaction retrieval", "peer", peer, "len(f.announces[peer])", len(f.announces[peer]), "len(hashes)", len(hashes), "hash", hash)
if len(hashes) > 0 {
	log.Info("Scheduled transaction retrievals", "peer", peer, "announcements", len(f.announces[peer]), "retrievals", len(hashes))
}

These changes will maintain the detailed logging for debugging purposes while reducing the verbosity and potential performance impact in production environments.
			peerAnnounceTxsLenGauge.Update(int64(len(f.announces[peer])))
			peerRetrievalTxsLenGauge.Update(int64(len(hashes)))

@@ -519,12 +519,18 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) {
		directCount += len(hashes)
		peer.AsyncSendTransactions(hashes)
		log.Debug("Transactions being broadcasted to", "peer", peer.String(), "len", len(hashes))
+		for _, hash := range hashes {
+			log.Info("Transactions being broadcasted to", "peer", peer.String(), "len", len(hashes), "hash", hash)
+		}
Comment on lines +522 to +524

Consider optimizing the new logging statements.

While the additional logging provides valuable information for debugging, logging every broadcast and announced hash at Info level is verbose and adds per-transaction overhead on the broadcast path. Consider the following improvements:

for peer, hashes := range txset {
	directPeers++
	directCount += len(hashes)
	peer.AsyncSendTransactions(hashes)
	log.Debug("Transactions being broadcasted to", "peer", peer.String(), "len", len(hashes))
-	for _, hash := range hashes {
-		log.Info("Transactions being broadcasted to", "peer", peer.String(), "len", len(hashes), "hash", hash)
-	}
}
+log.Debug("Transactions broadcast details", "directPeers", directPeers, "directCount", directCount)
for peer, hashes := range annos {
	annoPeers++
	annoCount += len(hashes)
	peer.AsyncSendPooledTransactionHashes(hashes)
	log.Debug("Transactions being announced to", "peer", peer.String(), "len", len(hashes))
-	for _, hash := range hashes {
-		log.Info("Transactions being announced to", "peer", peer.String(), "len", len(hashes), "hash", hash)
-	}
}
+log.Debug("Transactions announcement details", "annoPeers", annoPeers, "annoCount", annoCount)

This approach will provide the necessary information for debugging while minimizing the performance impact and log verbosity.

Also applies to: 531-533
	}
	for peer, hashes := range annos {
		annoPeers++
		annoCount += len(hashes)
		peer.AsyncSendPooledTransactionHashes(hashes)
		log.Debug("Transactions being announced to", "peer", peer.String(), "len", len(hashes))
+		for _, hash := range hashes {
+			log.Info("Transactions being announced to", "peer", peer.String(), "len", len(hashes), "hash", hash)
+		}
	}
	log.Debug("Transaction broadcast", "txs", len(txs),
		"announce packs", annoPeers, "announced hashes", annoCount,
@@ -338,12 +338,15 @@ func handleNewPooledTransactionHashes(backend Backend, msg Decoder, peer *Peer)
	}
	ann := new(NewPooledTransactionHashesPacket)
	if err := msg.Decode(ann); err != nil {
-		log.Debug("Failed to decode `NewPooledTransactionHashesPacket`", "peer", peer.String(), "err", err)
+		log.Error("Failed to decode `NewPooledTransactionHashesPacket`", "peer", peer.String(), "err", err)
		newPooledTxHashesFailMeter.Mark(1)
		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
	}
	// Schedule all the unknown hashes for retrieval
	log.Debug("handleNewPooledTransactionHashes", "peer", peer.String(), "len(ann)", len(*ann))
+	for _, hash := range *ann {
+		log.Info("handleNewPooledTransactionHashes", "peer", peer.String(), "len(ann)", len(*ann), "hash", hash.Hex())
+	}
Comment on lines +341 to +349

Consider the performance impact of verbose logging.

The changes improve error visibility by upgrading the log level for decoding errors from Debug to Error, which is good. However, the new Info level log for each transaction hash (line 348) might be too verbose and could potentially impact performance, especially when dealing with a large number of transactions. Options include lowering the per-hash log to Debug, replacing it with a single summary line, or only logging individual hashes for small batches.

Example implementation of the last option:

log.Debug("handleNewPooledTransactionHashes", "peer", peer.String(), "len(ann)", len(*ann))
-for _, hash := range *ann {
-	log.Info("handleNewPooledTransactionHashes", "peer", peer.String(), "len(ann)", len(*ann), "hash", hash.Hex())
+if len(*ann) <= 10 {
+	for _, hash := range *ann {
+		log.Debug("handleNewPooledTransactionHashes", "peer", peer.String(), "len(ann)", len(*ann), "hash", hash.Hex())
+	}
}

This change will limit the logging of individual hashes to cases where there are 10 or fewer transactions, reducing the verbosity for larger batches.
	newPooledTxHashesLenGauge.Update(int64(len(*ann)))
	for _, hash := range *ann {
		peer.markTransaction(hash)

@@ -355,12 +358,15 @@ func handleGetPooledTransactions66(backend Backend, msg Decoder, peer *Peer) err
	// Decode the pooled transactions retrieval message
	var query GetPooledTransactionsPacket66
	if err := msg.Decode(&query); err != nil {
-		log.Debug("Failed to decode `GetPooledTransactionsPacket66`", "peer", peer.String(), "err", err)
+		log.Error("Failed to decode `GetPooledTransactionsPacket66`", "peer", peer.String(), "err", err)
		getPooledTxsFailMeter.Mark(1)
		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
	}
	hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsPacket, peer)
	log.Debug("handleGetPooledTransactions", "peer", peer.String(), "RequestId", query.RequestId, "len(query)", len(query.GetPooledTransactionsPacket), "retrieved", len(hashes))
+	for _, hash := range hashes {
+		log.Info("handleGetPooledTransactions", "peer", peer.String(), "RequestId", query.RequestId, "len(query)", len(query.GetPooledTransactionsPacket), "retrieved", len(hashes), "hash", hash.Hex())
+	}
Comment on lines +361 to +369

Consistent logging strategy needed.

Similar to the changes in handleNewPooledTransactionHashes, this function now logs every retrieved hash at Info level. To maintain consistency and prevent performance issues, consider applying the same logging strategy as suggested for that function:

log.Debug("handleGetPooledTransactions", "peer", peer.String(), "RequestId", query.RequestId, "len(query)", len(query.GetPooledTransactionsPacket), "retrieved", len(hashes))
-for _, hash := range hashes {
-	log.Info("handleGetPooledTransactions", "peer", peer.String(), "RequestId", query.RequestId, "len(query)", len(query.GetPooledTransactionsPacket), "retrieved", len(hashes), "hash", hash.Hex())
+if len(hashes) <= 10 {
+	for _, hash := range hashes {
+		log.Debug("handleGetPooledTransactions", "peer", peer.String(), "RequestId", query.RequestId, "len(query)", len(query.GetPooledTransactionsPacket), "retrieved", len(hashes), "hash", hash.Hex())
+	}
}

This change will limit the logging of individual hashes to cases where there are 10 or fewer transactions, reducing the verbosity for larger batches while maintaining consistency with the previous function.
	getPooledTxsQueryLenGauge.Update(int64(len(query.GetPooledTransactionsPacket)))
	getPooledTxsRetrievedLenGauge.Update(int64(len(hashes)))
	return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs)

@@ -403,16 +409,19 @@ func handleTransactions(backend Backend, msg Decoder, peer *Peer) error {
	var txs TransactionsPacket
	if err := msg.Decode(&txs); err != nil {
		handleTxsFailMeter.Mark(1)
-		log.Debug("Failed to decode `TransactionsPacket`", "peer", peer.String(), "err", err)
+		log.Error("Failed to decode `TransactionsPacket`", "peer", peer.String(), "err", err)
		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
	}
	log.Debug("handleTransactions", "peer", peer.String(), "len(txs)", len(txs))
+	for _, tx := range txs {
+		log.Info("handleTransactions", "peer", peer.String(), "len(txs)", len(txs), "tx", tx.Hash().Hex())
+	}
Comment on lines +412 to +418

Improve logging consistency and nil transaction handling.

The changes in this function follow a similar pattern to the previous ones, with improved error visibility and potentially verbose transaction logging. Additionally, there's a new check for nil transactions.

Consider the following changes:

log.Debug("handleTransactions", "peer", peer.String(), "len(txs)", len(txs))
-for _, tx := range txs {
-	log.Info("handleTransactions", "peer", peer.String(), "len(txs)", len(txs), "tx", tx.Hash().Hex())
+if len(txs) <= 10 {
+	for _, tx := range txs {
+		log.Debug("handleTransactions", "peer", peer.String(), "len(txs)", len(txs), "tx", tx.Hash().Hex())
+	}
}

if tx == nil {
	handleTxsNilMeter.Mark(1)
	log.Error("handleTransactions: transaction is nil", "peer", peer.String(), "i", i)
+	nilTxCounter.Inc(1)
	return fmt.Errorf("%w: transaction %d is nil", errDecode, i)
}

These changes will maintain consistency with previous functions and provide additional metrics for monitoring nil transactions.

Also applies to: 424-425
	handleTxsLenGauge.Update(int64(len(txs)))
	for i, tx := range txs {
		// Validate and mark the remote transaction
		if tx == nil {
			handleTxsNilMeter.Mark(1)
-			log.Debug("handleTransactions: transaction is nil", "peer", peer.String(), "i", i)
+			log.Error("handleTransactions: transaction is nil", "peer", peer.String(), "i", i)
			return fmt.Errorf("%w: transaction %d is nil", errDecode, i)
		}
		peer.markTransaction(tx.Hash())

@@ -429,7 +438,7 @@ func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error
	var txs PooledTransactionsPacket66
	if err := msg.Decode(&txs); err != nil {
		pooledTxs66FailMeter.Mark(1)
-		log.Debug("Failed to decode `PooledTransactionsPacket66`", "peer", peer.String(), "err", err)
+		log.Error("Failed to decode `PooledTransactionsPacket66`", "peer", peer.String(), "err", err)
		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
	}
	log.Debug("handlePooledTransactions66", "peer", peer.String(), "len(txs)", len(txs.PooledTransactionsPacket))

@@ -438,7 +447,7 @@ func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error
		// Validate and mark the remote transaction
		if tx == nil {
			pooledTxs66NilMeter.Mark(1)
-			log.Debug("handlePooledTransactions: transaction is nil", "peer", peer.String(), "i", i)
+			log.Error("handlePooledTransactions: transaction is nil", "peer", peer.String(), "i", i)
			return fmt.Errorf("%w: transaction %d is nil", errDecode, i)
		}
		peer.markTransaction(tx.Hash())

Consider using a less severe log level for discarding known transactions.

The log level for discarding already known transactions has been changed from log.Trace to log.Error. While it's important to track discarded transactions, using log.Error for what could be a common occurrence might lead to several issues, such as flooding the logs and making genuine errors harder to spot. Consider using log.Debug or log.Info instead, which would provide visibility without the drawbacks of using the error level.
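As a minimal illustration of the suggested change, applied to the first of these log lines:

-		log.Error("Discarding already known transaction", "hash", hash)
+		log.Debug("Discarding already known transaction", "hash", hash)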