all: fix mismatched names in comments (#29348)

* all: fix mismatched names in comments

* metrics: fix mismatched name in UpdateIfGt

This commit is contained in: commit 723b1e36ad (parent 58a3e2f180)
@@ -127,7 +127,7 @@ func (arguments Arguments) Copy(v interface{}, values []interface{}) error {
 	return arguments.copyAtomic(v, values[0])
 }
 
-// unpackAtomic unpacks ( hexdata -> go ) a single value
+// copyAtomic copies ( hexdata -> go ) a single value
 func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{}) error {
 	dst := reflect.ValueOf(v).Elem()
 	src := reflect.ValueOf(marshalledValues)

@@ -51,7 +51,7 @@ var (
 	}
 )
 
-// waitWatcherStarts waits up to 1s for the keystore watcher to start.
+// waitWatcherStart waits up to 1s for the keystore watcher to start.
 func waitWatcherStart(ks *KeyStore) bool {
 	// On systems where file watch is not supported, just return "ok".
 	if !ks.cache.watcher.enabled() {
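Note: the renamed waitWatcherStart simply polls until the keystore watcher reports itself running, giving up after about a second. A minimal stand-alone sketch of that kind of bounded poll loop (the predicate and intervals here are illustrative assumptions, not the keystore's actual fields):

package sketch

import "time"

// waitUntil polls started() every step until it reports true, giving up once
// timeout has elapsed. It returns whether the condition was observed in time.
func waitUntil(started func() bool, timeout, step time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if started() {
			return true
		}
		time.Sleep(step)
	}
	return false
}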
@@ -289,7 +289,7 @@ func decodeFinalityUpdate(enc []byte) (types.FinalityUpdate, error) {
 	}, nil
 }
 
-// GetHead fetches and validates the beacon header with the given blockRoot.
+// GetHeader fetches and validates the beacon header with the given blockRoot.
 // If blockRoot is null hash then the latest head header is fetched.
 func (api *BeaconLightApi) GetHeader(blockRoot common.Hash) (types.Header, error) {
 	var blockId string
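Note: per the fixed comment, GetHeader treats the all-zero root as "fetch the latest head". A hedged sketch of how the block identifier for the beacon API request could be derived (the helper name is hypothetical and the real method body may differ; assumes the go-ethereum common package):

package sketch

import "github.com/ethereum/go-ethereum/common"

// blockIdFor maps the zero hash to the "head" identifier and any other root
// to its hex form, as used in beacon API paths such as /eth/v1/beacon/headers/{id}.
func blockIdFor(blockRoot common.Hash) string {
	if blockRoot == (common.Hash{}) {
		return "head"
	}
	return blockRoot.Hex()
}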
@@ -56,7 +56,7 @@ func (h *HeadTracker) ValidatedHead() (types.SignedHeader, bool) {
 	return h.signedHead, h.hasSignedHead
 }
 
-// ValidatedHead returns the latest validated head.
+// ValidatedFinality returns the latest validated finality.
 func (h *HeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
 	h.lock.RLock()
 	defer h.lock.RUnlock()

@@ -64,7 +64,7 @@ func (h *HeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
 	return h.finalityUpdate, h.hasFinalityUpdate
 }
 
-// Validate validates the given signed head. If the head is successfully validated
+// ValidateHead validates the given signed head. If the head is successfully validated
 // and it is better than the old validated head (higher slot or same slot and more
 // signers) then ValidatedHead is updated. The boolean return flag signals if
 // ValidatedHead has been changed.
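Note: the rule in the fixed ValidateHead comment, prefer a higher slot and on equal slots the head with more signers, boils down to a two-field comparison. A hypothetical comparator expressing it (not the tracker's actual code):

package sketch

// betterThan reports whether a new head (newSlot, newSigners) should replace the
// currently validated one: the higher slot wins, and on a tie more signers win.
func betterThan(newSlot, oldSlot uint64, newSigners, oldSigners int) bool {
	if newSlot != oldSlot {
		return newSlot > oldSlot
	}
	return newSigners > oldSigners
}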
@@ -212,7 +212,7 @@ func (s *serverWithTimeout) startTimeout(reqData RequestResponse) {
 	})
 }
 
-// stop stops all goroutines associated with the server.
+// unsubscribe stops all goroutines associated with the server.
 func (s *serverWithTimeout) unsubscribe() {
 	s.lock.Lock()
 	defer s.lock.Unlock()

@@ -337,7 +337,7 @@ func (s *serverWithLimits) sendRequest(request Request) (reqId ID) {
 	return s.serverWithTimeout.sendRequest(request)
 }
 
-// stop stops all goroutines associated with the server.
+// unsubscribe stops all goroutines associated with the server.
 func (s *serverWithLimits) unsubscribe() {
 	s.lock.Lock()
 	defer s.lock.Unlock()

@@ -101,7 +101,7 @@ func (s *HeadSync) newSignedHead(server request.Server, signedHead types.SignedH
 	s.headTracker.ValidateHead(signedHead)
 }
 
-// newSignedHead handles received signed head; either validates it if the chain
+// newFinalityUpdate handles received finality update; either validates it if the chain
 // is properly synced or stores it for further validation.
 func (s *HeadSync) newFinalityUpdate(server request.Server, finalityUpdate types.FinalityUpdate) {
 	if !s.chainInit || types.SyncPeriod(finalityUpdate.SignatureSlot) > s.nextSyncPeriod {

@@ -111,7 +111,7 @@ func (s *HeadSync) newFinalityUpdate(server request.Server, finalityUpdate types
 	s.headTracker.ValidateFinality(finalityUpdate)
 }
 
-// processUnvalidatedHeads iterates the list of unvalidated heads and validates
+// processUnvalidated iterates the list of unvalidated heads and validates
 // those which can be validated.
 func (s *HeadSync) processUnvalidated() {
 	if !s.chainInit {

@@ -36,7 +36,7 @@ type ExecutionHeader struct {
 	obj headerObject
 }
 
-// HeaderFromJSON decodes an execution header from JSON data provided by
+// ExecutionHeaderFromJSON decodes an execution header from JSON data provided by
 // the beacon chain API.
 func ExecutionHeaderFromJSON(forkName string, data []byte) (*ExecutionHeader, error) {
 	var obj headerObject

@@ -48,7 +48,7 @@ func TestAttachWithHeaders(t *testing.T) {
 	// This is fixed in a follow-up PR.
 }
 
-// TestAttachWithHeaders tests that 'geth db --remotedb' with custom headers works, i.e
+// TestRemoteDbWithHeaders tests that 'geth db --remotedb' with custom headers works, i.e
 // that custom headers are forwarded to the target.
 func TestRemoteDbWithHeaders(t *testing.T) {
 	t.Parallel()

@@ -97,7 +97,7 @@ func testExport(t *testing.T, f string) {
 	}
 }
 
-// testDeletion tests if the deletion markers can be exported/imported correctly
+// TestDeletionExport tests if the deletion markers can be exported/imported correctly
 func TestDeletionExport(t *testing.T) {
 	f := fmt.Sprintf("%v/tempdump", os.TempDir())
 	defer func() {

@@ -174,7 +174,7 @@ func (l *list[T]) init() {
 	l.root.prev = &l.root
 }
 
-// push adds an element to the front of the list.
+// pushElem adds an element to the front of the list.
 func (l *list[T]) pushElem(e *listElem[T]) {
 	e.prev = &l.root
 	e.next = l.root.next
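Note: pushElem links the element in right behind the list's root sentinel, the usual push-front on a circular doubly linked list. A generic sketch of that operation (types and names are illustrative, not the pool's internals):

package sketch

// elem is an illustrative node of a circular doubly linked list with a root sentinel.
type elem[T any] struct {
	prev, next *elem[T]
	value      T
}

// pushFront inserts e directly after the root sentinel of an initialized ring,
// making it the new front element.
func pushFront[T any](root, e *elem[T]) {
	e.prev = root
	e.next = root.next
	root.next.prev = e
	root.next = e
}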
@@ -568,7 +568,7 @@ var (
 	u256_32 = uint256.NewInt(32)
 )
 
-// AccumulateRewards credits the coinbase of the given block with the mining
+// accumulateRewards credits the coinbase of the given block with the mining
 // reward. The total reward consists of the static block reward and rewards for
 // included uncles. The coinbase of each uncle block is also rewarded.
 func accumulateRewards(config *params.ChainConfig, stateDB *state.StateDB, header *types.Header, uncles []*types.Header) {
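Note (background, not part of this diff): under the long-standing Ethash rules this function implements, an uncle mined one block behind a block whose static reward is 2 ETH earns 2 * 7/8 = 1.75 ETH, and the including miner collects an extra 2/32 = 0.0625 ETH per uncle on top of the static reward.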
@@ -127,7 +127,7 @@ func (l *lexer) ignore() {
 	l.start = l.pos
 }
 
-// Accepts checks whether the given input matches the next rune
+// accept checks whether the given input matches the next rune
 func (l *lexer) accept(valid string) bool {
 	if strings.ContainsRune(valid, l.next()) {
 		return true

@@ -639,7 +639,7 @@ func (bc *BlockChain) SetSafe(header *types.Header) {
 	}
 }
 
-// rewindPathHead implements the logic of rewindHead in the context of hash scheme.
+// rewindHashHead implements the logic of rewindHead in the context of hash scheme.
 func (bc *BlockChain) rewindHashHead(head *types.Header, root common.Hash) (*types.Header, uint64) {
 	var (
 		limit uint64 // The oldest block that will be searched for this rewinding

@@ -482,7 +482,7 @@ func makeBlockChain(chainConfig *params.ChainConfig, parent *types.Block, n int,
 	return blocks
 }
 
-// makeBlockChain creates a deterministic chain of blocks from genesis
+// makeBlockChainWithGenesis creates a deterministic chain of blocks from genesis
 func makeBlockChainWithGenesis(genesis *Genesis, n int, engine consensus.Engine, seed int) (ethdb.Database, []*types.Block) {
 	db, blocks, _ := GenerateChainWithGenesis(genesis, engine, n, func(i int, b *BlockGen) {
 		b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})

@@ -695,7 +695,7 @@ func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
 	return nil
 }
 
-// DeriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
+// deriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
 func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
 	logIndex := uint(0)
 	if len(txs) != len(receipts) {

@@ -1226,7 +1226,7 @@ func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error
 	return errs
 }
 
-// Add inserts a new blob transaction into the pool if it passes validation (both
+// add inserts a new blob transaction into the pool if it passes validation (both
 // consensus validity and pool restrictions).
 func (p *BlobPool) add(tx *types.Transaction) (err error) {
 	// The blob pool blocks on adding a transaction. This is because blob txs are

@@ -182,7 +182,7 @@ func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uin
 	return output, suppliedGas, err
 }
 
-// ECRECOVER implemented as a native contract.
+// ecrecover implemented as a native contract.
 type ecrecover struct{}
 
 func (c *ecrecover) RequiredGas(input []byte) uint64 {
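Note: ecrecover is one of the precompiles dispatched through RunPrecompiledContract above; every precompile satisfies the same small interface, which in core/vm looks roughly like this:

type PrecompiledContract interface {
	RequiredGas(input []byte) uint64  // RequiredGas reports the gas the call will consume
	Run(input []byte) ([]byte, error) // Run executes the precompile on input
}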
@@ -457,7 +457,7 @@ func runBn256Add(input []byte) ([]byte, error) {
 	return res.Marshal(), nil
 }
 
-// bn256Add implements a native elliptic curve point addition conforming to
+// bn256AddIstanbul implements a native elliptic curve point addition conforming to
 // Istanbul consensus rules.
 type bn256AddIstanbul struct{}
 

@@ -50,7 +50,7 @@ func (ctx *ScopeContext) MemoryData() []byte {
 	return ctx.Memory.Data()
 }
 
-// MemoryData returns the stack data. Callers must not modify the contents
+// StackData returns the stack data. Callers must not modify the contents
 // of the returned data.
 func (ctx *ScopeContext) StackData() []uint256.Int {
 	if ctx.Stack == nil {

@@ -167,7 +167,7 @@ type btCurve struct {
 	*btcec.KoblitzCurve
 }
 
-// Marshall converts a point given as (x, y) into a byte slice.
+// Marshal converts a point given as (x, y) into a byte slice.
 func (curve btCurve) Marshal(x, y *big.Int) []byte {
 	byteLen := (curve.Params().BitSize + 7) / 8
 

@@ -57,7 +57,7 @@ func newTester(t *testing.T) *downloadTester {
 	return newTesterWithNotification(t, nil)
 }
 
-// newTester creates a new downloader test mocker.
+// newTesterWithNotification creates a new downloader test mocker.
 func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
 	freezer := t.TempDir()
 	db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)

@@ -94,7 +94,7 @@ func newSkeletonTestPeer(id string, headers []*types.Header) *skeletonTestPeer {
 	}
 }
 
-// newSkeletonTestPeer creates a new mock peer to test the skeleton sync with,
+// newSkeletonTestPeerWithHook creates a new mock peer to test the skeleton sync with,
 // and sets an optional serve hook that can return headers for delivery instead
 // of the predefined chain. Useful for emulating malicious behavior that would
 // otherwise require dedicated peer types.

@@ -442,7 +442,7 @@ func TestInvalidLogFilterCreation(t *testing.T) {
 	}
 }
 
-// TestLogFilterUninstall tests invalid getLogs requests
+// TestInvalidGetLogsRequest tests invalid getLogs requests
 func TestInvalidGetLogsRequest(t *testing.T) {
 	t.Parallel()
 

@@ -63,7 +63,7 @@ func newTestBackend(blocks int) *testBackend {
 	return newTestBackendWithGenerator(blocks, false, nil)
 }
 
-// newTestBackend creates a chain with a number of explicitly defined blocks and
+// newTestBackendWithGenerator creates a chain with a number of explicitly defined blocks and
 // wraps it into a mock backend.
 func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int, *core.BlockGen)) *testBackend {
 	var (

@@ -839,7 +839,7 @@ func testMultiSyncManyUseless(t *testing.T, scheme string) {
 	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
 }
 
-// TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all
+// TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many which doesn't return anything valuable at all
 func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
 	t.Parallel()
 

@@ -1378,7 +1378,7 @@ func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
 	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
 }
 
-// TestSyncWithStorage tests basic sync using accounts + storage + code, against
+// TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against
 // a peer who insists on delivering full storage sets _and_ proofs. This triggered
 // an error, where the recipient erroneously clipped the boundary nodes, but
 // did not mark the account for healing.

@@ -61,7 +61,7 @@ type testBackend struct {
 	relHook func() // Hook is invoked when the requested state is released
 }
 
-// testBackend creates a new test backend. OBS: After test is done, teardown must be
+// newTestBackend creates a new test backend. OBS: After test is done, teardown must be
 // invoked in order to release associated resources.
 func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend {
 	backend := &testBackend{

@@ -171,7 +171,7 @@ func testFlatCallTracer(tracerName string, dirPath string, t *testing.T) {
 	}
 }
 
-// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to
+// jsonEqualFlat is similar to reflect.DeepEqual, but does a 'bounce' via json prior to
 // comparison
 func jsonEqualFlat(x, y interface{}) bool {
 	xTrace := new([]flatCallTrace)

@@ -75,7 +75,7 @@ func MemoryPtr(m []byte, offset, size int64) []byte {
 	return nil
 }
 
-// Back returns the n'th item in stack
+// StackBack returns the n'th item in stack
 func StackBack(st []uint256.Int, n int) *uint256.Int {
 	return &st[len(st)-n-1]
 }
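Note: with StackBack the index counts from the top of the stack, so n = 0 is the most recently pushed element. A small runnable usage example (the function body is taken from the hunk above):

package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

// StackBack returns the n'th item in stack, counting from the top.
func StackBack(st []uint256.Int, n int) *uint256.Int {
	return &st[len(st)-n-1]
}

func main() {
	st := []uint256.Int{*uint256.NewInt(1), *uint256.NewInt(2), *uint256.NewInt(3)}
	fmt.Println(StackBack(st, 0)) // top element (the 3 pushed last)
	fmt.Println(StackBack(st, 2)) // bottom element of this stack (the 1)
}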
@@ -198,7 +198,7 @@ func TestHaltBetweenSteps(t *testing.T) {
 	}
 }
 
-// testNoStepExec tests a regular value transfer (no exec), and accessing the statedb
+// TestNoStepExec tests a regular value transfer (no exec), and accessing the statedb
 // in 'result'
 func TestNoStepExec(t *testing.T) {
 	execTracer := func(code string) []byte {

@@ -86,7 +86,7 @@ func (al accessList) equal(other accessList) bool {
 	return true
 }
 
-// accesslist converts the accesslist to a types.AccessList.
+// accessList converts the accesslist to a types.AccessList.
 func (al accessList) accessList() types.AccessList {
 	acl := make(types.AccessList, 0, len(al))
 	for addr, slots := range al {

@@ -514,7 +514,7 @@ func iterateKeys(it ethdb.Iterator) []string {
 	return keys
 }
 
-// randomHash generates a random blob of data and returns it as a hash.
+// randBytes generates a random blob of data.
 func randBytes(len int) []byte {
 	buf := make([]byte, len)
 	if n, err := rand.Read(buf); n != len || err != nil {
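Note: randBytes draws from crypto/rand and treats a short or failed read as fatal. A stand-alone version of such a helper might look like this (hedged; the tree's own variant may handle the failure differently):

package sketch

import "crypto/rand"

// randBytes returns len cryptographically random bytes and panics if the
// source cannot satisfy the request.
func randBytes(len int) []byte {
	buf := make([]byte, len)
	if n, err := rand.Read(buf); n != len || err != nil {
		panic("failed to generate random blob")
	}
	return buf
}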
@@ -239,7 +239,7 @@ func (e *Era) readOffset(n uint64) (int64, error) {
 	return blockIndexRecordOffset + int64(binary.LittleEndian.Uint64(e.buf[:])), nil
 }
 
-// newReader returns a snappy.Reader for the e2store entry value at off.
+// newSnappyReader returns a snappy.Reader for the e2store entry value at off.
 func newSnappyReader(e *e2store.Reader, expectedType uint16, off int64) (io.Reader, int64, error) {
 	r, n, err := e.ReaderAt(expectedType, off)
 	if err != nil {

@@ -65,7 +65,7 @@ func ClientName(clientIdentifier string) string {
 	)
 }
 
-// runtimeInfo returns build and platform information about the current binary.
+// Info returns build and platform information about the current binary.
 //
 // If the package that is currently executing is a prefixed by our go-ethereum
 // module path, it will print out commit and date VCS information. Otherwise,

@@ -156,7 +156,7 @@ func (l *logger) Handler() slog.Handler {
 	return l.inner.Handler()
 }
 
-// write logs a message at the specified level:
+// Write logs a message at the specified level:
 func (l *logger) Write(level slog.Level, msg string, attrs ...any) {
 	if !l.inner.Enabled(context.Background(), level) {
 		return

@@ -74,7 +74,7 @@ func (g *StandardGauge) Update(v int64) {
 	g.value.Store(v)
 }
 
-// Update updates the gauge's value if v is larger then the current value.
+// UpdateIfGt updates the gauge's value if v is larger then the current value.
 func (g *StandardGauge) UpdateIfGt(v int64) {
 	for {
 		exist := g.value.Load()
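Note: UpdateIfGt has to stay monotonic under concurrent writers, so it retries a load/compare-and-swap loop rather than doing a plain store. Its body is likely along these lines (a sketch assuming g.value is a sync/atomic Int64):

func (g *StandardGauge) UpdateIfGt(v int64) {
	for {
		exist := g.value.Load()
		if exist >= v {
			return // current value is already at least v; nothing to do
		}
		if g.value.CompareAndSwap(exist, v) {
			return // swapped in the larger value
		}
		// another writer raced us; reload and try again
	}
}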
@@ -173,7 +173,7 @@ type meterArbiter struct {
 
 var arbiter = meterArbiter{ticker: time.NewTicker(5 * time.Second), meters: make(map[*StandardMeter]struct{})}
 
-// Ticks meters on the scheduled interval
+// tick meters on the scheduled interval
 func (ma *meterArbiter) tick() {
 	for range ma.ticker.C {
 		ma.tickMeters()

@@ -30,7 +30,7 @@ var enablerFlags = []string{"metrics"}
 // enablerEnvVars is the env var names to use to enable metrics collections.
 var enablerEnvVars = []string{"GETH_METRICS"}
 
-// Init enables or disables the metrics system. Since we need this to run before
+// init enables or disables the metrics system. Since we need this to run before
 // any other code gets to create meters and timers, we'll actually do an ugly hack
 // and peek into the command line args for the metrics flag.
 func init() {

@@ -597,7 +597,7 @@ func newIPCServer(log log.Logger, endpoint string) *ipcServer {
 	return &ipcServer{log: log, endpoint: endpoint}
 }
 
-// Start starts the httpServer's http.Server
+// start starts the httpServer's http.Server
 func (is *ipcServer) start(apis []rpc.API) error {
 	is.mu.Lock()
 	defer is.mu.Unlock()

@@ -44,7 +44,7 @@ func init() {
 	}
 }
 
-// meteredConn is a wrapper around a net.UDPConn that meters both the
+// meteredUdpConn is a wrapper around a net.UDPConn that meters both the
 // inbound and outbound network traffic.
 type meteredUdpConn struct {
 	UDPConn

@@ -48,7 +48,7 @@ func TestGetSetID(t *testing.T) {
 	assert.Equal(t, id, id2)
 }
 
-// TestGetSetIP4 tests encoding/decoding and setting/getting of the IP key.
+// TestGetSetIPv4 tests encoding/decoding and setting/getting of the IP key.
 func TestGetSetIPv4(t *testing.T) {
 	ip := IPv4{192, 168, 0, 3}
 	var r Record
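Note: the ENR get/set tests all follow the same pattern against the Record key-value API: store an entry with Set, read it back with Load, and compare. A condensed usage sketch mirroring the visible test body (error handling elided):

var r Record
ip := IPv4{192, 168, 0, 3}
r.Set(ip) // store the "ip" key

var ip2 IPv4
_ = r.Load(&ip2) // read it back; ip2 now equals ip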
@@ -59,7 +59,7 @@ func TestGetSetIPv4(t *testing.T) {
 	assert.Equal(t, ip, ip2)
 }
 
-// TestGetSetIP6 tests encoding/decoding and setting/getting of the IP6 key.
+// TestGetSetIPv6 tests encoding/decoding and setting/getting of the IP6 key.
 func TestGetSetIPv6(t *testing.T) {
 	ip := IPv6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68}
 	var r Record

@@ -823,7 +823,7 @@ func (ns *NodeStateMachine) addTimeout(n *enode.Node, mask bitMask, timeout time
 	}
 }
 
-// removeTimeout removes node state timeouts associated to the given state flag(s).
+// removeTimeouts removes node state timeouts associated to the given state flag(s).
 // If a timeout was associated to multiple flags which are not all included in the
 // specified remove mask then only the included flags are de-associated and the timer
 // stays active.

@@ -299,7 +299,7 @@ func RegisterLifecycles(lifecycles LifecycleConstructors) {
 }
 
 // adds the host part to the configuration's ENR, signs it
-// creates and the corresponding enode object to the configuration
+// creates and adds the corresponding enode object to the configuration
 func (n *NodeConfig) initEnode(ip net.IP, tcpport int, udpport int) error {
 	enrIp := enr.IP(ip)
 	n.Record.Set(&enrIp)

@@ -838,7 +838,7 @@ func TestMsgFilterPassSingle(t *testing.T) {
 	})
 }
 
-// TestMsgFilterPassSingle tests streaming message events using an invalid
+// TestMsgFilterFailBadParams tests streaming message events using an invalid
 // filter
 func TestMsgFilterFailBadParams(t *testing.T) {
 	// start the server

@@ -535,7 +535,7 @@ func (net *Network) GetRandomUpNode(excludeIDs ...enode.ID) *Node {
 	return net.getRandomUpNode(excludeIDs...)
 }
 
-// GetRandomUpNode returns a random node on the network, which is running.
+// getRandomUpNode returns a random node on the network, which is running.
 func (net *Network) getRandomUpNode(excludeIDs ...enode.ID) *Node {
 	return net.getRandomNode(net.getUpNodeIDs(), excludeIDs)
 }

@@ -388,7 +388,7 @@ func (h *handler) startCallProc(fn func(*callProc)) {
 	}()
 }
 
-// handleResponse processes method call responses.
+// handleResponses processes method call responses.
 func (h *handler) handleResponses(batch []*jsonrpcMessage, handleCall func(*jsonrpcMessage)) {
 	var resolvedops []*requestOp
 	handleResp := func(msg *jsonrpcMessage) {

@@ -266,7 +266,7 @@ func (c *jsonCodec) close() {
 	})
 }
 
-// Closed returns a channel which will be closed when Close is called
+// closed returns a channel which will be closed when Close is called
 func (c *jsonCodec) closed() <-chan interface{} {
 	return c.closeCh
 }
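Note: closed hands callers a channel they can select on; the codec closes it exactly once during shutdown. The usual shape of that idiom (a generic sketch, not the rpc package's exact fields):

package sketch

import "sync"

type conn struct {
	closeOnce sync.Once
	closeCh   chan interface{}
}

// close closes closeCh exactly once, no matter how many times it is called.
func (c *conn) close() {
	c.closeOnce.Do(func() { close(c.closeCh) })
}

// closed returns a channel which will be closed when close is called.
func (c *conn) closed() <-chan interface{} {
	return c.closeCh
}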
@@ -235,10 +235,10 @@ func (c *mockConn) writeJSON(ctx context.Context, msg interface{}, isError bool)
 	return c.enc.Encode(msg)
 }
 
-// Closed returns a channel which is closed when the connection is closed.
+// closed returns a channel which is closed when the connection is closed.
 func (c *mockConn) closed() <-chan interface{} { return nil }
 
-// RemoteAddr returns the peer address of the connection.
+// remoteAddr returns the peer address of the connection.
 func (c *mockConn) remoteAddr() string { return "" }
 
 // BenchmarkNotify benchmarks the performance of notifying a subscription.

@@ -704,7 +704,7 @@ func formatPrimitiveValue(encType string, encValue interface{}) (string, error)
 	return "", fmt.Errorf("unhandled type %v", encType)
 }
 
-// Validate checks if the types object is conformant to the specs
+// validate checks if the types object is conformant to the specs
 func (t Types) validate() error {
 	for typeKey, typeArr := range t {
 		if len(typeKey) == 0 {

@@ -671,7 +671,7 @@ func TestGnosisTypedDataWithChainId(t *testing.T) {
 	}
 }
 
-// TestGnosisCustomData tests the scenario where a user submits only the gnosis-safe
+// TestGnosisCustomDataWithChainId tests the scenario where a user submits only the gnosis-safe
 // specific data, and we fill the TypedData struct on our side
 func TestGnosisCustomDataWithChainId(t *testing.T) {
 	t.Parallel()

@@ -108,7 +108,7 @@ type testFailure struct {
 	reason string
 }
 
-// skipShortMode skips tests matching when the -short flag is used.
+// slow adds expected slow tests matching the pattern.
 func (tm *testMatcher) slow(pattern string) {
 	tm.slowpat = append(tm.slowpat, regexp.MustCompile(pattern))
 }

@@ -198,7 +198,7 @@ func TestRangeProof(t *testing.T) {
 	}
 }
 
-// TestRangeProof tests normal range proof with two non-existent proofs.
+// TestRangeProofWithNonExistentProof tests normal range proof with two non-existent proofs.
 // The test cases are generated randomly.
 func TestRangeProofWithNonExistentProof(t *testing.T) {
 	trie, vals := randomTrie(4096)

@@ -1066,7 +1066,7 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
 	}
 }
 
-// BenchmarkCommitAfterHashFixedSize benchmarks the Commit (after Hash) of a fixed number of updates to a trie.
+// BenchmarkHashFixedSize benchmarks the hash of a fixed number of updates to a trie.
 // This benchmark is meant to capture the difference on efficiency of small versus large changes. Typically,
 // storage tries are small (a couple of entries), whereas the full post-block account trie update is large (a couple
 // of thousand entries)

@@ -305,7 +305,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
 	return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin)
 }
 
-// lastRoot returns the latest root hash, or empty if nothing is cached.
+// lastHash returns the latest root hash, or empty if nothing is cached.
 func (t *tester) lastHash() common.Hash {
 	if len(t.roots) == 0 {
 		return common.Hash{}

@@ -58,7 +58,7 @@ func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.C
 	}
 }
 
-// root implements the layer interface, returning root hash of corresponding state.
+// rootHash implements the layer interface, returning root hash of corresponding state.
 func (dl *diskLayer) rootHash() common.Hash {
 	return dl.root
 }

@@ -68,7 +68,7 @@ func (dl *diskLayer) stateID() uint64 {
 	return dl.id
 }
 
-// parent implements the layer interface, returning nil as there's no layer
+// parentLayer implements the layer interface, returning nil as there's no layer
 // below the disk.
 func (dl *diskLayer) parentLayer() layer {
 	return nil