all: remove the duplicate 'the' in annotations (#17509)
commit d1aa605f1e
parent 70398d300d
@@ -207,7 +207,7 @@ func bindTypeGo(kind abi.Type) string {
 
 // The inner function of bindTypeGo, this finds the inner type of stringKind.
 // (Or just the type itself if it is not an array or slice)
-// The length of the matched part is returned, with the the translated type.
+// The length of the matched part is returned, with the translated type.
 func bindUnnestedTypeGo(stringKind string) (int, string) {
 
 	switch {
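For readers unfamiliar with what the bindUnnestedTypeGo comment describes, the sketch below illustrates the idea only: match the leading elementary Solidity type in stringKind and report how many characters were consumed together with a translated Go type name. The regular expression and the type mapping are simplified stand-ins, not the actual tables used by the abigen bindings.

```go
// Illustrative sketch of the "unnested type" idea, not abigen's implementation.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var elementary = regexp.MustCompile(`^(address|bool|string|bytes(3[0-2]|[12]?[0-9])?|u?int(256|128|64|32|16|8)?)`)

func unnestedTypeSketch(stringKind string) (int, string) {
	match := elementary.FindString(stringKind)
	switch {
	case match == "":
		return len(stringKind), "interface{}" // unknown kinds fall back to an opaque type
	case match == "address":
		return len(match), "common.Address"
	case match == "bool" || match == "string":
		return len(match), match
	case strings.HasPrefix(match, "bytes"):
		return len(match), "[]byte"
	default:
		return len(match), "*big.Int" // every integer width simplified to big.Int here
	}
}

func main() {
	fmt.Println(unnestedTypeSketch("uint256[2][]")) // consumes the 7 characters of "uint256"
	fmt.Println(unnestedTypeSketch("bool"))
}
```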
@@ -255,7 +255,7 @@ func bindTypeJava(kind abi.Type) string {
 
 // The inner function of bindTypeJava, this finds the inner type of stringKind.
 // (Or just the type itself if it is not an array or slice)
-// The length of the matched part is returned, with the the translated type.
+// The length of the matched part is returned, with the translated type.
 func bindUnnestedTypeJava(stringKind string) (int, string) {
 
 	switch {
@@ -754,7 +754,7 @@ func extractIDFromEnode(s string) []byte {
 	return n.ID[:]
 }
 
-// obfuscateBloom adds 16 random bits to the the bloom
+// obfuscateBloom adds 16 random bits to the bloom
 // filter, in order to obfuscate the containing topics.
 // it does so deterministically within every session.
 // despite additional bits, it will match on average
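The obfuscateBloom comment describes a small privacy trick: seed a PRNG once per session and set 16 extra pseudo-random bits in the bloom filter, so repeated calls within a session add the same bits. The sketch below only illustrates that idea, assuming Whisper's 64-byte bloom size; it is not the actual wnode implementation.

```go
// Deterministic-per-session bloom obfuscation sketch (illustrative only).
package main

import (
	"fmt"
	"math/rand"
)

const bloomBits = 64 * 8 // assuming a 64-byte Whisper-style bloom filter

func obfuscateBloomSketch(bloom []byte, sessionSeed int64) {
	rng := rand.New(rand.NewSource(sessionSeed)) // same seed => same extra bits every call
	for i := 0; i < 16; i++ {
		bit := rng.Intn(bloomBits)
		bloom[bit/8] |= 1 << uint(bit%8)
	}
}

func main() {
	bloom := make([]byte, 64)
	obfuscateBloomSketch(bloom, 42)
	fmt.Printf("%x\n", bloom)
}
```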
@@ -117,7 +117,7 @@ func TestDecodingCycle(t *testing.T) {
 // TestCompression tests that compression works by returning either the bitset
 // encoded input, or the actual input if the bitset version is longer.
 func TestCompression(t *testing.T) {
-	// Check the the compression returns the bitset encoding is shorter
+	// Check the compression returns the bitset encoding is shorter
 	in := hexutil.MustDecode("0x4912385c0e7b64000000")
 	out := hexutil.MustDecode("0x80fe4912385c0e7b64")
 
@@ -127,7 +127,7 @@ func TestCompression(t *testing.T) {
 	if data, err := DecompressBytes(out, len(in)); err != nil || !bytes.Equal(data, in) {
 		t.Errorf("decoding mismatch for sparse data: have %x, want %x, error %v", data, in, err)
 	}
-	// Check the the compression returns the input if the bitset encoding is longer
+	// Check the compression returns the input if the bitset encoding is longer
 	in = hexutil.MustDecode("0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb")
 	out = hexutil.MustDecode("0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb")
 
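As a usage note for the round trip these tests exercise: bitutil.CompressBytes produces the bitset encoding and DecompressBytes needs the original length to restore it. The snippet below is a hedged example assuming the go-ethereum module is importable; the sample bytes match the sparse input from the hunk above.

```go
// Round-trip sketch of the bitset compression used in common/bitutil.
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common/bitutil"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	in := hexutil.MustDecode("0x4912385c0e7b64000000") // sparse: trailing zero bytes
	enc := bitutil.CompressBytes(in)                   // bitset encoding, shorter for sparse data
	dec, err := bitutil.DecompressBytes(enc, len(in))  // the target length must be known
	if err != nil || !bytes.Equal(dec, in) {
		panic(fmt.Sprintf("round trip failed: have %x, want %x (%v)", dec, in, err))
	}
	fmt.Printf("in=%d bytes, compressed=%d bytes\n", len(in), len(enc))
}
```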
@@ -43,7 +43,7 @@ type UserPrompter interface {
 	// choice to be made, returning that choice.
 	PromptConfirm(prompt string) (bool, error)
 
-	// SetHistory sets the the input scrollback history that the prompter will allow
+	// SetHistory sets the input scrollback history that the prompter will allow
 	// the user to scroll back to.
 	SetHistory(history []string)
 
@@ -149,7 +149,7 @@ func (p *terminalPrompter) PromptConfirm(prompt string) (bool, error) {
 		return false, err
 	}
 
-// SetHistory sets the the input scrollback history that the prompter will allow
+// SetHistory sets the input scrollback history that the prompter will allow
 // the user to scroll back to.
 func (p *terminalPrompter) SetHistory(history []string) {
 	p.State.ReadHistory(strings.NewReader(strings.Join(history, "\n")))
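The SetHistory comment amounts to: hand the previous inputs to the line editor so the user can arrow-key back to them. The sketch below drives github.com/peterh/liner (the library behind the terminal prompter) directly; the prompt text and history entries are made up for illustration.

```go
// Scrollback history sketch using the liner library directly.
package main

import (
	"fmt"
	"strings"

	"github.com/peterh/liner"
)

func main() {
	state := liner.NewLiner()
	defer state.Close()

	// Equivalent of SetHistory: load prior inputs, one per line.
	history := []string{"eth.blockNumber", "personal.listAccounts"}
	state.ReadHistory(strings.NewReader(strings.Join(history, "\n")))

	if input, err := state.Prompt("> "); err == nil {
		state.AppendHistory(input) // make the new input scrollable too
		fmt.Println("got:", input)
	}
}
```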
@@ -45,7 +45,7 @@ func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engin
 	return validator
 }
 
-// ValidateBody validates the given block's uncles and verifies the the block
+// ValidateBody validates the given block's uncles and verifies the block
 // header's transaction and uncle roots. The headers are assumed to be already
 // validated at this point.
 func (v *BlockValidator) ValidateBody(block *types.Block) error {
@@ -61,7 +61,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
 		allLogs []*types.Log
 		gp      = new(GasPool).AddGas(block.GasLimit())
 	)
-	// Mutate the the block and state according to any hard-fork specs
+	// Mutate the block and state according to any hard-fork specs
 	if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
 		misc.ApplyDAOHardFork(statedb)
 	}
@@ -40,7 +40,7 @@ type AccountRef common.Address
 func (ar AccountRef) Address() common.Address { return (common.Address)(ar) }
 
 // Contract represents an ethereum contract in the state database. It contains
-// the the contract code, calling arguments. Contract implements ContractRef
+// the contract code, calling arguments. Contract implements ContractRef
 type Contract struct {
 	// CallerAddress is the result of the caller which initialised this
 	// contract. However when the "call method" is delegated this value
@@ -86,7 +86,7 @@ func Sign(msg []byte, seckey []byte) ([]byte, error) {
 	return sig, nil
 }
 
-// RecoverPubkey returns the the public key of the signer.
+// RecoverPubkey returns the public key of the signer.
 // msg must be the 32-byte hash of the message to be signed.
 // sig must be a 65-byte compact ECDSA signature containing the
 // recovery id as the last element.
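The RecoverPubkey contract documented above (32-byte message hash, 65-byte signature with the recovery id last) is also exposed through go-ethereum's higher-level crypto package. The example below is a usage sketch of that wrapper, not of the secp256k1 package internals.

```go
// Sign/recover sketch using go-ethereum's crypto wrappers.
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	msgHash := crypto.Keccak256([]byte("hello whisper")) // 32-byte digest, never the raw message

	sig, err := crypto.Sign(msgHash, key) // 65 bytes: R || S || V (recovery id last)
	if err != nil {
		panic(err)
	}

	recovered, err := crypto.Ecrecover(msgHash, sig) // uncompressed public key bytes
	if err != nil {
		panic(err)
	}
	fmt.Println("signature bytes:", len(sig))
	fmt.Println("signer matches: ", bytes.Equal(recovered, crypto.FromECDSAPub(&key.PublicKey)))
}
```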
@@ -662,7 +662,7 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,
 			for _, header := range request.Headers {
 				taskQueue.Push(header, -float32(header.Number.Uint64()))
 			}
-			// Add the peer to the expiry report along the the number of failed requests
+			// Add the peer to the expiry report along the number of failed requests
 			expiries[id] = len(request.Headers)
 		}
 	}
@@ -204,7 +204,7 @@ func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time
 	}
 }
 
-// Enqueue tries to fill gaps the the fetcher's future import queue.
+// Enqueue tries to fill gaps the fetcher's future import queue.
 func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
 	op := &inject{
 		origin: peer,
@@ -77,7 +77,7 @@ type Config struct {
 	// Disabling is useful for protocol debugging (manual topology).
 	NoDiscovery bool
 
-	// DiscoveryV5 specifies whether the the new topic-discovery based V5 discovery
+	// DiscoveryV5 specifies whether the new topic-discovery based V5 discovery
 	// protocol should be started or not.
 	DiscoveryV5 bool `toml:",omitempty"`
 
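To show how the two discovery switches interact in practice, here is a hedged configuration sketch: NoDiscovery turns off the v4 discovery table, while DiscoveryV5 opts into the topic-discovery protocol. The field names come from p2p.Config as shown in the hunk; every other value is a placeholder.

```go
// p2p server configuration sketch illustrating the two discovery flags.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p"
)

func main() {
	key, _ := crypto.GenerateKey()
	cfg := p2p.Config{
		PrivateKey:  key,
		MaxPeers:    25,
		ListenAddr:  ":30303",
		NoDiscovery: false, // keep the v4 discovery table running
		DiscoveryV5: true,  // additionally run the topic-discovery (v5) protocol
	}
	srv := &p2p.Server{Config: cfg}
	fmt.Println("v5 discovery enabled:", srv.Config.DiscoveryV5)
}
```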
@@ -153,7 +153,7 @@ func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
 
 // Resolve resolves address by choosing a Resolver by TLD.
 // If there are more default Resolvers, or for a specific TLD,
-// the Hash from the the first one which does not return error
+// the Hash from the first one which does not return error
 // will be returned.
 func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) {
 	rs, err := m.getResolveValidator(addr)
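The Resolve comment describes a fallback policy: pick the resolvers registered for the address's TLD (or the defaults) and return the hash from the first one that does not error. The sketch below models that policy with simplified stand-in types rather than the swarm MultiResolver API.

```go
// Generic first-success-wins resolver fallback, illustrative types only.
package main

import (
	"errors"
	"fmt"
	"strings"
)

type resolver func(addr string) (string, error)

type multiResolver struct {
	byTLD map[string][]resolver // "" holds the default resolvers
}

func (m *multiResolver) Resolve(addr string) (string, error) {
	tld := ""
	if i := strings.LastIndex(addr, "."); i >= 0 {
		tld = addr[i+1:]
	}
	rs, ok := m.byTLD[tld]
	if !ok {
		rs = m.byTLD[""] // fall back to the default resolvers
	}
	lastErr := errors.New("no resolver configured for " + addr)
	for _, r := range rs {
		h, err := r(addr)
		if err == nil {
			return h, nil // the first resolver that does not error wins
		}
		lastErr = err
	}
	return "", lastErr
}

func main() {
	m := &multiResolver{byTLD: map[string][]resolver{
		"eth": {
			func(string) (string, error) { return "", errors.New("ens offline") },
			func(string) (string, error) { return "0xabc123", nil },
		},
	}}
	fmt.Println(m.Resolve("theswarm.eth"))
}
```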
@@ -133,7 +133,7 @@ As part of the deletion protocol then, hashes of insured chunks to be removed ar
 Downstream peer on the other hand needs to make sure that they can only be finger pointed about a chunk they did receive and store.
 For this the check of a state should be exhaustive. If historical syncing finishes on one state, all hashes before are covered, no
 surprises. In other words historical syncing this process is self verifying. With session syncing however, it is not enough to check going back covering the range from old offset to new. Continuity (i.e., that the new state is extension of the old) needs to be verified: after downstream peer reads the range into a buffer, it appends the buffer the last known state at the last known offset and verifies the resulting hash matches
-the latest state. Past intervals of historical syncing are checked via the the session root.
+the latest state. Past intervals of historical syncing are checked via the session root.
 Upstream peer signs the states, downstream peers can use as handover proofs.
 Downstream peers sign off on a state together with an initial offset.
 
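The paragraph in this hunk describes a continuity check: the downstream peer keeps its last known state and offset, reads the new range into a buffer, chains it onto the last known state and verifies the result equals the advertised latest state. The sketch below illustrates the check with SHA-256 chaining as a stand-in for swarm's actual state and session-root construction.

```go
// Continuity-check sketch: chain the received range onto the last known state.
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

func nextState(prevState, rangeBuf []byte) []byte {
	h := sha256.New()
	h.Write(prevState)
	h.Write(rangeBuf)
	return h.Sum(nil)
}

// verifyContinuity reports whether chaining the received range onto the last
// known state reproduces the latest state the upstream peer advertised.
func verifyContinuity(lastState, rangeBuf, latestState []byte) bool {
	return bytes.Equal(nextState(lastState, rangeBuf), latestState)
}

func main() {
	last := nextState(nil, []byte("hashes 0..100"))
	latest := nextState(last, []byte("hashes 100..180"))
	fmt.Println(verifyContinuity(last, []byte("hashes 100..180"), latest)) // true
	fmt.Println(verifyContinuity(last, []byte("tampered range"), latest))  // false
}
```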
@@ -101,7 +101,7 @@ func (i *Intervals) add(start, end uint64) {
 	}
 }
 
-// Merge adds all the intervals from the the m Interval to current one.
+// Merge adds all the intervals from the m Interval to current one.
 func (i *Intervals) Merge(m *Intervals) {
 	m.mu.RLock()
 	defer m.mu.RUnlock()
@@ -182,7 +182,7 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
 		conf.addrToIDMap[string(a)] = n
 	}
 
-	//get the the node at that index
+	//get the node at that index
 	//this is the node selected for upload
 	node := sim.RandomUpNode()
 	item, ok := sim.NodeItem(node.ID, bucketKeyStore)
@@ -291,7 +291,7 @@ func (w *Whisper) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
 	return id, nil
 }
 
-// HasKeyPair checks if the the whisper node is configured with the private key
+// HasKeyPair checks if the whisper node is configured with the private key
 // of the specified public pair.
 func (w *Whisper) HasKeyPair(id string) bool {
 	w.keyMu.RLock()
@@ -423,7 +423,7 @@ func (whisper *Whisper) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
 	return id, nil
 }
 
-// HasKeyPair checks if the the whisper node is configured with the private key
+// HasKeyPair checks if the whisper node is configured with the private key
 // of the specified public pair.
 func (whisper *Whisper) HasKeyPair(id string) bool {
 	whisper.keyMu.RLock()
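For both Whisper variants touched above, HasKeyPair answers whether the node currently holds the private key behind a key-pair id. The snippet below is a usage sketch against the whisperv6 package as vendored in this tree; the import path and the dummy id are assumptions.

```go
// Key-pair lifecycle sketch for the Whisper v6 API.
package main

import (
	"fmt"

	whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
)

func main() {
	w := whisper.New(&whisper.DefaultConfig)

	id, err := w.NewKeyPair() // generates and stores a new asymmetric key pair
	if err != nil {
		panic(err)
	}
	fmt.Println("known id:  ", w.HasKeyPair(id))         // true
	fmt.Println("unknown id:", w.HasKeyPair("deadbeef")) // false

	if ok := w.DeleteKeyPair(id); ok {
		fmt.Println("after delete:", w.HasKeyPair(id)) // false again
	}
}
```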