Merge pull request #1146 from qinglin89/develop-upstream20

all: sync with upstream
zjubfd 2022-10-31 14:05:09 +08:00 committed by GitHub
commit 6b83c41123
11 changed files with 42 additions and 55 deletions

@@ -184,7 +184,7 @@ HTTP based JSON-RPC API options:
* `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
* `--ws.origins` Origins from which to accept websockets requests
* `--ipcdisable` Disable the IPC-RPC server
* `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,shh,txpool,web3`)
* `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,txpool,web3`)
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
You'll need to use your own programming environments' capabilities (libraries, tools, etc) to
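
The removal of `shh` from the `--ipcapi` default reflects Whisper being dropped upstream; the remaining flags control which API namespaces a node exposes over IPC. As a hedged sketch of how that endpoint is consumed from Go (the socket path is a placeholder for your own datadir):

// Minimal sketch: attach to a running node's IPC endpoint and query the chain ID.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("/path/to/datadir/geth.ipc") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	id, err := client.ChainID(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("chain id:", id)
}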

@@ -296,12 +296,12 @@ var (
defaultVerifyMode = ethconfig.Defaults.TriesVerifyMode
TriesVerifyModeFlag = TextMarshalerFlag{
Name: "tries-verify-mode",
Usage: `tries verify mode:
"local(default): a normal full node with complete state world(both MPT and snapshot), merkle state root will
be verified against the block header.",
"full: a fast node with only snapshot state world. Merkle state root is verified by the trustworthy remote verify node
by comparing the diffhash(an identify of difflayer generated by the block) and state root.",
"insecure: same as full mode, except that it can tolerate without verifying the diffhash when verify node does not have it.",
Usage: `tries verify mode:
"local(default): a normal full node with complete state world(both MPT and snapshot), merkle state root will
be verified against the block header.",
"full: a fast node with only snapshot state world. Merkle state root is verified by the trustworthy remote verify node
by comparing the diffhash(an identify of difflayer generated by the block) and state root.",
"insecure: same as full mode, except that it can tolerate without verifying the diffhash when verify node does not have it.",
"none: no merkle state root verification at all, there is no need to setup or connect remote verify node at all,
it is more light comparing to full and insecure mode, but get a very small chance that the state is not consistent
with other peers."`,
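
Since `tries-verify-mode` is declared as a `TextMarshalerFlag`, its value has to satisfy `encoding.TextMarshaler`/`encoding.TextUnmarshaler`. The sketch below is a hypothetical, self-contained mode type with the four documented values (local, full, insecure, none); the name `verifyMode`, the constants, and the parsing are illustrative only, not BSC's actual implementation.

// Hypothetical text-marshalable verify mode, for illustration of how a
// TextMarshalerFlag value is typically parsed. Not the project's real type.
package main

import "fmt"

type verifyMode int

const (
	localVerify verifyMode = iota
	fullVerify
	insecureVerify
	noneVerify
)

func (m verifyMode) MarshalText() ([]byte, error) {
	switch m {
	case localVerify:
		return []byte("local"), nil
	case fullVerify:
		return []byte("full"), nil
	case insecureVerify:
		return []byte("insecure"), nil
	case noneVerify:
		return []byte("none"), nil
	}
	return nil, fmt.Errorf("unknown verify mode %d", m)
}

func (m *verifyMode) UnmarshalText(text []byte) error {
	switch string(text) {
	case "local":
		*m = localVerify
	case "full":
		*m = fullVerify
	case "insecure":
		*m = insecureVerify
	case "none":
		*m = noneVerify
	default:
		return fmt.Errorf("unknown verify mode %q", text)
	}
	return nil
}

func main() {
	var m verifyMode
	if err := m.UnmarshalText([]byte("full")); err != nil {
		panic(err)
	}
	out, _ := m.MarshalText()
	fmt.Println(string(out)) // full
}
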
@@ -975,10 +975,11 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
urls = params.RinkebyBootnodes
case ctx.GlobalBool(GoerliFlag.Name):
urls = params.GoerliBootnodes
case cfg.BootstrapNodes != nil:
return // already set, don't apply defaults.
}
// don't apply defaults if BootstrapNodes is already set
if cfg.BootstrapNodes != nil {
return
}
cfg.BootstrapNodes = make([]*enode.Node, 0, len(urls))
for _, url := range urls {
if url != "" {
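
Moving the `cfg.BootstrapNodes != nil` check out of the switch means a list that is already populated (for example from a config file) is now respected even when one of the network flags above selected default URLs. A compact, self-contained sketch of that pattern; the config type and enode strings are placeholders:

package main

import "fmt"

type p2pConfig struct {
	BootstrapNodes []string
}

// applyDefaultBootnodes mirrors the reordered logic: network defaults are
// chosen first, but an already-populated list short-circuits before they
// are applied.
func applyDefaultBootnodes(cfg *p2pConfig, defaults []string) {
	if cfg.BootstrapNodes != nil {
		return // already set, don't apply defaults
	}
	cfg.BootstrapNodes = append([]string(nil), defaults...)
}

func main() {
	cfg := &p2pConfig{BootstrapNodes: []string{"enode://from-config-file@127.0.0.1:30303"}}
	applyDefaultBootnodes(cfg, []string{"enode://network-default@10.0.0.1:30303"})
	fmt.Println(cfg.BootstrapNodes) // the config-file entry is kept
}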

@@ -25,6 +25,8 @@ func TestStorageSizeString(t *testing.T) {
size StorageSize
str string
}{
{2839274474874, "2.58 TiB"},
{2458492810, "2.29 GiB"},
{2381273, "2.27 MiB"},
{2192, "2.14 KiB"},
{12, "12.00 B"},
@@ -36,3 +38,22 @@ func TestStorageSizeString(t *testing.T) {
}
}
}
func TestStorageSizeTerminalString(t *testing.T) {
tests := []struct {
size StorageSize
str string
}{
{2839274474874, "2.58TiB"},
{2458492810, "2.29GiB"},
{2381273, "2.27MiB"},
{2192, "2.14KiB"},
{12, "12.00B"},
}
for _, test := range tests {
if test.size.TerminalString() != test.str {
t.Errorf("%f: got %q, want %q", float64(test.size), test.size.TerminalString(), test.str)
}
}
}
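
For reference, `common.StorageSize` pretty-prints byte counts; judging from the two test tables, `String` puts a space before the unit while the newly covered `TerminalString` omits it. A small usage sketch, assuming the go-ethereum `common` package:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	size := common.StorageSize(2381273)
	fmt.Println(size.String())         // "2.27 MiB"
	fmt.Println(size.TerminalString()) // "2.27MiB"
}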

@@ -18,15 +18,11 @@ package snapshot
import (
"bytes"
"io/ioutil"
"os"
"testing"
"github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/leveldb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -518,18 +514,7 @@ func TestDiskMidAccountPartialMerge(t *testing.T) {
// TestDiskSeek tests that seek-operations work on the disk layer
func TestDiskSeek(t *testing.T) {
// Create some accounts in the disk layer
var db ethdb.Database
if dir, err := ioutil.TempDir("", "disklayer-test"); err != nil {
t.Fatal(err)
} else {
defer os.RemoveAll(dir)
diskdb, err := leveldb.New(dir, 256, 0, "", false)
if err != nil {
t.Fatal(err)
}
db = rawdb.NewDatabase(diskdb)
}
db := rawdb.NewMemoryDatabase()
// Fill even keys [0,2,4...]
for i := 0; i < 0xff; i += 2 {
acc := common.Hash{byte(i)}
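
Swapping the temp-dir LevelDB for `rawdb.NewMemoryDatabase()` drops the filesystem setup and cleanup entirely while still yielding an `ethdb.Database`, so the rest of the test body is untouched. A minimal sketch of the in-memory database on its own, assuming the go-ethereum `rawdb` package:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase() // ethdb.Database backed by memorydb, nothing on disk
	defer db.Close()

	if err := db.Put([]byte("key"), []byte("value")); err != nil {
		log.Fatal(err)
	}
	val, err := db.Get([]byte("key"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", val) // value
}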

@@ -54,11 +54,7 @@ type fromBufFn = func(vm *goja.Runtime, buf goja.Value, allowString bool) ([]byt
func toBuf(vm *goja.Runtime, bufType goja.Value, val []byte) (goja.Value, error) {
// bufType is usually Uint8Array. This is equivalent to `new Uint8Array(val)` in JS.
res, err := vm.New(bufType, vm.ToValue(val))
if err != nil {
return nil, err
}
return vm.ToValue(res), nil
return vm.New(bufType, vm.ToValue(vm.NewArrayBuffer(val)))
}
func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString bool) ([]byte, error) {
@@ -69,6 +65,7 @@ func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString b
break
}
return common.FromHex(obj.String()), nil
case "Array":
var b []byte
if err := vm.ExportTo(buf, &b); err != nil {
@@ -80,10 +77,7 @@ func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString b
if !obj.Get("constructor").SameAs(bufType) {
break
}
var b []byte
if err := vm.ExportTo(buf, &b); err != nil {
return nil, err
}
b := obj.Get("buffer").Export().(goja.ArrayBuffer).Bytes()
return b, nil
}
return nil, fmt.Errorf("invalid buffer type")
@@ -775,7 +769,7 @@ func (co *contractObj) GetValue() goja.Value {
}
func (co *contractObj) GetInput() goja.Value {
input := co.contract.Input
input := common.CopyBytes(co.contract.Input)
res, err := co.toBuf(co.vm, input)
if err != nil {
co.vm.Interrupt(err)
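
With `vm.NewArrayBuffer`, the `Uint8Array` that `toBuf` returns is backed directly by the Go slice, and `fromBuf` can read the bytes straight off the typed array's `buffer` property; that sharing is presumably why `GetInput` now hands out `common.CopyBytes(co.contract.Input)` instead of the contract's own slice. A standalone sketch of the round trip, using the same goja calls as the diff (the byte values are arbitrary):

package main

import (
	"fmt"
	"log"

	"github.com/dop251/goja"
)

func main() {
	vm := goja.New()
	data := []byte{0xde, 0xad, 0xbe, 0xef}

	// Wrap the Go slice in an ArrayBuffer and view it as a Uint8Array,
	// mirroring the new toBuf: the typed array shares data's backing memory.
	ctor := vm.Get("Uint8Array")
	arr, err := vm.New(ctor, vm.ToValue(vm.NewArrayBuffer(data)))
	if err != nil {
		log.Fatal(err)
	}

	// Recover the bytes the way the new fromBuf does: via the "buffer" property.
	buf := arr.Get("buffer").Export().(goja.ArrayBuffer)
	fmt.Printf("%x\n", buf.Bytes()) // deadbeef

	// Because the memory is shared, mutating data is visible to the JS side,
	// which is why callers that must stay immutable should copy first.
	data[0] = 0x00
	fmt.Printf("%x\n", buf.Bytes()) // 00adbeef
}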

@@ -563,12 +563,6 @@ func (s *PrivateAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Byt
return crypto.PubkeyToAddress(*rpk), nil
}
// SignAndSendTransaction was renamed to SendTransaction. This method is deprecated
// and will be removed in the future. It primary goal is to give clients time to update.
func (s *PrivateAccountAPI) SignAndSendTransaction(ctx context.Context, args TransactionArgs, passwd string) (common.Hash, error) {
return s.SendTransaction(ctx, args, passwd)
}
// InitializeWallet initializes a new wallet at the provided URL, by generating and returning a new private key.
func (s *PrivateAccountAPI) InitializeWallet(ctx context.Context, url string) (string, error) {
wallet, err := s.am.Wallet(url)
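
With the deprecated `personal_signAndSendTransaction` alias removed, callers should invoke `personal_sendTransaction` directly (the method `SendTransaction` is registered under). A hedged sketch over the raw RPC client; the endpoint, addresses, value and passphrase are all placeholders, and the node must expose the personal namespace:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Same arguments the removed alias used to forward: a transaction object
	// plus the account passphrase.
	tx := map[string]interface{}{
		"from":  "0x0000000000000000000000000000000000000001",
		"to":    "0x0000000000000000000000000000000000000002",
		"value": "0x1",
	}
	var txHash string
	if err := client.CallContext(context.Background(), &txHash, "personal_sendTransaction", tx, "passphrase"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("tx:", txHash)
}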

@@ -40,7 +40,6 @@ import (
type Backend interface {
BlockChain() *core.BlockChain
TxPool() *core.TxPool
StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error)
}
// Config is the configuration parameters of mining.

@@ -786,16 +786,6 @@ func (w *worker) makeEnv(parent *types.Block, header *types.Header, coinbase com
// Retrieve the parent state to execute on top and start a prefetcher for
// the miner to speed block sealing up a bit
state, err := w.chain.StateAtWithSharedPool(parent.Root())
if err != nil {
// Note since the sealing block can be created upon the arbitrary parent
// block, but the state of parent block may already be pruned, so the necessary
// state recovery is needed here in the future.
//
// The maximum acceptable reorg depth can be limited by the finalised block
// somehow. TODO(rjl493456442) fix the hard-coded number here later.
state, err = w.eth.StateAtBlock(parent, 1024, nil, false, false)
log.Warn("Recovered mining state", "root", parent.Root(), "err", err)
}
if err != nil {
return nil, err
}
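
Together with dropping `StateAtBlock` from the miner `Backend` interface above, this removes the pruned-parent recovery path: if the parent state cannot be opened, block sealing now fails with that error instead of re-executing up to 1024 blocks. A toy illustration of the simplified control flow; every type here is a stand-in, not the miner's real one:

package main

import (
	"errors"
	"fmt"
)

var errStatePruned = errors.New("parent state not available")

type fakeChain struct{ hasState bool }

func (c *fakeChain) StateAt(root string) (string, error) {
	if !c.hasState {
		return "", errStatePruned
	}
	return "state@" + root, nil
}

// makeEnv mirrors the post-change flow: open the parent state and propagate
// the error as-is, with no re-execution fallback.
func makeEnv(chain *fakeChain, parentRoot string) (string, error) {
	state, err := chain.StateAt(parentRoot)
	if err != nil {
		return "", err
	}
	return state, nil
}

func main() {
	if _, err := makeEnv(&fakeChain{hasState: false}, "0xroot"); err != nil {
		fmt.Println("sealing aborted:", err)
	}
}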

@@ -48,7 +48,7 @@ func TestNewID(t *testing.T) {
func TestSubscriptions(t *testing.T) {
var (
namespaces = []string{"eth", "shh", "bzz"}
namespaces = []string{"eth", "bzz"}
service = &notificationTestService{}
subCount = len(namespaces)
notificationCount = 3
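
The namespace list drops `shh` now that the Whisper service is gone upstream; each remaining namespace presumably still contributes one subscription to `subCount`. For context, this is roughly how a client subscribes under a given namespace; the WebSocket endpoint and payload type are assumptions:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("ws://127.0.0.1:8546") // subscriptions need ws or ipc, not http
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	heads := make(chan map[string]interface{})
	// Subscribing in the "eth" namespace issues eth_subscribe("newHeads").
	sub, err := client.Subscribe(context.Background(), "eth", heads, "newHeads")
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	fmt.Println("new head:", (<-heads)["number"])
}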

@@ -40,6 +40,9 @@ func Fuzz(input []byte) int {
if len(input) == 0 {
return 0
}
if len(input) > 500*1024 {
return 0
}
var i int
{

@@ -841,7 +841,7 @@ func (c *cleaner) Put(key []byte, rlp []byte) error {
delete(c.db.dirties, hash)
c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
if node.children != nil {
c.db.dirtiesSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
c.db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
}
// Move the flushed node into the clean cache to prevent insta-reloads
if c.db.cleans != nil {