les, light: remove support for les/1 4096 block CHT sections

Zsolt Felfoldi 2019-04-05 17:40:03 +02:00
parent 5515f364ae
commit 64f9c1ea09
8 changed files with 26 additions and 68 deletions

@@ -119,7 +119,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
 	leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool)
 	leth.odr = NewLesOdr(chainDb, light.DefaultClientIndexerConfig, leth.retriever)
-	leth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequencyClient, params.HelperTrieConfirmations)
+	leth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequency, params.HelperTrieConfirmations)
 	leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, leth.odr, params.BloomBitsBlocksClient, params.BloomTrieFrequency)
 	leth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer)

@@ -135,8 +135,7 @@ func (b *benchmarkHelperTrie) init(pm *ProtocolManager, count int) error {
 		b.sectionCount, b.headNum, _ = pm.server.bloomTrieIndexer.Sections()
 	} else {
 		b.sectionCount, _, _ = pm.server.chtIndexer.Sections()
-		b.sectionCount /= (params.CHTFrequencyClient / params.CHTFrequencyServer)
-		b.headNum = b.sectionCount*params.CHTFrequencyClient - 1
+		b.headNum = b.sectionCount*params.CHTFrequency - 1
 	}
 	if b.sectionCount == 0 {
 		return fmt.Errorf("no processed sections available")

@@ -80,28 +80,16 @@ func (c *lesCommons) nodeInfo() interface{} {
 	sections, _, _ := c.chtIndexer.Sections()
 	sections2, _, _ := c.bloomTrieIndexer.Sections()
-	if !c.protocolManager.lightSync {
-		// convert to client section size if running in server mode
-		sections /= c.iConfig.PairChtSize / c.iConfig.ChtSize
-	}
 	if sections2 < sections {
 		sections = sections2
 	}
 	if sections > 0 {
 		sectionIndex := sections - 1
 		sectionHead := c.bloomTrieIndexer.SectionHead(sectionIndex)
-		var chtRoot common.Hash
-		if c.protocolManager.lightSync {
-			chtRoot = light.GetChtRoot(c.chainDb, sectionIndex, sectionHead)
-		} else {
-			idxV2 := (sectionIndex+1)*c.iConfig.PairChtSize/c.iConfig.ChtSize - 1
-			chtRoot = light.GetChtRoot(c.chainDb, idxV2, sectionHead)
-		}
 		cht = params.TrustedCheckpoint{
 			SectionIndex: sectionIndex,
 			SectionHead:  sectionHead,
-			CHTRoot:      chtRoot,
+			CHTRoot:      light.GetChtRoot(c.chainDb, sectionIndex, sectionHead),
 			BloomRoot:    light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead),
 		}
 	}

@@ -1075,9 +1075,8 @@ func (pm *ProtocolManager) getAccount(triedb *trie.Database, root, hash common.H
 func (pm *ProtocolManager) getHelperTrie(id uint, idx uint64) (common.Hash, string) {
 	switch id {
 	case htCanonical:
-		idxV1 := (idx+1)*(pm.iConfig.PairChtSize/pm.iConfig.ChtSize) - 1
-		sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idxV1+1)*pm.iConfig.ChtSize-1)
-		return light.GetChtRoot(pm.chainDb, idxV1, sectionHead), light.ChtTablePrefix
+		sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*pm.iConfig.ChtSize-1)
+		return light.GetChtRoot(pm.chainDb, idx, sectionHead), light.ChtTablePrefix
 	case htBloomBits:
 		sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*pm.iConfig.BloomTrieSize-1)
 		return light.GetBloomTrieRoot(pm.chainDb, idx, sectionHead), light.BloomTrieTablePrefix
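With the LES/1 index translation gone, getHelperTrie above derives the section head directly from a single section size. A minimal standalone sketch of that arithmetic (the constant mirrors params.CHTFrequency from the params hunk at the end of this diff; the helper name is made up for illustration):

```go
package main

import "fmt"

const chtFrequency = 32768 // same value as params.CHTFrequency introduced by this commit

// chtSectionHead is a hypothetical helper illustrating the arithmetic in getHelperTrie:
// section idx covers blocks [idx*chtFrequency, (idx+1)*chtFrequency-1], so its head
// (the block whose canonical hash is looked up) is the last block of that range.
func chtSectionHead(idx uint64) uint64 {
	return (idx+1)*chtFrequency - 1
}

func main() {
	fmt.Println(chtSectionHead(0)) // 32767
	fmt.Println(chtSectionHead(2)) // 98303
}
```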

@@ -342,43 +342,37 @@ func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }
 func testGetCHTProofs(t *testing.T, protocol int) {
 	config := light.TestServerIndexerConfig
-	frequency := config.ChtSize
-	if protocol == 2 { //qqq
-		frequency = config.PairChtSize
-	}
 	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
-		expectSections := frequency / config.ChtSize
 		for {
 			cs, _, _ := cIndexer.Sections()
-			bs, _, _ := bIndexer.Sections()
-			if cs >= expectSections && bs >= expectSections {
+			if cs >= 1 {
 				break
 			}
 			time.Sleep(10 * time.Millisecond)
 		}
 	}
-	server, tearDown := newServerEnv(t, int(frequency+config.ChtConfirms), protocol, waitIndexers)
+	server, tearDown := newServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers)
 	defer tearDown()
 	bc := server.pm.blockchain.(*core.BlockChain)
 	// Assemble the proofs from the different protocols
-	header := bc.GetHeaderByNumber(frequency - 1)
+	header := bc.GetHeaderByNumber(config.ChtSize - 1)
 	rlp, _ := rlp.EncodeToBytes(header)
 	key := make([]byte, 8)
-	binary.BigEndian.PutUint64(key, frequency-1)
+	binary.BigEndian.PutUint64(key, config.ChtSize-1)
 	proofsV2 := HelperTrieResps{
 		AuxData: [][]byte{rlp},
 	}
-	root := light.GetChtRoot(server.db, (frequency/config.ChtSize)-1, bc.GetHeaderByNumber(frequency-1).Hash())
+	root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
 	trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
 	trie.Prove(key, 0, &proofsV2.Proofs)
 	// Assemble the requests for the different protocols
 	requestsV2 := []HelperTrieReq{{
 		Type:    htCanonical,
-		TrieIdx: frequency/config.PairChtSize - 1,
+		TrieIdx: 0,
 		Key:     key,
 		AuxReq:  auxHeader,
 	}}
@@ -396,10 +390,8 @@ func TestGetBloombitsProofs(t *testing.T) {
 	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
 		for {
-			cs, _, _ := cIndexer.Sections()
-			bs, _, _ := bIndexer.Sections()
 			bts, _, _ := btIndexer.Sections()
-			if cs >= 8 && bs >= 8 && bts >= 1 {
+			if bts >= 1 {
 				break
 			}
 			time.Sleep(10 * time.Millisecond)

@@ -89,7 +89,7 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
 			config:           config,
 			chainDb:          eth.ChainDb(),
 			iConfig:          light.DefaultServerIndexerConfig,
-			chtIndexer:       light.NewChtIndexer(eth.ChainDb(), nil, params.CHTFrequencyServer, params.HelperTrieProcessConfirmations),
+			chtIndexer:       light.NewChtIndexer(eth.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations),
 			bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency),
 			protocolManager:  pm,
 		},
@@ -108,15 +108,11 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
 	srv.thcBlockProcessing = config.LightServ/100 + 1
 	srv.fcManager = flowcontrol.NewClientManager(nil, &mclock.System{})
 
-	chtV1SectionCount, _, _ := srv.chtIndexer.Sections() // indexer still uses LES/1 4k section size for backwards server compatibility
-	chtV2SectionCount := chtV1SectionCount / (params.CHTFrequencyClient / params.CHTFrequencyServer)
-	if chtV2SectionCount != 0 {
-		// convert to LES/2 section
-		chtLastSection := chtV2SectionCount - 1
-		// convert last LES/2 section index back to LES/1 index for chtIndexer.SectionHead
-		chtLastSectionV1 := (chtLastSection+1)*(params.CHTFrequencyClient/params.CHTFrequencyServer) - 1
-		chtSectionHead := srv.chtIndexer.SectionHead(chtLastSectionV1)
-		chtRoot := light.GetChtRoot(pm.chainDb, chtLastSectionV1, chtSectionHead)
+	chtSectionCount, _, _ := srv.chtIndexer.Sections()
+	if chtSectionCount != 0 {
+		chtLastSection := chtSectionCount - 1
+		chtSectionHead := srv.chtIndexer.SectionHead(chtLastSection)
+		chtRoot := light.GetChtRoot(pm.chainDb, chtLastSection, chtSectionHead)
 		logger.Info("Loaded CHT", "section", chtLastSection, "head", chtSectionHead, "root", chtRoot)
 	}
 	bloomTrieSectionCount, _, _ := srv.bloomTrieIndexer.Sections()
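For context, the code removed above existed only to translate between the two old section sizes: one client-size (LES/2) section spanned eight server-size (LES/1) sections. A rough, self-contained sketch of that now-removed mapping, using the old constant values from the params hunk at the end of this diff (all names here are illustrative):

```go
package main

import "fmt"

// Old constants, removed by this commit (see the params hunk below).
const (
	chtFrequencyServer = 4096  // old params.CHTFrequencyServer
	chtFrequencyClient = 32768 // old params.CHTFrequencyClient
)

func main() {
	ratio := uint64(chtFrequencyClient / chtFrequencyServer) // 8 server sections per client section

	// Server-side section count reduced to the client-visible (LES/2) count,
	// as the removed chtV2SectionCount computation did.
	serverSections := uint64(100)
	clientSections := serverSections / ratio // 12

	// Last client section mapped back to the server-size index holding its head,
	// as the removed chtLastSectionV1 computation did.
	clientLast := clientSections - 1
	serverLast := (clientLast+1)*ratio - 1 // 95

	fmt.Println(ratio, clientSections, serverLast) // 8 12 95
	// With a single CHTFrequency both conversions disappear: the indexer's own
	// section index is used directly, as in the replacement code above.
}
```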

@@ -41,9 +41,6 @@ type IndexerConfig struct {
 	// The block frequency for creating CHTs.
 	ChtSize uint64
 
-	// A special auxiliary field represents client's chtsize for server config, otherwise represents server's chtsize.
-	PairChtSize uint64
-
 	// The number of confirmations needed to generate/accept a canonical hash help trie.
 	ChtConfirms uint64
@@ -64,8 +61,7 @@ type IndexerConfig struct {
 var (
 	// DefaultServerIndexerConfig wraps a set of configs as a default indexer config for server side.
 	DefaultServerIndexerConfig = &IndexerConfig{
-		ChtSize: params.CHTFrequencyServer,
-		PairChtSize: params.CHTFrequencyClient,
+		ChtSize: params.CHTFrequency,
 		ChtConfirms: params.HelperTrieProcessConfirmations,
 		BloomSize: params.BloomBitsBlocks,
 		BloomConfirms: params.BloomConfirms,
@@ -74,8 +70,7 @@ var (
 	}
 	// DefaultClientIndexerConfig wraps a set of configs as a default indexer config for client side.
 	DefaultClientIndexerConfig = &IndexerConfig{
-		ChtSize: params.CHTFrequencyClient,
-		PairChtSize: params.CHTFrequencyServer,
+		ChtSize: params.CHTFrequency,
 		ChtConfirms: params.HelperTrieConfirmations,
 		BloomSize: params.BloomBitsBlocksClient,
 		BloomConfirms: params.HelperTrieConfirmations,
@@ -84,8 +79,7 @@ var (
 	}
 	// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
 	TestServerIndexerConfig = &IndexerConfig{
-		ChtSize: 64,
-		PairChtSize: 512,
+		ChtSize: 512,
 		ChtConfirms: 4,
 		BloomSize: 64,
 		BloomConfirms: 4,
@@ -95,7 +89,6 @@ var (
 	// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
 	TestClientIndexerConfig = &IndexerConfig{
 		ChtSize: 512,
-		PairChtSize: 64,
 		ChtConfirms: 32,
 		BloomSize: 512,
 		BloomConfirms: 32,
@@ -116,7 +109,7 @@ var (
 	ErrNoTrustedCht = errors.New("no trusted canonical hash trie")
 	ErrNoTrustedBloomTrie = errors.New("no trusted bloom trie")
 	ErrNoHeader = errors.New("header not found")
-	chtPrefix = []byte("chtRoot-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
+	chtPrefix = []byte("chtRootV2-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
 	ChtTablePrefix = "cht-"
 )
@@ -127,7 +120,6 @@ type ChtNode struct {
 }
 
 // GetChtRoot reads the CHT root associated to the given section from the database
-// Note that sectionIdx is specified according to LES/1 CHT section size.
 func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
 	var encNumber [8]byte
 	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
@@ -136,7 +128,6 @@ func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) c
 }
 
 // StoreChtRoot writes the CHT root associated to the given section into the database
-// Note that sectionIdx is specified according to LES/1 CHT section size.
 func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
 	var encNumber [8]byte
 	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
@@ -163,7 +154,7 @@ func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64) *co
 		triedb:      trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
 		sectionSize: size,
 	}
-	return core.NewChainIndexer(db, rawdb.NewTable(db, "chtIndex-"), backend, size, confirms, time.Millisecond*100, "cht")
+	return core.NewChainIndexer(db, rawdb.NewTable(db, "chtIndexV2-"), backend, size, confirms, time.Millisecond*100, "cht")
 }
 
 // fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
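Note that both the root key prefix (chtRoot- to chtRootV2-, a few hunks above) and the indexer's progress table (chtIndex- to chtIndexV2- here) are renamed, so data built with the unified 32768-block sections lives under fresh keys, and entries stored under the old prefixes are simply no longer read. A small sketch of the root key layout as described by the chtPrefix comment; the real key presumably also folds in the section head hash that GetChtRoot takes, so treat this as illustrative only:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// chtPrefix + chtNum (uint64 big endian), per the comment on chtPrefix above.
	prefix := []byte("chtRootV2-")
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], 5) // section index 5

	key := append(prefix, encNumber[:]...)
	fmt.Printf("%q\n", key) // "chtRootV2-\x00\x00\x00\x00\x00\x00\x00\x05"
}
```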
@@ -235,9 +226,7 @@ func (c *ChtIndexerBackend) Commit() error {
 	}
 	c.triedb.Commit(root, false)
 
-	if ((c.section+1)*c.sectionSize)%params.CHTFrequencyClient == 0 {
-		log.Info("Storing CHT", "section", c.section*c.sectionSize/params.CHTFrequencyClient, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
-	}
+	log.Info("Storing CHT", "section", c.section, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
 	StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
 	return nil
 }

@@ -32,13 +32,8 @@ const (
 	// considered probably final and its rotated bits are calculated.
 	BloomConfirms = 256
 
-	// CHTFrequencyClient is the block frequency for creating CHTs on the client side.
-	CHTFrequencyClient = 32768
-
-	// CHTFrequencyServer is the block frequency for creating CHTs on the server side.
-	// Eventually this can be merged back with the client version, but that requires a
-	// full database upgrade, so that should be left for a suitable moment.
-	CHTFrequencyServer = 4096
+	// CHTFrequency is the block frequency for creating CHTs
+	CHTFrequency = 32768
 
 	// BloomTrieFrequency is the block frequency for creating BloomTrie on both
 	// server/client sides.