Merge pull request #825 from obscuren/develop

core: chain fork fix
Jeffrey Wilcke 2015-04-29 04:47:26 -07:00
commit 764e81bf12
6 changed files with 117 additions and 35 deletions

@@ -47,7 +47,7 @@ import _ "net/http/pprof"
 const (
 	ClientIdentifier = "Geth"
-	Version          = "0.9.12"
+	Version          = "0.9.13"
 )
 var (

@@ -76,6 +76,7 @@ type ChainManager struct {
 	// Last known total difficulty
 	mu   sync.RWMutex
 	tsmu sync.RWMutex
 	td              *big.Int
 	currentBlock    *types.Block
 	lastBlockHash   common.Hash
+	currentGasLimit *big.Int
@@ -98,9 +99,8 @@ func NewChainManager(blockDb, stateDb common.Database, mux *event.TypeMux) *ChainManager {
 		eventMux: mux,
 		quit:     make(chan struct{}),
 		cache:    NewBlockCache(blockCacheLimit),
+		currentGasLimit: new(big.Int),
 	}
-	bc.setLastBlock()
+	bc.setLastState()
 	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
 	for _, hash := range badHashes {
@@ -145,7 +145,7 @@ func (bc *ChainManager) SetHead(head *types.Block) {
 	bc.transState = statedb.Copy()
 	bc.setTotalDifficulty(head.Td)
 	bc.insert(head)
-	bc.setLastBlock()
+	bc.setLastState()
 }
 func (self *ChainManager) Td() *big.Int {
@@ -212,7 +212,7 @@ func (self *ChainManager) setTransState(statedb *state.StateDB) {
 	self.transState = statedb
 }
-func (bc *ChainManager) setLastBlock() {
+func (bc *ChainManager) setLastState() {
 	data, _ := bc.blockDb.Get([]byte("LastBlock"))
 	if len(data) != 0 {
 		block := bc.GetBlock(common.BytesToHash(data))
@@ -224,6 +224,7 @@ func (bc *ChainManager) setLastBlock() {
 	} else {
 		bc.Reset()
 	}
+	bc.currentGasLimit = CalcGasLimit(bc.currentBlock)
 	if glog.V(logger.Info) {
 		glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
@@ -319,6 +320,7 @@ func (bc *ChainManager) ResetWithGenesisBlock(gb *types.Block) {
 	bc.insert(bc.genesisBlock)
 	bc.currentBlock = bc.genesisBlock
 	bc.makeCache()
+	bc.td = gb.Difficulty()
 }
 // Export writes the active chain to the given writer.
@@ -346,8 +348,6 @@ func (self *ChainManager) Export(w io.Writer) error {
 func (bc *ChainManager) insert(block *types.Block) {
 	key := append(blockNumPre, block.Number().Bytes()...)
 	bc.blockDb.Put(key, block.Hash().Bytes())
-	// Push block to cache
-	bc.cache.Push(block)
 	bc.blockDb.Put([]byte("LastBlock"), block.Hash().Bytes())
 	bc.currentBlock = block
@@ -358,6 +358,8 @@ func (bc *ChainManager) write(block *types.Block) {
 	enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
 	key := append(blockHashPre, block.Hash().Bytes()...)
 	bc.blockDb.Put(key, enc)
+	// Push block to cache
+	bc.cache.Push(block)
 }
 // Accessors
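Moving cache.Push from insert to write sharpens the split of responsibilities: write persists (and now caches) every block it sees, side-chain blocks included, while insert only records the canonical number-to-hash mapping and the head pointer. A toy sketch of that separation; the store type and field names here are illustrative, not geth's actual database layout:

// Sketch only: separating "persist a block" from "make it canonical".
// Every written block is stored (and would be cached); only insert touches
// the canonical number->hash index and the head pointer.
package main

import "fmt"

type store struct {
	byHash    map[string][]byte // all blocks, canonical or not
	canonical map[uint64]string // height -> canonical hash
	head      string
}

func (s *store) write(hash string, body []byte) {
	s.byHash[hash] = body // also the natural place to warm a block cache
}

func (s *store) insert(num uint64, hash string) {
	s.canonical[num] = hash
	s.head = hash
}

func main() {
	s := &store{byHash: map[string][]byte{}, canonical: map[uint64]string{}}
	s.write("a1", []byte("block a1"))
	s.insert(1, "a1")
	s.write("b1", []byte("side-chain block b1")) // stored but not canonical
	fmt.Println(s.head, len(s.byHash), s.canonical[1]) // a1 2 a1
}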
@@ -552,16 +554,17 @@ func (self *ChainManager) InsertChain(chain types.Blocks) error {
 			// Compare the TD of the last known block in the canonical chain to make sure it's greater.
 			// At this point it's possible that a different chain (fork) becomes the new canonical chain.
 			if block.Td.Cmp(self.td) > 0 {
-				//if block.Header().Number.Cmp(new(big.Int).Add(cblock.Header().Number, common.Big1)) < 0 {
-				if block.Number().Cmp(cblock.Number()) <= 0 {
+				// Check for chain forks. If H(block.num - 1) != block.parent, we're on a fork and need to do some merging
+				if previous := self.getBlockByNumber(block.NumberU64() - 1); previous.Hash() != block.ParentHash() {
 					chash := cblock.Hash()
 					hash := block.Hash()
 					if glog.V(logger.Info) {
 						glog.Infof("Split detected. New head #%v (%x) TD=%v, was #%v (%x) TD=%v\n", block.Header().Number, hash[:4], block.Td, cblock.Header().Number, chash[:4], self.td)
 					}
 					// during split we merge two different chains and create the new canonical chain
-					self.merge(self.getBlockByNumber(block.NumberU64()), block)
+					self.merge(previous, block)
 					queue[i] = ChainSplitEvent{block, logs}
 					queueEvent.splitCount++
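The new predicate is the heart of the fix: a block whose parent is not the canonical block at height number-1 sits on a fork, even when its total difficulty beats the current head, so a merge is required instead of a plain head update. A self-contained sketch of that check against a number-to-hash index (toy types, not geth's API):

// Sketch only: detecting a fork by comparing the incoming block's parent
// against the canonical hash recorded at the previous height.
package main

import "fmt"

type block struct {
	number uint64
	hash   string
	parent string
}

// canonical maps height -> hash of the block currently on the canonical chain.
func isFork(canonical map[uint64]string, b block) bool {
	return canonical[b.number-1] != b.parent
}

func main() {
	canonical := map[uint64]string{1: "a1", 2: "a2", 3: "a3"}
	onChain := block{number: 4, hash: "a4", parent: "a3"}
	forked := block{number: 4, hash: "b4", parent: "b3"} // built on a side chain
	fmt.Println(isFork(canonical, onChain)) // false: extends the canonical head
	fmt.Println(isFork(canonical, forked))  // true: needs a merge/reorg
}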
@@ -587,16 +590,19 @@ func (self *ChainManager) InsertChain(chain types.Blocks) error {
 					glog.Infof("inserted block #%d (%d TXs %d UNCs) (%x...)\n", block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4])
 				}
 			} else {
+				if glog.V(logger.Detail) {
+					glog.Infof("inserted forked block #%d (%d TXs %d UNCs) (%x...)\n", block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4])
+				}
 				queue[i] = ChainSideEvent{block, logs}
 				queueEvent.sideCount++
 			}
+			self.futureBlocks.Delete(block.Hash())
 		}
 		self.mu.Unlock()
 		stats.processed++
-		self.futureBlocks.Delete(block.Hash())
 	}
 	if (stats.queued > 0 || stats.processed > 0) && bool(glog.V(logger.Info)) {
@@ -610,33 +616,38 @@ func (self *ChainManager) InsertChain(chain types.Blocks) error {
 	return nil
 }
-// merge takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
+// diff takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
 // to be part of the new canonical chain.
-func (self *ChainManager) merge(oldBlock, newBlock *types.Block) {
+func (self *ChainManager) diff(oldBlock, newBlock *types.Block) types.Blocks {
 	glog.V(logger.Debug).Infof("Applying diff to %x & %x\n", oldBlock.Hash().Bytes()[:4], newBlock.Hash().Bytes()[:4])
-	var oldChain, newChain types.Blocks
+	var newChain types.Blocks
-	// First find the split (common ancestor) so we can perform an adequate merge
+	// first find common number
+	for newBlock = newBlock; newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) {
+		newChain = append(newChain, newBlock)
+	}
+	glog.V(logger.Debug).Infoln("Found common number", newBlock.Number())
 	for {
-		oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
 		if oldBlock.Hash() == newBlock.Hash() {
 			break
 		}
-		oldChain = append(oldChain, oldBlock)
 		newChain = append(newChain, newBlock)
+		oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
 	}
+	return newChain
+}
+
+// merge merges two different chain to the new canonical chain
+func (self *ChainManager) merge(oldBlock, newBlock *types.Block) {
+	newChain := self.diff(oldBlock, newBlock)
 	// insert blocks
 	for _, block := range newChain {
 		self.insert(block)
 	}
-	if glog.V(logger.Detail) {
-		for i, oldBlock := range oldChain {
-			glog.Infof("- %.10v = %x\n", oldBlock.Number(), oldBlock.Hash())
-			glog.Infof("+ %.10v = %x\n", newChain[i].Number(), newChain[i].Hash())
-		}
-	}
 }
 func (self *ChainManager) update() {
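Taken together, diff walks the new head back down to the old head's height, then walks both chains back in lock-step until the hashes meet, collecting the new-chain blocks along the way; merge then re-inserts those blocks so they become canonical. A self-contained sketch of that walk over toy types (not geth's):

// Sketch only: collecting the blocks that must become canonical after a
// reorg, by walking back to the common ancestor.
package main

import "fmt"

type blk struct {
	num    uint64
	hash   string
	parent string
}

// diffChains returns the blocks of the new chain (newest first) that are not
// part of the old chain, stopping at the common ancestor.
func diffChains(byHash map[string]blk, oldHead, newHead blk) []blk {
	var newChain []blk
	// First bring the new head down to the old head's height.
	for newHead.num != oldHead.num {
		newChain = append(newChain, newHead)
		newHead = byHash[newHead.parent]
	}
	// Then walk both chains back in lock-step until the hashes meet.
	for oldHead.hash != newHead.hash {
		newChain = append(newChain, newHead)
		oldHead, newHead = byHash[oldHead.parent], byHash[newHead.parent]
	}
	return newChain
}

func main() {
	// genesis <- a1 <- a2 (old head)
	// genesis <- b1 <- b2 <- b3 (new head)
	byHash := map[string]blk{
		"g":  {0, "g", ""},
		"a1": {1, "a1", "g"}, "a2": {2, "a2", "a1"},
		"b1": {1, "b1", "g"}, "b2": {2, "b2", "b1"}, "b3": {3, "b3", "b2"},
	}
	for _, b := range diffChains(byHash, byHash["a2"], byHash["b3"]) {
		fmt.Println(b.hash) // b3, b2, b1
	}
}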

@@ -9,6 +9,7 @@ import (
 	"strconv"
 	"testing"
+	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
@@ -56,12 +57,14 @@ func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big.Int)) {
 	}
 	// Compare difficulties
 	f(tdpre, td)
+	// Loop over parents making sure reconstruction is done properly
 }
 func printChain(bc *ChainManager) {
 	for i := bc.CurrentBlock().Number().Uint64(); i > 0; i-- {
 		b := bc.GetBlockByNumber(uint64(i))
-		fmt.Printf("\t%x\n", b.Hash())
+		fmt.Printf("\t%x %v\n", b.Hash(), b.Difficulty())
 	}
 }
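The trailing comment in testFork describes the intended follow-up check: after a fork is resolved, every canonical block's parent hash must equal the canonical hash one height below. A minimal version of that parent walk, mirroring the loop TestReorg performs below (toy maps instead of a ChainManager):

// Sketch only: verifying canonical-chain linkage by walking parents downward.
package main

import "fmt"

// verifyLinkage checks that, for every height n, the canonical block's
// parent hash equals the canonical hash at height n-1.
func verifyLinkage(hashes, parents map[uint64]string, head uint64) error {
	for n := head; n > 0; n-- {
		if parents[n] != hashes[n-1] {
			return fmt.Errorf("block %d: parent %q != canonical %q", n, parents[n], hashes[n-1])
		}
	}
	return nil
}

func main() {
	hashes := map[uint64]string{0: "g", 1: "b1", 2: "b2", 3: "b3"}
	parents := map[uint64]string{1: "g", 2: "b1", 3: "b2"}
	fmt.Println(verifyLinkage(hashes, parents, 3)) // <nil>
}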
@@ -344,3 +347,50 @@ func TestGetAncestors(t *testing.T) {
 	ancestors := chainMan.GetAncestors(chain[len(chain)-1], 4)
 	fmt.Println(ancestors)
 }
+
+type bproc struct{}
+
+func (bproc) Process(*types.Block) (state.Logs, error) { return nil, nil }
+
+func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block {
+	var chain []*types.Block
+	for i, difficulty := range d {
+		header := &types.Header{Number: big.NewInt(int64(i + 1)), Difficulty: big.NewInt(int64(difficulty))}
+		block := types.NewBlockWithHeader(header)
+		copy(block.HeaderHash[:2], []byte{byte(i + 1), seed})
+		if i == 0 {
+			block.ParentHeaderHash = genesis.Hash()
+		} else {
+			copy(block.ParentHeaderHash[:2], []byte{byte(i), seed})
+		}
+		chain = append(chain, block)
+	}
+	return chain
+}
+
+func TestReorg(t *testing.T) {
+	db, _ := ethdb.NewMemDatabase()
+
+	var eventMux event.TypeMux
+	genesis := GenesisBlock(db)
+	bc := &ChainManager{blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: &eventMux}
+	bc.cache = NewBlockCache(100)
+	bc.futureBlocks = NewBlockCache(100)
+	bc.processor = bproc{}
+	bc.ResetWithGenesisBlock(genesis)
+	bc.txState = state.ManageState(bc.State())
+
+	chain1 := makeChainWithDiff(genesis, []int{1, 2, 4}, 10)
+	chain2 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11)
+
+	bc.InsertChain(chain1)
+	bc.InsertChain(chain2)
+
+	prev := bc.CurrentBlock()
+	for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
+		if prev.ParentHash() != block.Hash() {
+			t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash())
+		}
+	}
+}
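Why chain2 wins the reorg: InsertChain compares total difficulty, and chain2 accumulates 1+2+3+4 = 10 over genesis versus chain1's 1+2+4 = 7, so the heavier fork becomes canonical even though chain1 was inserted first. A tiny check of that arithmetic:

// Sketch only: the total-difficulty comparison that decides the reorg above.
package main

import "fmt"

func totalDifficulty(diffs []int) int {
	sum := 0
	for _, d := range diffs {
		sum += d
	}
	return sum
}

func main() {
	chain1 := []int{1, 2, 4}    // TD over genesis = 7
	chain2 := []int{1, 2, 3, 4} // TD over genesis = 10
	fmt.Println(totalDifficulty(chain2) > totalDifficulty(chain1)) // true: chain2 becomes canonical
}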

@@ -351,7 +351,7 @@ func (self *Block) Copy() *Block {
 }
 func (self *Block) String() string {
-	return fmt.Sprintf(`Block(#%v): Size: %v TD: %v {
+	str := fmt.Sprintf(`Block(#%v): Size: %v TD: %v {
 MinerHash: %x
 %v
 Transactions:
@@ -360,6 +360,16 @@ Uncles:
 %v
 }
 `, self.Number(), self.Size(), self.Td, self.header.HashNoNonce(), self.header, self.transactions, self.uncles)
+
+	if (self.HeaderHash != common.Hash{}) {
+		str += fmt.Sprintf("\nFake hash = %x", self.HeaderHash)
+	}
+
+	if (self.ParentHeaderHash != common.Hash{}) {
+		str += fmt.Sprintf("\nFake parent hash = %x", self.ParentHeaderHash)
+	}
+
+	return str
 }
 func (self *Header) String() string {
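A side note on the (self.HeaderHash != common.Hash{}) form: comparing against the type's zero value asks whether a fake hash was ever set, and the parentheses are needed because a bare composite literal in an if condition would otherwise be parsed as the start of the block. A minimal illustration with a plain [32]byte standing in for common.Hash:

// Sketch only: checking whether a hash-like value was ever set by comparing
// against its zero value; parentheses keep the composite literal unambiguous.
package main

import "fmt"

type Hash [32]byte

func main() {
	var unset Hash
	set := Hash{0xab, 0xcd}

	if (unset != Hash{}) {
		fmt.Println("unset was set") // not printed
	}
	if (set != Hash{}) {
		fmt.Println("set carries a fake hash") // printed
	}
}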

@@ -436,6 +436,8 @@ func (d *Downloader) process(peer *peer) error {
 		if err != nil && core.IsParentErr(err) {
 			glog.V(logger.Debug).Infoln("Aborting process due to missing parent.")
+			// XXX this needs a lot of attention
+			blocks = nil
 			break
 		} else if err != nil {
 			// immediately unregister the false peer but do not disconnect
@@ -472,3 +474,7 @@ func (d *Downloader) isProcessing() bool {
 func (d *Downloader) isBusy() bool {
 	return d.isFetchingHashes() || d.isDownloadingBlocks() || d.isProcessing()
 }
+
+func (d *Downloader) IsBusy() bool {
+	return d.isBusy()
+}

@@ -163,6 +163,11 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
 	if peer.td.Cmp(pm.chainman.Td()) <= 0 {
 		return
 	}
+	// Check if the downloader is busy so it doesn't show the sync message
+	// for every attempt
+	if pm.downloader.IsBusy() {
+		return
+	}
 	glog.V(logger.Info).Infof("Synchronisation attempt using %s TD=%v\n", peer.id, peer.td)
 	// Get the hashes from the peer (synchronously)
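The exported IsBusy wrapper exists so the protocol handler can skip redundant synchronisation attempts (and their log lines) while a download is already in flight. A generic sketch of that gating pattern with an atomic flag; this illustrates the idea only, not the Downloader's actual internal state tracking:

// Sketch only: skip a sync trigger while one is already running.
// The real Downloader aggregates several internal states (fetching hashes,
// downloading blocks, processing); a single atomic flag stands in for them.
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type syncer struct{ busy int32 }

func (s *syncer) isBusy() bool { return atomic.LoadInt32(&s.busy) == 1 }

func (s *syncer) synchronise(peer string) {
	if !atomic.CompareAndSwapInt32(&s.busy, 0, 1) {
		return // already busy: stay quiet instead of logging every attempt
	}
	defer atomic.StoreInt32(&s.busy, 0)

	fmt.Println("synchronisation attempt using", peer)
	time.Sleep(10 * time.Millisecond) // stand-in for the actual download
}

func main() {
	var s syncer
	go s.synchronise("peer-1")
	go s.synchronise("peer-2") // skipped if peer-1's sync is still in flight
	time.Sleep(50 * time.Millisecond)
}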