all: finish integrating Go ethash, delete C++ vendor

Péter Szilágyi 2017-03-06 17:20:25 +02:00 committed by Felix Lange
parent df72e20cc5
commit b7d93500f1
20 changed files with 230 additions and 800 deletions
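The caller-visible effect across these hunks is an import swap: the cgo-backed vendor package goes away and the pure-Go implementation under pow takes over. A minimal sketch of the change from a consumer's point of view (import paths taken from the hunks below):

    import (
        // Removed by this commit: the C++/cgo vendor package.
        // "github.com/ethereum/ethash"

        // Its pure-Go replacement:
        "github.com/ethereum/go-ethereum/pow"
    )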

@ -95,6 +95,7 @@ func init() {
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
utils.EthashDatasetDirFlag,
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
utils.FastSyncFlag,
utils.LightModeFlag,
@ -111,7 +112,6 @@ func init() {
utils.GasPriceFlag,
utils.MinerThreadsFlag,
utils.MiningEnabledFlag,
utils.AutoDAGFlag,
utils.TargetGasLimitFlag,
utils.NATFlag,
utils.NoDiscoverFlag,

@ -25,10 +25,10 @@ import (
"strconv"
"strings"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/pow"
"gopkg.in/urfave/cli.v1"
)
@ -87,7 +87,7 @@ func makedag(ctx *cli.Context) error {
utils.Fatalf("Can't find dir")
}
fmt.Println("making DAG, this could take awhile...")
ethash.MakeDAG(blockNum, dir)
pow.MakeDataset(blockNum, dir)
}
default:
wrongArgs()
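With geth makedag now delegating to the new pow package, an equivalent standalone invocation would look like the sketch below (the block number and output directory are illustrative only):

    package main

    import "github.com/ethereum/go-ethereum/pow"

    func main() {
        // Generate and persist the full mining DAG for the epoch derived
        // from the given block number, as the makedag subcommand does above.
        pow.MakeDataset(360000, "/tmp/ethash-dags")
    }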

@ -84,6 +84,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
utils.EthashDatasetDirFlag,
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
},
},
@ -141,7 +142,6 @@ var AppHelpFlagGroups = []flagGroup{
Flags: []cli.Flag{
utils.MiningEnabledFlag,
utils.MinerThreadsFlag,
utils.AutoDAGFlag,
utils.EtherbaseFlag,
utils.TargetGasLimitFlag,
utils.GasPriceFlag,

@ -132,9 +132,14 @@ var (
Name: "ethash.dagdir",
Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
}
EthashDatasetsInMemoryFlag = cli.IntFlag{
Name: "ethash.dagsinmem",
Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
Value: 1,
}
EthashDatasetsOnDiskFlag = cli.IntFlag{
Name: "ethash.dagsondisk",
Usage: "Number of ethash mining DAGs to keep on disk (1+GB each)",
Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
Value: 2,
}
NetworkIdFlag = cli.IntFlag{
@ -207,10 +212,6 @@ var (
Usage: "Target gas limit sets the artificial target gas floor for the blocks to mine",
Value: params.GenesisGasLimit.Uint64(),
}
AutoDAGFlag = cli.BoolFlag{
Name: "autodag",
Usage: "Enable automatic DAG pregeneration",
}
EtherbaseFlag = cli.StringFlag{
Name: "etherbase",
Usage: "Public address for block mining rewards (default = first account created)",
@ -809,8 +810,8 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
EthashCachesInMem: ctx.GlobalInt(EthashCachesInMemoryFlag.Name),
EthashCachesOnDisk: ctx.GlobalInt(EthashCachesOnDiskFlag.Name),
EthashDatasetDir: MakeEthashDatasetDir(ctx),
EthashDatasetsInMem: ctx.GlobalInt(EthashDatasetsInMemoryFlag.Name),
EthashDatasetsOnDisk: ctx.GlobalInt(EthashDatasetsOnDiskFlag.Name),
AutoDAG: ctx.GlobalBool(AutoDAGFlag.Name) || ctx.GlobalBool(MiningEnabledFlag.Name),
EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name),
}
@ -982,7 +983,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
seal := pow.PoW(pow.FakePow{})
if !ctx.GlobalBool(FakePoWFlag.Name) {
seal = pow.NewFullEthash("", 1, 0, "", 0)
seal = pow.NewFullEthash("", 1, 0, "", 1, 0)
}
chain, err = core.NewBlockChain(chainDb, chainConfig, seal, new(event.TypeMux), vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)})
if err != nil {
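Both call sites reflect the widened constructor: NewFullEthash now takes the number of in-memory DAGs between the DAG directory and the on-disk count. A hedged sketch of wiring the new flag through, mirroring CreatePoW further down (cfg is a stand-in for an eth.Config value):

    seal := pow.NewFullEthash(
        cfg.EthashCacheDir, cfg.EthashCachesInMem, cfg.EthashCachesOnDisk,
        cfg.EthashDatasetDir, cfg.EthashDatasetsInMem, cfg.EthashDatasetsOnDisk,
    )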

@ -29,7 +29,6 @@ import (
"strings"
"time"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
@ -40,7 +39,6 @@ import (
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"golang.org/x/net/context"
)
@ -137,7 +135,6 @@ func NewPrivateMinerAPI(e *Ethereum) *PrivateMinerAPI {
// Start the miner with the given number of threads. If threads is nil the number of
// workers started is equal to the number of logical CPU's that are usable by this process.
func (s *PrivateMinerAPI) Start(threads *int) (bool, error) {
s.e.StartAutoDAG()
var err error
if threads == nil {
err = s.e.StartMining(runtime.NumCPU())
@ -173,25 +170,9 @@ func (s *PrivateMinerAPI) SetEtherbase(etherbase common.Address) bool {
return true
}
// StartAutoDAG starts auto DAG generation. This will prevent the DAG generating on epoch change
// which will cause the node to stop mining during the generation process.
func (s *PrivateMinerAPI) StartAutoDAG() bool {
s.e.StartAutoDAG()
return true
}
// StopAutoDAG stops auto DAG generation
func (s *PrivateMinerAPI) StopAutoDAG() bool {
s.e.StopAutoDAG()
return true
}
// MakeDAG creates the new DAG for the given block number
func (s *PrivateMinerAPI) MakeDAG(blockNr rpc.BlockNumber) (bool, error) {
if err := ethash.MakeDAG(uint64(blockNr.Int64()), ""); err != nil {
return false, err
}
return true, nil
}
// GetHashrate returns the current hashrate of the miner.
func (s *PrivateMinerAPI) GetHashrate() uint64 {
return uint64(s.e.miner.HashRate())
}
// PrivateAdminAPI is the collection of Ethereum full node-related APIs

@ -21,14 +21,11 @@ import (
"errors"
"fmt"
"math/big"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
@ -78,7 +75,6 @@ type Config struct {
DatabaseHandles int
DocRoot string
AutoDAG bool
PowFake bool
PowTest bool
PowShared bool
@ -88,6 +84,7 @@ type Config struct {
EthashCachesInMem int
EthashCachesOnDisk int
EthashDatasetDir string
EthashDatasetsInMem int
EthashDatasetsOnDisk int
Etherbase common.Address
@ -138,8 +135,6 @@ type Ethereum struct {
miner *miner.Miner
Mining bool
MinerThreads int
AutoDAG bool
autodagquit chan bool
etherbase common.Address
solcPath string
@ -173,7 +168,6 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
netVersionId: config.NetworkId,
etherbase: config.Etherbase,
MinerThreads: config.MinerThreads,
AutoDAG: config.AutoDAG,
solcPath: config.SolcPath,
}
@ -298,7 +292,7 @@ func CreatePoW(ctx *node.ServiceContext, config *Config) pow.PoW {
return pow.NewSharedEthash()
default:
return pow.NewFullEthash(ctx.ResolvePath(config.EthashCacheDir), config.EthashCachesInMem, config.EthashCachesOnDisk,
config.EthashDatasetDir, config.EthashDatasetsOnDisk)
config.EthashDatasetDir, config.EthashDatasetsInMem, config.EthashDatasetsOnDisk)
}
}
@ -414,9 +408,7 @@ func (s *Ethereum) Protocols() []p2p.Protocol {
// Ethereum protocol implementation.
func (s *Ethereum) Start(srvr *p2p.Server) error {
s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.NetVersion())
if s.AutoDAG {
s.StartAutoDAG()
}
s.protocolManager.Start()
if s.lesServer != nil {
s.lesServer.Start(srvr)
@ -439,8 +431,6 @@ func (s *Ethereum) Stop() error {
s.miner.Stop()
s.eventMux.Stop()
s.StopAutoDAG()
s.chainDb.Close()
close(s.shutdownChan)
@ -451,74 +441,3 @@ func (s *Ethereum) Stop() error {
func (s *Ethereum) WaitForShutdown() {
<-s.shutdownChan
}
// StartAutoDAG() spawns a go routine that checks the DAG every autoDAGcheckInterval
// by default that is 10 times per epoch
// in epoch n, if we past autoDAGepochHeight within-epoch blocks,
// it calls ethash.MakeDAG to pregenerate the DAG for the next epoch n+1
// if it does not exist yet as well as remove the DAG for epoch n-1
// the loop quits if autodagquit channel is closed, it can safely restart and
// stop any number of times.
// For any more sophisticated pattern of DAG generation, use CLI subcommand
// makedag
func (self *Ethereum) StartAutoDAG() {
if self.autodagquit != nil {
return // already started
}
go func() {
log.Info("Pre-generation of ethash DAG on", "dir", ethash.DefaultDir)
var nextEpoch uint64
timer := time.After(0)
self.autodagquit = make(chan bool)
for {
select {
case <-timer:
log.Info("Checking DAG availability", "dir", ethash.DefaultDir)
currentBlock := self.BlockChain().CurrentBlock().NumberU64()
thisEpoch := currentBlock / epochLength
if nextEpoch <= thisEpoch {
if currentBlock%epochLength > autoDAGepochHeight {
if thisEpoch > 0 {
previousDag, previousDagFull := dagFiles(thisEpoch - 1)
os.Remove(filepath.Join(ethash.DefaultDir, previousDag))
os.Remove(filepath.Join(ethash.DefaultDir, previousDagFull))
log.Info("Removed previous DAG", "epoch", thisEpoch-1, "dag", previousDag)
}
nextEpoch = thisEpoch + 1
dag, _ := dagFiles(nextEpoch)
if _, err := os.Stat(dag); os.IsNotExist(err) {
log.Info("Pre-generating next DAG", "epoch", nextEpoch, "dag", dag)
err := ethash.MakeDAG(nextEpoch*epochLength, "") // "" -> ethash.DefaultDir
if err != nil {
log.Error("Error generating DAG", "epoch", nextEpoch, "dag", dag, "err", err)
return
}
} else {
log.Warn("DAG already exists", "epoch", nextEpoch, "dag", dag)
}
}
}
timer = time.After(autoDAGcheckInterval)
case <-self.autodagquit:
return
}
}
}()
}
// stopAutoDAG stops automatic DAG pregeneration by quitting the loop
func (self *Ethereum) StopAutoDAG() {
if self.autodagquit != nil {
close(self.autodagquit)
self.autodagquit = nil
}
log.Info("Pre-generation of ethash DAG off", "dir", ethash.DefaultDir)
}
// dagFiles(epoch) returns the two alternative DAG filenames (not a path)
// 1) <revision>-<hex(seedhash[8])> 2) full-R<revision>-<hex(seedhash[8])>
func dagFiles(epoch uint64) (string, string) {
seedHash, _ := ethash.GetSeedHash(epoch * epochLength)
dag := fmt.Sprintf("full-R%d-%x", ethashRevision, seedHash[:8])
return dag, "full-R" + dag
}
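The pregeneration duty StartAutoDAG used to carry now lives inside the pow package itself: every dataset lookup schedules the following epoch in the background. A condensed sketch of that replacement pattern, using names from the ethash hunks below (the real code launches the goroutine after releasing the lock):

    // When epoch n is requested and no future dataset is pending, kick off
    // epoch n+1 asynchronously instead of polling from an AutoDAG goroutine.
    if ethash.fdataset == nil || ethash.fdataset.epoch <= epoch {
        future := &dataset{epoch: epoch + 1}
        ethash.fdataset = future
        go future.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester, true)
    }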

@ -399,20 +399,8 @@ web3._extend({
inputFormatter: [web3._extend.utils.fromDecimal]
}),
new web3._extend.Method({
name: 'startAutoDAG',
call: 'miner_startAutoDAG',
params: 0
}),
new web3._extend.Method({
name: 'stopAutoDAG',
call: 'miner_stopAutoDAG',
params: 0
}),
new web3._extend.Method({
name: 'makeDAG',
call: 'miner_makeDAG',
params: 1,
inputFormatter: [web3._extend.formatters.inputDefaultBlockNumberFormatter]
name: 'getHashrate',
call: 'miner_getHashrate'
})
],
properties: []

@ -164,7 +164,9 @@ func (self *Miner) HashRate() (tot int64) {
// aspects of the worker/locking up agents so we can get an accurate
// hashrate?
for agent := range self.worker.agents {
tot += agent.GetHashRate()
if _, ok := agent.(*CpuAgent); !ok {
tot += agent.GetHashRate()
}
}
return
}
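CPU agents are skipped because the Go ethash now meters attempted nonces itself (see the Mark calls in Search below); summing them here as well would double count. A self-contained sketch of the underlying meter pattern (the import path is assumed to be the vendored go-metrics):

    package main

    import (
        "fmt"

        metrics "github.com/rcrowley/go-metrics"
    )

    func main() {
        hashrate := metrics.NewMeter()
        hashrate.Mark(1 << 15)        // one batch of attempted nonces
        fmt.Println(hashrate.Rate1()) // exponentially-weighted hashes per second
    }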

@ -151,6 +151,9 @@ func NewNode(datadir string, config *NodeConfig) (stack *Node, _ error) {
GpobaseStepDown: 10,
GpobaseStepUp: 100,
GpobaseCorrectionFactor: 110,
EthashCacheDir: "ethash",
EthashCachesInMem: 2,
EthashCachesOnDisk: 3,
}
if err := rawStack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
return les.New(ctx, ethConf)

@ -22,7 +22,9 @@ import (
"errors"
"fmt"
"io/ioutil"
"math"
"math/big"
"math/rand"
"os"
"path/filepath"
"sync"
@ -46,7 +48,7 @@ var (
maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
// sharedEthash is a full instance that can be shared between multiple users.
sharedEthash = NewFullEthash("", 3, 0, "", 0)
sharedEthash = NewFullEthash("", 3, 0, "", 1, 0)
// algorithmRevision is the data structure version used for file naming.
algorithmRevision = 23
@ -64,13 +66,13 @@ type cache struct {
lock sync.Mutex // Ensures thread safety for updating the usage time
}
// generate ensures that the cache content is generates.
// generate ensures that the cache content is generated before use.
func (c *cache) generate(dir string, limit int, test bool) {
c.once.Do(func() {
// If we have a testing cache, generate and return
if test {
rawCache := generateCache(1024, seedHash(c.epoch*epochLength+1))
c.cache = prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
c.cache = prepare(1024, bytes.NewReader(rawCache))
return
}
// Full cache generation is needed, check cache dir for existing data
@ -117,25 +119,112 @@ func (c *cache) generate(dir string, limit int, test bool) {
})
}
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
epoch uint64 // Epoch for which this dataset is relevant
dataset []uint32 // The actual dataset content
used time.Time // Timestamp of the last use for smarter eviction
once sync.Once // Ensures the dataset is generated only once
lock sync.Mutex // Ensures thread safety for updating the usage time
}
// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, test bool, discard bool) {
d.once.Do(func() {
// If we have a testing dataset, generate and return
if test {
rawCache := generateCache(1024, seedHash(d.epoch*epochLength+1))
intCache := prepare(1024, bytes.NewReader(rawCache))
rawDataset := generateDataset(32*1024, intCache)
d.dataset = prepare(32*1024, bytes.NewReader(rawDataset))
return
}
// Full dataset generation is needed, check dataset dir for existing data
csize := cacheSize(d.epoch*epochLength + 1)
dsize := datasetSize(d.epoch*epochLength + 1)
seed := seedHash(d.epoch*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x", algorithmRevision, seed))
logger := log.New("seed", hexutil.Bytes(seed))
if dir != "" {
dump, err := os.Open(path)
if err == nil {
if !discard {
logger.Info("Loading ethash DAG from disk")
start := time.Now()
d.dataset = prepare(dsize, bufio.NewReader(dump))
logger.Info("Loaded ethash DAG from disk", "elapsed", common.PrettyDuration(time.Since(start)))
}
dump.Close()
return
}
}
// No previous disk dataset was available, generate on the fly
rawCache := generateCache(csize, seed)
intCache := prepare(csize, bytes.NewReader(rawCache))
rawDataset := generateDataset(dsize, intCache)
if !discard {
d.dataset = prepare(dsize, bytes.NewReader(rawDataset))
}
// If a dataset directory is given, attempt to serialize for next time
if dir != "" {
// Store the ethash dataset to disk
start := time.Now()
if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
logger.Error("Failed to create ethash DAG dir", "err", err)
} else if err := ioutil.WriteFile(path, rawDataset, os.ModePerm); err != nil {
logger.Error("Failed to write ethash DAG to disk", "err", err)
} else {
logger.Info("Stored ethash DAG to disk", "elapsed", common.PrettyDuration(time.Since(start)))
}
// Iterate over all previous instances and delete old ones
for ep := int(d.epoch) - limit; ep >= 0; ep-- {
seed := seedHash(uint64(ep)*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x", algorithmRevision, seed))
os.Remove(path)
}
}
})
}
// MakeCache generates a new ethash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
c := cache{epoch: block/epochLength + 1}
c.generate(dir, math.MaxInt32, false)
}
// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
d := dataset{epoch: block/epochLength + 1}
d.generate(dir, math.MaxInt32, false, true)
}
// Ethash is a PoW data structure implementing the ethash algorithm.
type Ethash struct {
cachedir string // Data directory to store the verification caches
cachesinmem int // Number of caches to keep in memory
cachesondisk int // Number of caches to keep on disk
dagdir string // Data directory to store full mining datasets
dagsinmem int // Number of mining datasets to keep in memory
dagsondisk int // Number of mining datasets to keep on disk
caches map[uint64]*cache // In memory caches to avoid regenerating too often
future *cache // Pre-generated cache for the estimated future epoch
lock sync.Mutex // Ensures thread safety for the in-memory caches
caches map[uint64]*cache // In memory caches to avoid regenerating too often
fcache *cache // Pre-generated cache for the estimated future epoch
datasets map[uint64]*dataset // In memory datasets to avoid regenerating too often
fdataset *dataset // Pre-generated dataset for the estimated future epoch
lock sync.Mutex // Ensures thread safety for the in-memory caches
hashrate *metrics.StandardMeter // Meter tracking the average hashrate
hashrate metrics.Meter // Meter tracking the average hashrate
tester bool // Flag whether to use a smaller test dataset
}
// NewFullEthash creates a full sized ethash PoW scheme.
func NewFullEthash(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsondisk int) PoW {
func NewFullEthash(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) PoW {
if cachesinmem <= 0 {
log.Warn("One ethash cache must alwast be in memory", "requested", cachesinmem)
cachesinmem = 1
@ -151,8 +240,11 @@ func NewFullEthash(cachedir string, cachesinmem, cachesondisk int, dagdir string
cachesinmem: cachesinmem,
cachesondisk: cachesondisk,
dagdir: dagdir,
dagsinmem: dagsinmem,
dagsondisk: dagsondisk,
caches: make(map[uint64]*cache),
datasets: make(map[uint64]*dataset),
hashrate: metrics.NewMeter(),
}
}
@ -162,7 +254,9 @@ func NewTestEthash() PoW {
return &Ethash{
cachesinmem: 1,
caches: make(map[uint64]*cache),
datasets: make(map[uint64]*dataset),
tester: true,
hashrate: metrics.NewMeter(),
}
}
@ -181,7 +275,7 @@ func (ethash *Ethash) Verify(block Block) error {
// Go < 1.7 cannot calculate new cache/dataset sizes (no fast prime check)
return ErrNonceOutOfRange
}
// Ensure twe have a valid difficulty for the block
// Ensure that we have a valid difficulty for the block
difficulty := block.Difficulty()
if difficulty.Sign() <= 0 {
return ErrInvalidDifficulty
@ -228,9 +322,9 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
log.Debug("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
}
// If we have the new cache pre-generated, use that, otherwise create a new one
if ethash.future != nil && ethash.future.epoch == epoch {
if ethash.fcache != nil && ethash.fcache.epoch == epoch {
log.Debug("Using pre-generated cache", "epoch", epoch)
current, ethash.future = ethash.future, nil
current, ethash.fcache = ethash.fcache, nil
} else {
log.Debug("Requiring new ethash cache", "epoch", epoch)
current = &cache{epoch: epoch}
@ -238,10 +332,10 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
ethash.caches[epoch] = current
// If we just used up the future cache, or need a refresh, regenerate
if ethash.future == nil || ethash.future.epoch <= epoch {
if ethash.fcache == nil || ethash.fcache.epoch <= epoch {
log.Debug("Requiring new future ethash cache", "epoch", epoch+1)
future = &cache{epoch: epoch + 1}
ethash.future = future
ethash.fcache = future
}
}
current.used = time.Now()
@ -254,7 +348,7 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
current.used = time.Now()
current.lock.Unlock()
// If we exhusted the future cache, now's a goot time to regenerate it
// If we exhausted the future cache, now's a good time to regenerate it
if future != nil {
go future.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
}
@ -264,7 +358,102 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
// Search implements PoW, attempting to find a nonce that satisfies the block's
// difficulty requirements.
func (ethash *Ethash) Search(block Block, stop <-chan struct{}) (uint64, []byte) {
return 0, nil
// Extract some data from the block
var (
hash = block.HashNoNonce().Bytes()
diff = block.Difficulty()
target = new(big.Int).Div(maxUint256, diff)
)
// Retrieve the mining dataset
dataset, size := ethash.dataset(block.NumberU64()), datasetSize(block.NumberU64())
// Start generating random nonces until we abort or find a good one
var (
attempts int64
rand = rand.New(rand.NewSource(time.Now().UnixNano()))
nonce = uint64(rand.Int63())
)
for {
select {
case <-stop:
// Mining terminated, update stats and abort
ethash.hashrate.Mark(attempts)
return 0, nil
default:
// We don't have to update hash rate on every nonce, so update after 2^X nonces
attempts++
if (attempts % (1 << 15)) == 0 {
ethash.hashrate.Mark(attempts)
attempts = 0
}
// Compute the PoW value of this nonce
digest, result := hashimotoFull(size, dataset, hash, nonce)
if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
return nonce, digest
}
nonce++
}
}
}
// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) dataset(block uint64) []uint32 {
epoch := block / epochLength
// If we already have a dataset for that epoch, use that
ethash.lock.Lock()
current, future := ethash.datasets[epoch], (*dataset)(nil)
if current == nil {
// No in-memory dataset, evict the oldest if the dataset limit was reached
for len(ethash.datasets) >= ethash.dagsinmem {
var evict *dataset
for _, dataset := range ethash.datasets {
if evict == nil || evict.used.After(dataset.used) {
evict = dataset
}
}
delete(ethash.datasets, evict.epoch)
log.Debug("Evicted ethash dataset", "epoch", evict.epoch, "used", evict.used)
}
// If we have the new dataset pre-generated, use that, otherwise create a new one
if ethash.fdataset != nil && ethash.fdataset.epoch == epoch {
log.Debug("Using pre-generated dataset", "epoch", epoch)
current = &dataset{epoch: ethash.fdataset.epoch} // Reload from disk
ethash.fdataset = nil
} else {
log.Debug("Requiring new ethash dataset", "epoch", epoch)
current = &dataset{epoch: epoch}
}
ethash.datasets[epoch] = current
// If we just used up the future dataset, or need a refresh, regenerate
if ethash.fdataset == nil || ethash.fdataset.epoch <= epoch {
log.Debug("Requiring new future ethash dataset", "epoch", epoch+1)
future = &dataset{epoch: epoch + 1}
ethash.fdataset = future
}
}
current.used = time.Now()
ethash.lock.Unlock()
// Wait for generation to finish, bump the timestamp and finalize the dataset
current.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester, false)
current.lock.Lock()
current.used = time.Now()
current.lock.Unlock()
// If we exhausted the future dataset, now's a good time to regenerate it
if future != nil {
go future.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester, true) // Discard the results from memory
}
return current.dataset
}
// Hashrate implements PoW, returning the measured rate of the search invocations
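For reference, the acceptance test inside Search above is the standard ethash inequality: the hashimoto result, read as a 256-bit big-endian integer, must not exceed 2^256 divided by the block difficulty. An illustrative helper (not part of the diff) making that check explicit:

    // meetsDifficulty reports whether a hashimoto result satisfies the given
    // difficulty; with difficulty 1000, roughly one in a thousand uniformly
    // random results passes.
    func meetsDifficulty(result []byte, difficulty *big.Int) bool {
        maxUint256 := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
        target := new(big.Int).Div(maxUint256, difficulty)
        return new(big.Int).SetBytes(result).Cmp(target) <= 0
    }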

@ -1,14 +0,0 @@
cmake_minimum_required(VERSION 2.8.7)
project(ethash)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/modules/")
set(ETHHASH_LIBS ethash)
if (WIN32 AND WANT_CRYPTOPP)
add_subdirectory(cryptopp)
endif()
add_subdirectory(src/libethash)
add_subdirectory(src/benchmark EXCLUDE_FROM_ALL)
add_subdirectory(test/c)

@ -1,17 +0,0 @@
include setup.py
# C sources
include src/libethash/internal.c
include src/libethash/sha3.c
include src/libethash/util.c
include src/python/core.c
# Headers
include src/libethash/compiler.h
include src/libethash/data_sizes.h
include src/libethash/endian.h
include src/libethash/ethash.h
include src/libethash/fnv.h
include src/libethash/internal.h
include src/libethash/sha3.h
include src/libethash/util.h

@ -1,6 +0,0 @@
.PHONY: clean test
test:
./test/test.sh
clean:
rm -rf *.so pyethash.egg-info/ build/ test/python/python-virtual-env/ test/c/build/ pyethash.so test/python/*.pyc dist/ MANIFEST

@ -1,22 +0,0 @@
[![Build Status](https://travis-ci.org/ethereum/ethash.svg?branch=master)](https://travis-ci.org/ethereum/ethash)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/debris/ethash?branch=master&svg=true)](https://ci.appveyor.com/project/debris/ethash-nr37r/branch/master)
# Ethash
For details on this project, please see the Ethereum wiki:
https://github.com/ethereum/wiki/wiki/Ethash
### Coding Style for C++ code:
Follow the same exact style as in [cpp-ethereum](https://github.com/ethereum/cpp-ethereum/blob/develop/CodingStandards.txt)
### Coding Style for C code:
The main thing above all is code consistency.
- Tabs for indentation. A tab is 4 spaces
- Try to stick to the [K&R](http://en.wikipedia.org/wiki/Indent_style#K.26R_style),
especially for the C code.
- Keep the line lengths reasonable. No hard limit on 80 characters but don't go further
than 110. Some people work with multiple buffers next to each other.
Make them like you :)

@ -1,7 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure(2) do |config|
config.vm.box = "Ubuntu 12.04"
config.vm.box_url = "https://cloud-images.ubuntu.com/vagrant/precise/current/precise-server-cloudimg-amd64-vagrant-disk1.box"
end

@ -1,43 +0,0 @@
version: 1.0.0.{build}
environment:
BOOST_ROOT: "c:/projects/ethash/deps/boost"
branches:
only:
- master
- develop
os: Windows Server 2012 R2
clone_folder: c:\projects\ethash
#platform: Any CPU
#configuration: Debug
install:
# by default, all script lines are interpreted as batch
# scripts to run before build
before_build:
- echo "Downloading boost..."
- mkdir c:\projects\ethash\deps
- cd c:\projects\ethash\deps
- curl -O https://build.ethdev.com/builds/windows-precompiled/boost.tar.gz
- echo "Unzipping boost..."
- 7z x boost.tar.gz > nul
- 7z x boost.tar > nul
- ls
- echo "Running cmake..."
- cd c:\projects\ethash
- cmake .
build:
project: ALL_BUILD.vcxproj # path to Visual Studio solution or project
after_build:
- echo "Running tests..."
- cd c:\projects\ethash\test\c\Debug
- Test.exe
- echo "Finished!"

@ -1,440 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// Copyright 2015 Lefteris Karapetsas <lefteris@refu.co>
// Copyright 2015 Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package ethash
/*
#include "src/libethash/internal.h"
int ethashGoCallback_cgo(unsigned);
*/
import "C"
import (
"errors"
"fmt"
"io/ioutil"
"math/big"
"math/rand"
"os"
"os/user"
"path/filepath"
"runtime"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/pow"
)
var (
maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
sharedLight = new(Light)
)
const (
epochLength uint64 = 30000
cacheSizeForTesting C.uint64_t = 1024
dagSizeForTesting C.uint64_t = 1024 * 32
)
var DefaultDir = defaultDir()
func defaultDir() string {
home := os.Getenv("HOME")
if user, err := user.Current(); err == nil {
home = user.HomeDir
}
if runtime.GOOS == "windows" {
return filepath.Join(home, "AppData", "Ethash")
}
return filepath.Join(home, ".ethash")
}
// cache wraps an ethash_light_t with some metadata
// and automatic memory management.
type cache struct {
epoch uint64
used time.Time
test bool
gen sync.Once // ensures cache is only generated once.
ptr *C.struct_ethash_light
}
// generate creates the actual cache. it can be called from multiple
// goroutines. the first call will generate the cache, subsequent
// calls wait until it is generated.
func (cache *cache) generate() {
cache.gen.Do(func() {
started := time.Now()
seedHash := makeSeedHash(cache.epoch)
log.Debug(fmt.Sprintf("Generating cache for epoch %d (%x)", cache.epoch, seedHash))
size := C.ethash_get_cachesize(C.uint64_t(cache.epoch * epochLength))
if cache.test {
size = cacheSizeForTesting
}
cache.ptr = C.ethash_light_new_internal(size, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
runtime.SetFinalizer(cache, freeCache)
log.Debug(fmt.Sprintf("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started)))
})
}
func freeCache(cache *cache) {
C.ethash_light_delete(cache.ptr)
cache.ptr = nil
}
func (cache *cache) compute(dagSize uint64, hash common.Hash, nonce uint64) (ok bool, mixDigest, result common.Hash) {
ret := C.ethash_light_compute_internal(cache.ptr, C.uint64_t(dagSize), hashToH256(hash), C.uint64_t(nonce))
// Make sure cache is live until after the C call.
// This is important because a GC might happen and execute
// the finalizer before the call completes.
_ = cache
return bool(ret.success), h256ToHash(ret.mix_hash), h256ToHash(ret.result)
}
// Light implements the Verify half of the proof of work. It uses a few small
// in-memory caches to verify the nonces found by Full.
type Light struct {
test bool // If set, use a smaller cache size
mu sync.Mutex // Protects the per-epoch map of verification caches
caches map[uint64]*cache // Currently maintained verification caches
future *cache // Pre-generated cache for the estimated future DAG
NumCaches int // Maximum number of caches to keep before eviction (only init, don't modify)
}
// Verify checks whether the block's nonce is valid.
func (l *Light) Verify(block pow.Block) bool {
// TODO: do ethash_quick_verify before getCache in order
// to prevent DOS attacks.
blockNum := block.NumberU64()
if blockNum >= epochLength*2048 {
log.Debug(fmt.Sprintf("block number %d too high, limit is %d", epochLength*2048))
return false
}
difficulty := block.Difficulty()
/* Cannot happen if block header diff is validated prior to PoW, but can
happen if PoW is checked first due to parallel PoW checking.
We could check the minimum valid difficulty but for SoC we avoid (duplicating)
Ethereum protocol consensus rules here which are not in scope of Ethash
*/
if difficulty.Cmp(common.Big0) == 0 {
log.Debug(fmt.Sprintf("invalid block difficulty"))
return false
}
cache := l.getCache(blockNum)
dagSize := C.ethash_get_datasize(C.uint64_t(blockNum))
if l.test {
dagSize = dagSizeForTesting
}
// Recompute the hash using the cache.
ok, mixDigest, result := cache.compute(uint64(dagSize), block.HashNoNonce(), block.Nonce())
if !ok {
return false
}
// avoid mixdigest malleability as it's not included in a block's "hashNononce"
if block.MixDigest() != mixDigest {
return false
}
// The actual check.
target := new(big.Int).Div(maxUint256, difficulty)
return result.Big().Cmp(target) <= 0
}
func h256ToHash(in C.ethash_h256_t) common.Hash {
return *(*common.Hash)(unsafe.Pointer(&in.b))
}
func hashToH256(in common.Hash) C.ethash_h256_t {
return C.ethash_h256_t{b: *(*[32]C.uint8_t)(unsafe.Pointer(&in[0]))}
}
func (l *Light) getCache(blockNum uint64) *cache {
var c *cache
epoch := blockNum / epochLength
// If we have a PoW for that epoch, use that
l.mu.Lock()
if l.caches == nil {
l.caches = make(map[uint64]*cache)
}
if l.NumCaches == 0 {
l.NumCaches = 3
}
c = l.caches[epoch]
if c == nil {
// No cached DAG, evict the oldest if the cache limit was reached
if len(l.caches) >= l.NumCaches {
var evict *cache
for _, cache := range l.caches {
if evict == nil || evict.used.After(cache.used) {
evict = cache
}
}
log.Debug(fmt.Sprintf("Evicting DAG for epoch %d in favour of epoch %d", evict.epoch, epoch))
delete(l.caches, evict.epoch)
}
// If we have the new DAG pre-generated, use that, otherwise create a new one
if l.future != nil && l.future.epoch == epoch {
log.Debug(fmt.Sprintf("Using pre-generated DAG for epoch %d", epoch))
c, l.future = l.future, nil
} else {
log.Debug(fmt.Sprintf("No pre-generated DAG available, creating new for epoch %d", epoch))
c = &cache{epoch: epoch, test: l.test}
}
l.caches[epoch] = c
// If we just used up the future cache, or need a refresh, regenerate
if l.future == nil || l.future.epoch <= epoch {
log.Debug(fmt.Sprintf("Pre-generating DAG for epoch %d", epoch+1))
l.future = &cache{epoch: epoch + 1, test: l.test}
go l.future.generate()
}
}
c.used = time.Now()
l.mu.Unlock()
// Wait for generation finish and return the cache
c.generate()
return c
}
// dag wraps an ethash_full_t with some metadata
// and automatic memory management.
type dag struct {
epoch uint64
test bool
dir string
gen sync.Once // ensures DAG is only generated once.
ptr *C.struct_ethash_full
}
// generate creates the actual DAG. it can be called from multiple
// goroutines. the first call will generate the DAG, subsequent
// calls wait until it is generated.
func (d *dag) generate() {
d.gen.Do(func() {
var (
started = time.Now()
seedHash = makeSeedHash(d.epoch)
blockNum = C.uint64_t(d.epoch * epochLength)
cacheSize = C.ethash_get_cachesize(blockNum)
dagSize = C.ethash_get_datasize(blockNum)
)
if d.test {
cacheSize = cacheSizeForTesting
dagSize = dagSizeForTesting
}
if d.dir == "" {
d.dir = DefaultDir
}
log.Info(fmt.Sprintf("Generating DAG for epoch %d (size %d) (%x)", d.epoch, dagSize, seedHash))
// Generate a temporary cache.
// TODO: this could share the cache with Light
cache := C.ethash_light_new_internal(cacheSize, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
defer C.ethash_light_delete(cache)
// Generate the actual DAG.
d.ptr = C.ethash_full_new_internal(
C.CString(d.dir),
hashToH256(seedHash),
dagSize,
cache,
(C.ethash_callback_t)(unsafe.Pointer(C.ethashGoCallback_cgo)),
)
if d.ptr == nil {
panic("ethash_full_new IO or memory error")
}
runtime.SetFinalizer(d, freeDAG)
log.Info(fmt.Sprintf("Done generating DAG for epoch %d, it took %v", d.epoch, time.Since(started)))
})
}
func freeDAG(d *dag) {
C.ethash_full_delete(d.ptr)
d.ptr = nil
}
func (d *dag) Ptr() unsafe.Pointer {
return unsafe.Pointer(d.ptr.data)
}
//export ethashGoCallback
func ethashGoCallback(percent C.unsigned) C.int {
log.Info(fmt.Sprintf("Generating DAG: %d%%", percent))
return 0
}
// MakeDAG pre-generates a DAG file for the given block number in the
// given directory. If dir is the empty string, the default directory
// is used.
func MakeDAG(blockNum uint64, dir string) error {
d := &dag{epoch: blockNum / epochLength, dir: dir}
if blockNum >= epochLength*2048 {
return fmt.Errorf("block number too high, limit is %d", epochLength*2048)
}
d.generate()
if d.ptr == nil {
return errors.New("failed")
}
return nil
}
// Full implements the Search half of the proof of work.
type Full struct {
Dir string // use this to specify a non-default DAG directory
test bool // if set use a smaller DAG size
turbo bool
hashRate int32
mu sync.Mutex // protects dag
current *dag // current full DAG
}
func (pow *Full) getDAG(blockNum uint64) (d *dag) {
epoch := blockNum / epochLength
pow.mu.Lock()
if pow.current != nil && pow.current.epoch == epoch {
d = pow.current
} else {
d = &dag{epoch: epoch, test: pow.test, dir: pow.Dir}
pow.current = d
}
pow.mu.Unlock()
// wait for it to finish generating.
d.generate()
return d
}
func (pow *Full) Search(block pow.Block, stop <-chan struct{}, index int) (nonce uint64, mixDigest []byte) {
dag := pow.getDAG(block.NumberU64())
r := rand.New(rand.NewSource(time.Now().UnixNano()))
diff := block.Difficulty()
i := int64(0)
starti := i
start := time.Now().UnixNano()
previousHashrate := int32(0)
nonce = uint64(r.Int63())
hash := hashToH256(block.HashNoNonce())
target := new(big.Int).Div(maxUint256, diff)
for {
select {
case <-stop:
atomic.AddInt32(&pow.hashRate, -previousHashrate)
return 0, nil
default:
i++
// we don't have to update hash rate on every nonce, so update after
// first nonce check and then after 2^X nonces
if i == 2 || ((i % (1 << 16)) == 0) {
elapsed := time.Now().UnixNano() - start
hashes := (float64(1e9) / float64(elapsed)) * float64(i-starti)
hashrateDiff := int32(hashes) - previousHashrate
previousHashrate = int32(hashes)
atomic.AddInt32(&pow.hashRate, hashrateDiff)
}
ret := C.ethash_full_compute(dag.ptr, hash, C.uint64_t(nonce))
result := h256ToHash(ret.result).Big()
// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
if ret.success && result.Cmp(target) <= 0 {
mixDigest = C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32))
atomic.AddInt32(&pow.hashRate, -previousHashrate)
return nonce, mixDigest
}
nonce += 1
}
if !pow.turbo {
time.Sleep(20 * time.Microsecond)
}
}
}
func (pow *Full) GetHashrate() int64 {
return int64(atomic.LoadInt32(&pow.hashRate))
}
func (pow *Full) Turbo(on bool) {
// TODO: this needs to use an atomic operation.
pow.turbo = on
}
// Ethash combines block verification with Light and
// nonce searching with Full into a single proof of work.
type Ethash struct {
*Light
*Full
}
// New creates an instance of the proof of work.
func New() *Ethash {
return &Ethash{new(Light), &Full{turbo: true}}
}
// NewShared creates an instance of the proof of work., where a single instance
// of the Light cache is shared across all instances created with NewShared.
func NewShared() *Ethash {
return &Ethash{sharedLight, &Full{turbo: true}}
}
// NewForTesting creates a proof of work for use in unit tests.
// It uses a smaller DAG and cache size to keep test times low.
// DAG files are stored in a temporary directory.
//
// Nonces found by a testing instance are not verifiable with a
// regular-size cache.
func NewForTesting() (*Ethash, error) {
dir, err := ioutil.TempDir("", "ethash-test")
if err != nil {
return nil, err
}
return &Ethash{&Light{test: true}, &Full{Dir: dir, test: true}}, nil
}
func GetSeedHash(blockNum uint64) ([]byte, error) {
if blockNum >= epochLength*2048 {
return nil, fmt.Errorf("block number too high, limit is %d", epochLength*2048)
}
sh := makeSeedHash(blockNum / epochLength)
return sh[:], nil
}
func makeSeedHash(epoch uint64) (sh common.Hash) {
for ; epoch > 0; epoch-- {
sh = crypto.Sha3Hash(sh[:])
}
return sh
}

@ -1,51 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package ethash
/*
-mno-stack-arg-probe disables stack probing which avoids the function
__chkstk_ms being linked. this avoids a clash of this symbol as we also
separately link the secp256k1 lib which ends up defining this symbol
1. https://gcc.gnu.org/onlinedocs/gccint/Stack-Checking.html
2. https://groups.google.com/forum/#!msg/golang-dev/v1bziURSQ4k/88fXuJ24e-gJ
3. https://groups.google.com/forum/#!topic/golang-nuts/VNP6Mwz_B6o
*/
/*
#cgo CFLAGS: -std=gnu99 -Wall
#cgo windows CFLAGS: -mno-stack-arg-probe
#cgo LDFLAGS: -lm
#include "src/libethash/internal.c"
#include "src/libethash/sha3.c"
#include "src/libethash/io.c"
#ifdef _WIN32
# include "src/libethash/io_win32.c"
# include "src/libethash/mmap_win32.c"
#else
# include "src/libethash/io_posix.c"
#endif
// 'gateway function' for calling back into go.
extern int ethashGoCallback(unsigned);
int ethashGoCallback_cgo(unsigned percent) { return ethashGoCallback(percent); }
*/
import "C"

@ -1,47 +0,0 @@
#!/usr/bin/env python
import os
from distutils.core import setup, Extension
sources = [
'src/python/core.c',
'src/libethash/io.c',
'src/libethash/internal.c',
'src/libethash/sha3.c']
if os.name == 'nt':
sources += [
'src/libethash/util_win32.c',
'src/libethash/io_win32.c',
'src/libethash/mmap_win32.c',
]
else:
sources += [
'src/libethash/io_posix.c'
]
depends = [
'src/libethash/ethash.h',
'src/libethash/compiler.h',
'src/libethash/data_sizes.h',
'src/libethash/endian.h',
'src/libethash/ethash.h',
'src/libethash/io.h',
'src/libethash/fnv.h',
'src/libethash/internal.h',
'src/libethash/sha3.h',
'src/libethash/util.h',
]
pyethash = Extension('pyethash',
sources=sources,
depends=depends,
extra_compile_args=["-Isrc/", "-std=gnu99", "-Wall"])
setup(
name='pyethash',
author="Matthew Wampler-Doty",
author_email="matthew.wampler.doty@gmail.com",
license='GPL',
version='0.1.23',
url='https://github.com/ethereum/ethash',
download_url='https://github.com/ethereum/ethash/tarball/v23',
description=('Python wrappers for ethash, the ethereum proof of work'
'hashing function'),
ext_modules=[pyethash],
)

vendor/vendor.json

@ -34,12 +34,6 @@
"revision": "346938d642f2ec3594ed81d874461961cd0faa76",
"revisionTime": "2016-10-29T20:57:26Z"
},
{
"checksumSHA1": "HdOu+ISNyVWMyBao7vDI21uZH6U=",
"path": "github.com/ethereum/ethash",
"revision": "214d4c008e101d4f7b18318389fb8c2e00323f24",
"revisionTime": "2016-10-25T09:19:48Z"
},
{
"checksumSHA1": "7oFpbmDfGobwKsFLIf6wMUvVoKw=",
"path": "github.com/fatih/color",