Merge pull request #859 from Gustav-Simonsson/ethash_reloaded
Ethash reloaded
commit 1f26a1b863
Godeps/Godeps.json  (4 lines changed; generated)

@@ -17,8 +17,8 @@
},
{
"ImportPath": "github.com/ethereum/ethash",
"Comment": "v23.1-82-g908aad3",
"Rev": "908aad345c9fbf3ab9bbb94031dc02d0d90df1b8"
"Comment": "v23.1-195-g4d50db9",
"Rev": "4d50db90d8bb5f2fae357570366cb8c657a4ddfc"
},
{
"ImportPath": "github.com/howeyc/fsnotify",

Godeps/_workspace/src/github.com/ethereum/ethash/.travis.yml  (19 lines changed; generated; vendored)

@@ -1,14 +1,23 @@
# making our travis.yml play well with C++11 by obtaining g++4.8
# Taken from this file:
# https://github.com/beark/ftl/blob/master/.travis.yml
language: go
go:
- 1.4.2

before_install:
# for g++4.8 and C++11
- sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test

# Set up go-ethereum
- sudo apt-get update -y -qq
- sudo apt-get install -yqq libgmp3-dev
- git clone --depth=10 https://github.com/ethereum/go-ethereum ${GOPATH}/src/github.com/ethereum/go-ethereum
# use canned dependencies from the go-ethereum repository
- export GOPATH=$GOPATH:$GOPATH/src/github.com/ethereum/go-ethereum/Godeps/_workspace/
- echo $GOPATH

install:
# need to explicitly request version 1.48 since by default we get 1.46 which does not work with C++11
- sudo apt-get install -qq --yes --force-yes g++-4.8
- sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 50
# need to explicitly request version 1.48 since by default we get 1.46 which does not work with C++11
- sudo apt-get install -qq wget cmake bash libboost-test1.48-dev libboost-system1.48-dev libboost-filesystem1.48-dev nodejs python-pip python-dev
- sudo apt-get install -qq wget cmake bash libboost-test1.48-dev libboost-system1.48-dev libboost-filesystem1.48-dev nodejs python-pip python-dev valgrind
- sudo pip install virtualenv -q
script: "./test/test.sh"

Godeps/_workspace/src/github.com/ethereum/ethash/README.md  (17 lines changed; generated; vendored)

@@ -1,7 +1,22 @@
[![Build Status](https://travis-ci.org/ethereum/ethash.svg?branch=master)](https://travis-ci.org/ethereum/ethash)

[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/debris/ethash?branch=master&svg=true)](https://ci.appveyor.com/project/debris/ethash-nr37r/branch/master)

# Ethash

For details on this project, please see the Ethereum wiki:
https://github.com/ethereum/wiki/wiki/Ethash

### Coding Style for C++ code:

Follow the same exact style as in [cpp-ethereum](https://github.com/ethereum/cpp-ethereum/blob/develop/CodingStandards.txt)

### Coding Style for C code:

The main thing above all is code consistency.

- Tabs for indentation. A tab is 4 spaces
- Try to stick to the [K&R](http://en.wikipedia.org/wiki/Indent_style#K.26R_style),
especially for the C code.
- Keep the line lengths reasonable. No hard limit on 80 characters but don't go further
than 110. Some people work with multiple buffers next to each other.
Make them like you :)

Godeps/_workspace/src/github.com/ethereum/ethash/appveyor.yml  (43 lines changed; generated; vendored; new file)

@@ -0,0 +1,43 @@
version: 1.0.0.{build}

environment:
BOOST_ROOT: "c:/projects/ethash/deps/boost"

branches:
only:
- master
- develop

os: Windows Server 2012 R2

clone_folder: c:\projects\ethash

#platform: Any CPU
#configuration: Debug

install:
# by default, all script lines are interpreted as batch

# scripts to run before build
before_build:
- echo "Downloading boost..."
- mkdir c:\projects\ethash\deps
- cd c:\projects\ethash\deps
- curl -O https://build.ethdev.com/builds/windows-precompiled/boost.tar.gz
- echo "Unzipping boost..."
- 7z x boost.tar.gz > nul
- 7z x boost.tar > nul
- ls
- echo "Running cmake..."
- cd c:\projects\ethash
- cmake .

build:
project: ALL_BUILD.vcxproj # path to Visual Studio solution or project

after_build:
- echo "Running tests..."
- cd c:\projects\ethash\test\c\Debug
- Test.exe
- echo "Finished!"

Godeps/_workspace/src/github.com/ethereum/ethash/ethash.go  (640 lines changed; generated; vendored)

@@ -1,32 +1,22 @@
/*
###################################################################################
###################################################################################
####################                                           ####################
#################### EDIT AND YOU SHALL FEEL MY WRATH - jeff   ####################
####################                                           ####################
###################################################################################
###################################################################################
*/

package ethash

/*
#cgo CFLAGS: -std=gnu99 -Wall
#include "src/libethash/util.c"
#include "src/libethash/internal.c"
#include "src/libethash/sha3.c"
#include "src/libethash/internal.h"

int ethashGoCallback_cgo(unsigned);
*/
import "C"

import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io/ioutil"
"math/big"
"math/rand"
"os"
"path"
"os/user"
"path/filepath"
"runtime"
"sync"
"time"
"unsafe"
@@ -38,318 +28,267 @@ import (
"github.com/ethereum/go-ethereum/pow"
)

var minDifficulty = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
var (
minDifficulty = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
sharedLight = new(Light)
)

type ParamsAndCache struct {
params *C.ethash_params
cache *C.ethash_cache
Epoch uint64
const (
epochLength uint64 = 30000
cacheSizeForTesting C.uint64_t = 1024
dagSizeForTesting C.uint64_t = 1024 * 32
)

var DefaultDir = defaultDir()

func defaultDir() string {
home := os.Getenv("HOME")
if user, err := user.Current(); err == nil {
home = user.HomeDir
}
if runtime.GOOS == "windows" {
return filepath.Join(home, "AppData", "Ethash")
}
return filepath.Join(home, ".ethash")
}

type DAG struct {
dag unsafe.Pointer // full GB of memory for dag
file bool
paramsAndCache *ParamsAndCache
// cache wraps an ethash_light_t with some metadata
// and automatic memory management.
type cache struct {
epoch uint64
test bool

gen sync.Once // ensures cache is only generated once.
ptr *C.struct_ethash_light
}

type Ethash struct {
turbo bool
HashRate int64
chainManager pow.ChainManager
dag *DAG
paramsAndCache *ParamsAndCache
ret *C.ethash_return_value
dagMutex *sync.RWMutex
cacheMutex *sync.RWMutex
}

func parseNonce(nonce []byte) (uint64, error) {
nonceBuf := bytes.NewBuffer(nonce)
nonceInt, err := binary.ReadUvarint(nonceBuf)
if err != nil {
return 0, err
}
return nonceInt, nil
}

const epochLength uint64 = 30000

func makeParamsAndCache(chainManager pow.ChainManager, blockNum uint64) (*ParamsAndCache, error) {
if blockNum >= epochLength*2048 {
return nil, fmt.Errorf("block number is out of bounds (value %v, limit is %v)", blockNum, epochLength*2048)
}
paramsAndCache := &ParamsAndCache{
params: new(C.ethash_params),
cache: new(C.ethash_cache),
Epoch: blockNum / epochLength,
}
C.ethash_params_init(paramsAndCache.params, C.uint32_t(uint32(blockNum)))
paramsAndCache.cache.mem = C.malloc(C.size_t(paramsAndCache.params.cache_size))

seedHash, err := GetSeedHash(blockNum)
if err != nil {
return nil, err
}

glog.V(logger.Info).Infof("Making cache for epoch: %d (%v) (%x)\n", paramsAndCache.Epoch, blockNum, seedHash)
start := time.Now()
C.ethash_mkcache(paramsAndCache.cache, paramsAndCache.params, (*C.ethash_blockhash_t)(unsafe.Pointer(&seedHash[0])))

if glog.V(logger.Info) {
glog.Infoln("Took:", time.Since(start))
}

return paramsAndCache, nil
}

func (pow *Ethash) UpdateCache(blockNum uint64, force bool) error {
pow.cacheMutex.Lock()
defer pow.cacheMutex.Unlock()

thisEpoch := blockNum / epochLength
if force || pow.paramsAndCache.Epoch != thisEpoch {
var err error
pow.paramsAndCache, err = makeParamsAndCache(pow.chainManager, blockNum)
if err != nil {
panic(err)
// generate creates the actual cache. it can be called from multiple
// goroutines. the first call will generate the cache, subsequent
// calls wait until it is generated.
func (cache *cache) generate() {
cache.gen.Do(func() {
started := time.Now()
seedHash := makeSeedHash(cache.epoch)
glog.V(logger.Debug).Infof("Generating cache for epoch %d (%x)", cache.epoch, seedHash)
size := C.ethash_get_cachesize(C.uint64_t(cache.epoch * epochLength))
if cache.test {
size = cacheSizeForTesting
}
}
cache.ptr = C.ethash_light_new_internal(size, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
runtime.SetFinalizer(cache, freeCache)
glog.V(logger.Debug).Infof("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started))
})
}

func freeCache(cache *cache) {
C.ethash_light_delete(cache.ptr)
cache.ptr = nil
}

// Light implements the Verify half of the proof of work.
// It uses a small in-memory cache to verify the nonces
// found by Full.
type Light struct {
test bool // if set use a smaller cache size
mu sync.Mutex // protects current
current *cache // last cache which was generated.
// TODO: keep multiple caches.
}

// Verify checks whether the block's nonce is valid.
func (l *Light) Verify(block pow.Block) bool {
// TODO: do ethash_quick_verify before getCache in order
// to prevent DOS attacks.
var (
blockNum = block.NumberU64()
difficulty = block.Difficulty()
cache = l.getCache(blockNum)
dagSize = C.ethash_get_datasize(C.uint64_t(blockNum))
)
if l.test {
dagSize = dagSizeForTesting
}
if blockNum >= epochLength*2048 {
glog.V(logger.Debug).Infof("block number %d too high, limit is %d", epochLength*2048)
return false
}
// Recompute the hash using the cache.
hash := hashToH256(block.HashNoNonce())
ret := C.ethash_light_compute_internal(cache.ptr, dagSize, hash, C.uint64_t(block.Nonce()))
if !ret.success {
return false
}
// Make sure cache is live until after the C call.
// This is important because a GC might happen and execute
// the finalizer before the call completes.
_ = cache
// The actual check.
target := new(big.Int).Div(minDifficulty, difficulty)
return h256ToHash(ret.result).Big().Cmp(target) <= 0
}

func h256ToHash(in C.ethash_h256_t) common.Hash {
return *(*common.Hash)(unsafe.Pointer(&in.b))
}

func hashToH256(in common.Hash) C.ethash_h256_t {
return C.ethash_h256_t{b: *(*[32]C.uint8_t)(unsafe.Pointer(&in[0]))}
}

func (l *Light) getCache(blockNum uint64) *cache {
var c *cache
epoch := blockNum / epochLength
// Update or reuse the last cache.
l.mu.Lock()
if l.current != nil && l.current.epoch == epoch {
c = l.current
} else {
c = &cache{epoch: epoch, test: l.test}
l.current = c
}
l.mu.Unlock()
// Wait for the cache to finish generating.
c.generate()
return c
}

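The final comparison in Verify is plain big-integer arithmetic: the 32-byte ethash result, read as a big-endian number, must be at most 2^256 divided by the block difficulty. A self-contained sketch of just that check, with illustrative values that are not part of the diff:

package main

import (
	"fmt"
	"math/big"
)

// validNonce mirrors the target computation used in Verify:
// target = 2^256 / difficulty, and a nonce is valid iff result <= target.
func validNonce(result [32]byte, difficulty *big.Int) bool {
	maxUint256 := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
	target := new(big.Int).Div(maxUint256, difficulty)
	return new(big.Int).SetBytes(result[:]).Cmp(target) <= 0
}

func main() {
	var result [32]byte // all zeroes: the smallest possible result, always valid
	fmt.Println(validNonce(result, big.NewInt(132416)))
}
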
// dag wraps an ethash_full_t with some metadata
// and automatic memory management.
type dag struct {
epoch uint64
test bool
dir string

gen sync.Once // ensures DAG is only generated once.
ptr *C.struct_ethash_full
}

// generate creates the actual DAG. it can be called from multiple
// goroutines. the first call will generate the DAG, subsequent
// calls wait until it is generated.
func (d *dag) generate() {
d.gen.Do(func() {
var (
started = time.Now()
seedHash = makeSeedHash(d.epoch)
blockNum = C.uint64_t(d.epoch * epochLength)
cacheSize = C.ethash_get_cachesize(blockNum)
dagSize = C.ethash_get_datasize(blockNum)
)
if d.test {
cacheSize = cacheSizeForTesting
dagSize = dagSizeForTesting
}
if d.dir == "" {
d.dir = DefaultDir
}
glog.V(logger.Info).Infof("Generating DAG for epoch %d (%x)", d.epoch, seedHash)
// Generate a temporary cache.
// TODO: this could share the cache with Light
cache := C.ethash_light_new_internal(cacheSize, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
defer C.ethash_light_delete(cache)
// Generate the actual DAG.
d.ptr = C.ethash_full_new_internal(
C.CString(d.dir),
hashToH256(seedHash),
dagSize,
cache,
(C.ethash_callback_t)(unsafe.Pointer(C.ethashGoCallback_cgo)),
)
if d.ptr == nil {
panic("ethash_full_new IO or memory error")
}
runtime.SetFinalizer(d, freeDAG)
glog.V(logger.Info).Infof("Done generating DAG for epoch %d, it took %v", d.epoch, time.Since(started))
})
}

func freeDAG(h *dag) {
C.ethash_full_delete(h.ptr)
h.ptr = nil
}

//export ethashGoCallback
func ethashGoCallback(percent C.unsigned) C.int {
glog.V(logger.Info).Infof("Still generating DAG: %d%%", percent)
return 0
}

// MakeDAG pre-generates a DAG file for the given block number in the
// given directory. If dir is the empty string, the default directory
// is used.
func MakeDAG(blockNum uint64, dir string) error {
d := &dag{epoch: blockNum / epochLength, dir: dir}
if blockNum >= epochLength*2048 {
return fmt.Errorf("block number too high, limit is %d", epochLength*2048)
}
d.generate()
if d.ptr == nil {
return errors.New("failed")
}
return nil
}

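cache.generate above and dag.generate share one idiom: sync.Once makes concurrent callers block until the first generation finishes, and runtime.SetFinalizer releases the underlying allocation once the wrapper becomes unreachable. A cgo-free sketch of the same pattern, using a hypothetical resource type that is not part of the diff:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// resource stands in for the cgo-backed cache/dag wrappers.
type resource struct {
	gen sync.Once // first caller generates, later callers wait
	buf []byte    // stands in for the C allocation
}

func (r *resource) generate() {
	r.gen.Do(func() {
		r.buf = make([]byte, 1024)
		// Release the allocation when r is garbage collected.
		runtime.SetFinalizer(r, func(r *resource) { r.buf = nil })
	})
}

func main() {
	r := new(resource)
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); r.generate() }() // only one goroutine does the work
	}
	wg.Wait()
	fmt.Println(len(r.buf))
}
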
func makeDAG(p *ParamsAndCache) *DAG {
d := &DAG{
dag: C.malloc(C.size_t(p.params.full_size)),
file: false,
paramsAndCache: p,
}
// Full implements the Search half of the proof of work.
type Full struct {
Dir string // use this to specify a non-default DAG directory

donech := make(chan string)
go func() {
t := time.NewTicker(5 * time.Second)
tstart := time.Now()
done:
for {
select {
case <-t.C:
glog.V(logger.Info).Infof("... still generating DAG (%v) ...\n", time.Since(tstart).Seconds())
case str := <-donech:
glog.V(logger.Info).Infof("... %s ...\n", str)
break done
}
}
}()
C.ethash_compute_full_data(d.dag, p.params, p.cache)
donech <- "DAG generation completed"
test bool // if set use a smaller DAG size
turbo bool
hashRate int64

mu sync.Mutex // protects dag
current *dag // current full DAG
}

func (pow *Full) getDAG(blockNum uint64) (d *dag) {
epoch := blockNum / epochLength
pow.mu.Lock()
if pow.current != nil && pow.current.epoch == epoch {
d = pow.current
} else {
d = &dag{epoch: epoch, test: pow.test, dir: pow.Dir}
pow.current = d
}
pow.mu.Unlock()
// wait for it to finish generating.
d.generate()
return d
}

func (pow *Ethash) writeDagToDisk(dag *DAG, epoch uint64) *os.File {
if epoch > 2048 {
panic(fmt.Errorf("Epoch must be less than 2048 (is %v)", epoch))
}
data := C.GoBytes(unsafe.Pointer(dag.dag), C.int(dag.paramsAndCache.params.full_size))
file, err := os.Create("/tmp/dag")
if err != nil {
panic(err)
}

dataEpoch := make([]byte, 8)
binary.BigEndian.PutUint64(dataEpoch, epoch)

file.Write(dataEpoch)
file.Write(data)

return file
}

func (pow *Ethash) UpdateDAG() {
blockNum := pow.chainManager.CurrentBlock().NumberU64()
if blockNum >= epochLength*2048 {
// This will crash in the 2030s or 2040s
panic(fmt.Errorf("Current block number is out of bounds (value %v, limit is %v)", blockNum, epochLength*2048))
}

pow.dagMutex.Lock()
defer pow.dagMutex.Unlock()
thisEpoch := blockNum / epochLength
if pow.dag == nil || pow.dag.paramsAndCache.Epoch != thisEpoch {
if pow.dag != nil && pow.dag.dag != nil {
C.free(pow.dag.dag)
pow.dag.dag = nil
}

if pow.dag != nil && pow.dag.paramsAndCache.cache.mem != nil {
C.free(pow.dag.paramsAndCache.cache.mem)
pow.dag.paramsAndCache.cache.mem = nil
}

// Make the params and cache for the DAG
paramsAndCache, err := makeParamsAndCache(pow.chainManager, blockNum)
if err != nil {
panic(err)
}

// TODO: On non-SSD disks, loading the DAG from disk takes longer than generating it in memory
pow.paramsAndCache = paramsAndCache
path := path.Join("/", "tmp", "dag")
pow.dag = nil
glog.V(logger.Info).Infoln("Retrieving DAG")
start := time.Now()

file, err := os.Open(path)
if err != nil {
glog.V(logger.Info).Infof("No DAG found. Generating new DAG in '%s' (this takes a while)...\n", path)
pow.dag = makeDAG(paramsAndCache)
file = pow.writeDagToDisk(pow.dag, thisEpoch)
pow.dag.file = true
} else {
data, err := ioutil.ReadAll(file)
if err != nil {
glog.V(logger.Info).Infof("DAG load err: %v\n", err)
}

if len(data) < 8 {
glog.V(logger.Info).Infof("DAG in '%s' is less than 8 bytes, it must be corrupted. Generating new DAG (this takes a while)...\n", path)
pow.dag = makeDAG(paramsAndCache)
file = pow.writeDagToDisk(pow.dag, thisEpoch)
pow.dag.file = true
} else {
dataEpoch := binary.BigEndian.Uint64(data[0:8])
if dataEpoch < thisEpoch {
glog.V(logger.Info).Infof("DAG in '%s' is stale. Generating new DAG (this takes a while)...\n", path)
pow.dag = makeDAG(paramsAndCache)
file = pow.writeDagToDisk(pow.dag, thisEpoch)
pow.dag.file = true
} else if dataEpoch > thisEpoch {
// FIXME
panic(fmt.Errorf("Saved DAG in '%s' reports to be from future epoch %v (current epoch is %v)\n", path, dataEpoch, thisEpoch))
} else if len(data) != (int(paramsAndCache.params.full_size) + 8) {
glog.V(logger.Info).Infof("DAG in '%s' is corrupted. Generating new DAG (this takes a while)...\n", path)
pow.dag = makeDAG(paramsAndCache)
file = pow.writeDagToDisk(pow.dag, thisEpoch)
pow.dag.file = true
} else {
data = data[8:]
pow.dag = &DAG{
dag: unsafe.Pointer(&data[0]),
file: true,
paramsAndCache: paramsAndCache,
}
}
}
}
glog.V(logger.Info).Infoln("Took:", time.Since(start))

file.Close()
}
}

func New(chainManager pow.ChainManager) *Ethash {
paramsAndCache, err := makeParamsAndCache(chainManager, chainManager.CurrentBlock().NumberU64())
if err != nil {
panic(err)
}

return &Ethash{
turbo: true,
paramsAndCache: paramsAndCache,
chainManager: chainManager,
dag: nil,
cacheMutex: new(sync.RWMutex),
dagMutex: new(sync.RWMutex),
}
}

func (pow *Ethash) DAGSize() uint64 {
return uint64(pow.dag.paramsAndCache.params.full_size)
}

func (pow *Ethash) CacheSize() uint64 {
return uint64(pow.paramsAndCache.params.cache_size)
}

func GetSeedHash(blockNum uint64) ([]byte, error) {
if blockNum >= epochLength*2048 {
return nil, fmt.Errorf("block number is out of bounds (value %v, limit is %v)", blockNum, epochLength*2048)
}

epoch := blockNum / epochLength
seedHash := make([]byte, 32)
var i uint64
for i = 0; i < 32; i++ {
seedHash[i] = 0
}
for i = 0; i < epoch; i++ {
seedHash = crypto.Sha3(seedHash)
}
return seedHash, nil
}

func (pow *Ethash) Stop() {
pow.cacheMutex.Lock()
pow.dagMutex.Lock()
defer pow.dagMutex.Unlock()
defer pow.cacheMutex.Unlock()

if pow.paramsAndCache.cache != nil {
C.free(pow.paramsAndCache.cache.mem)
}
if pow.dag.dag != nil && !pow.dag.file {
C.free(pow.dag.dag)
}
if pow.dag != nil && pow.dag.paramsAndCache != nil && pow.dag.paramsAndCache.cache.mem != nil {
C.free(pow.dag.paramsAndCache.cache.mem)
pow.dag.paramsAndCache.cache.mem = nil
}
pow.dag.dag = nil
}

func (pow *Ethash) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte, []byte) {
pow.UpdateDAG()

pow.dagMutex.RLock()
defer pow.dagMutex.RUnlock()
func (pow *Full) Search(block pow.Block, stop <-chan struct{}) (nonce uint64, mixDigest []byte) {
dag := pow.getDAG(block.NumberU64())

r := rand.New(rand.NewSource(time.Now().UnixNano()))
miningHash := block.HashNoNonce()
diff := block.Difficulty()

i := int64(0)
starti := i
start := time.Now().UnixNano()

nonce := uint64(r.Int63())
cMiningHash := (*C.ethash_blockhash_t)(unsafe.Pointer(&miningHash[0]))
nonce = uint64(r.Int63())
hash := hashToH256(block.HashNoNonce())
target := new(big.Int).Div(minDifficulty, diff)

var ret C.ethash_return_value
for {
select {
case <-stop:
pow.HashRate = 0
return 0, nil, nil
pow.hashRate = 0
return 0, nil
default:
i++

elapsed := time.Now().UnixNano() - start
hashes := ((float64(1e9) / float64(elapsed)) * float64(i-starti)) / 1000
pow.HashRate = int64(hashes)
pow.hashRate = int64(hashes)

C.ethash_full(&ret, pow.dag.dag, pow.dag.paramsAndCache.params, cMiningHash, C.uint64_t(nonce))
result := common.Bytes2Big(C.GoBytes(unsafe.Pointer(&ret.result), C.int(32)))
ret := C.ethash_full_compute(dag.ptr, hash, C.uint64_t(nonce))
result := h256ToHash(ret.result).Big()

// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
if result.Cmp(target) <= 0 {
mixDigest := C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32))
seedHash, err := GetSeedHash(block.NumberU64()) // This seedhash is useless
if err != nil {
panic(err)
}
return nonce, mixDigest, seedHash
if ret.success && result.Cmp(target) <= 0 {
mixDigest = C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32))
return nonce, mixDigest
}

nonce += 1
}

@@ -357,82 +296,57 @@ func (pow *Ethash) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte
time.Sleep(20 * time.Microsecond)
}
}

}

func (pow *Ethash) Verify(block pow.Block) bool {

return pow.verify(block.HashNoNonce(), block.MixDigest(), block.Difficulty(), block.NumberU64(), block.Nonce())
func (pow *Full) GetHashrate() int64 {
// TODO: this needs to use an atomic operation.
return pow.hashRate
}

func (pow *Ethash) verify(hash common.Hash, mixDigest common.Hash, difficulty *big.Int, blockNum uint64, nonce uint64) bool {
// Make sure the block num is valid
if blockNum >= epochLength*2048 {
glog.V(logger.Info).Infoln(fmt.Sprintf("Block number exceeds limit, invalid (value is %v, limit is %v)",
blockNum, epochLength*2048))
return false
}

// First check: make sure header, mixDigest, nonce are correct without hitting the cache
// This is to prevent DOS attacks
chash := (*C.ethash_blockhash_t)(unsafe.Pointer(&hash[0]))
cnonce := C.uint64_t(nonce)
target := new(big.Int).Div(minDifficulty, difficulty)

var pAc *ParamsAndCache
// If its an old block (doesn't use the current cache)
// get the cache for it but don't update (so we don't need the mutex)
// Otherwise, it's the current block or a future block.
// If current, updateCache will do nothing.
if blockNum/epochLength < pow.paramsAndCache.Epoch {
var err error
// If we can't make the params for some reason, this block is invalid
pAc, err = makeParamsAndCache(pow.chainManager, blockNum)
if err != nil {
glog.V(logger.Info).Infoln("big fucking eror", err)
return false
}
} else {
pow.UpdateCache(blockNum, false)
pow.cacheMutex.RLock()
defer pow.cacheMutex.RUnlock()
pAc = pow.paramsAndCache
}

ret := new(C.ethash_return_value)

C.ethash_light(ret, pAc.cache, pAc.params, chash, cnonce)

result := common.Bytes2Big(C.GoBytes(unsafe.Pointer(&ret.result), C.int(32)))
return result.Cmp(target) <= 0
}

func (pow *Ethash) GetHashrate() int64 {
return pow.HashRate
}

func (pow *Ethash) Turbo(on bool) {
func (pow *Full) Turbo(on bool) {
// TODO: this needs to use an atomic operation.
pow.turbo = on
}

func (pow *Ethash) FullHash(nonce uint64, miningHash []byte) []byte {
pow.UpdateDAG()
pow.dagMutex.Lock()
defer pow.dagMutex.Unlock()
cMiningHash := (*C.ethash_blockhash_t)(unsafe.Pointer(&miningHash[0]))
cnonce := C.uint64_t(nonce)
ret := new(C.ethash_return_value)
// pow.hash is the output/return of ethash_full
C.ethash_full(ret, pow.dag.dag, pow.paramsAndCache.params, cMiningHash, cnonce)
ghash_full := C.GoBytes(unsafe.Pointer(&ret.result), 32)
return ghash_full
// Ethash combines block verification with Light and
// nonce searching with Full into a single proof of work.
type Ethash struct {
*Light
*Full
}

func (pow *Ethash) LightHash(nonce uint64, miningHash []byte) []byte {
cMiningHash := (*C.ethash_blockhash_t)(unsafe.Pointer(&miningHash[0]))
cnonce := C.uint64_t(nonce)
ret := new(C.ethash_return_value)
C.ethash_light(ret, pow.paramsAndCache.cache, pow.paramsAndCache.params, cMiningHash, cnonce)
ghash_light := C.GoBytes(unsafe.Pointer(&ret.result), 32)
return ghash_light
// New creates an instance of the proof of work.
// A single instance of Light is shared across all instances
// created with New.
func New() *Ethash {
return &Ethash{sharedLight, &Full{turbo: true}}
}

// NewForTesting creates a proof of work for use in unit tests.
// It uses a smaller DAG and cache size to keep test times low.
// DAG files are stored in a temporary directory.
//
// Nonces found by a testing instance are not verifiable with a
// regular-size cache.
func NewForTesting() (*Ethash, error) {
dir, err := ioutil.TempDir("", "ethash-test")
if err != nil {
return nil, err
}
return &Ethash{&Light{test: true}, &Full{Dir: dir, test: true}}, nil
}

func GetSeedHash(blockNum uint64) ([]byte, error) {
if blockNum >= epochLength*2048 {
return nil, fmt.Errorf("block number too high, limit is %d", epochLength*2048)
}
sh := makeSeedHash(blockNum / epochLength)
return sh[:], nil
}

func makeSeedHash(epoch uint64) (sh common.Hash) {
for ; epoch > 0; epoch-- {
sh = crypto.Sha3Hash(sh[:])
}
return sh
}

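The new ethash.Ethash type simply embeds the shared Light verifier and a per-instance Full miner, so one value offers both Search and Verify. A compile-only usage sketch under that assumption; blk stands for any value implementing pow.Block (such as the testBlock type added in ethash_test.go below), and setNonce is a hypothetical helper for writing the result back into the header:

package example

import (
	"fmt"

	"github.com/ethereum/ethash"
	"github.com/ethereum/go-ethereum/pow"
)

// mineAndCheck searches for a nonce with the full DAG, then verifies the
// sealed block using only the small light cache.
func mineAndCheck(blk pow.Block, setNonce func(nonce uint64, mixDigest []byte)) {
	eth := ethash.New() // shared Light, fresh Full with turbo enabled
	nonce, mixDigest := eth.Search(blk, nil)
	setNonce(nonce, mixDigest) // Verify reads the nonce from the block itself
	fmt.Println("sealed with nonce", nonce, "valid:", eth.Verify(blk))
}
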
Godeps/_workspace/src/github.com/ethereum/ethash/ethash_test.go  (176 lines changed; generated; vendored; new file)

@@ -0,0 +1,176 @@
package ethash

import (
"bytes"
"crypto/rand"
"encoding/hex"
"log"
"math/big"
"os"
"sync"
"testing"

"github.com/ethereum/go-ethereum/common"
)

func init() {
// glog.SetV(6)
// glog.SetToStderr(true)
}

type testBlock struct {
difficulty *big.Int
hashNoNonce common.Hash
nonce uint64
mixDigest common.Hash
number uint64
}

func (b *testBlock) Difficulty() *big.Int { return b.difficulty }
func (b *testBlock) HashNoNonce() common.Hash { return b.hashNoNonce }
func (b *testBlock) Nonce() uint64 { return b.nonce }
func (b *testBlock) MixDigest() common.Hash { return b.mixDigest }
func (b *testBlock) NumberU64() uint64 { return b.number }

var validBlocks = []*testBlock{
// from proof of concept nine testnet, epoch 0
{
number: 22,
hashNoNonce: common.HexToHash("372eca2454ead349c3df0ab5d00b0b706b23e49d469387db91811cee0358fc6d"),
difficulty: big.NewInt(132416),
nonce: 0x495732e0ed7a801c,
},
// from proof of concept nine testnet, epoch 1
{
number: 30001,
hashNoNonce: common.HexToHash("7e44356ee3441623bc72a683fd3708fdf75e971bbe294f33e539eedad4b92b34"),
difficulty: big.NewInt(1532671),
nonce: 0x318df1c8adef7e5e,
},
// from proof of concept nine testnet, epoch 2
{
number: 60000,
hashNoNonce: common.HexToHash("5fc898f16035bf5ac9c6d9077ae1e3d5fc1ecc3c9fd5bee8bb00e810fdacbaa0"),
difficulty: big.NewInt(2467358),
nonce: 0x50377003e5d830ca,
},
}

func TestEthashVerifyValid(t *testing.T) {
eth := New()
for i, block := range validBlocks {
if !eth.Verify(block) {
t.Errorf("block %d (%x) did not validate.", i, block.hashNoNonce[:6])
}
}
}

func TestEthashConcurrentVerify(t *testing.T) {
eth, err := NewForTesting()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(eth.Full.Dir)

block := &testBlock{difficulty: big.NewInt(10)}
nonce, _ := eth.Search(block, nil)
block.nonce = nonce

// Verify the block concurrently to check for data races.
var wg sync.WaitGroup
wg.Add(100)
for i := 0; i < 100; i++ {
go func() {
if !eth.Verify(block) {
t.Error("Block could not be verified")
}
wg.Done()
}()
}
wg.Wait()
}

func TestEthashConcurrentSearch(t *testing.T) {
eth, err := NewForTesting()
if err != nil {
t.Fatal(err)
}
eth.Turbo(true)
defer os.RemoveAll(eth.Full.Dir)

// launch n searches concurrently.
var (
block = &testBlock{difficulty: big.NewInt(35000)}
nsearch = 10
wg = new(sync.WaitGroup)
found = make(chan uint64)
stop = make(chan struct{})
)
rand.Read(block.hashNoNonce[:])
wg.Add(nsearch)
for i := 0; i < nsearch; i++ {
go func() {
nonce, _ := eth.Search(block, stop)
select {
case found <- nonce:
case <-stop:
}
wg.Done()
}()
}

// wait for one of them to find the nonce
nonce := <-found
// stop the others
close(stop)
wg.Wait()

if block.nonce = nonce; !eth.Verify(block) {
t.Error("Block could not be verified")
}
}

func TestEthashSearchAcrossEpoch(t *testing.T) {
eth, err := NewForTesting()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(eth.Full.Dir)

for i := epochLength - 40; i < epochLength+40; i++ {
block := &testBlock{number: i, difficulty: big.NewInt(90)}
rand.Read(block.hashNoNonce[:])
nonce, _ := eth.Search(block, nil)
block.nonce = nonce
if !eth.Verify(block) {
t.Fatalf("Block could not be verified")
}
}
}

func TestGetSeedHash(t *testing.T) {
seed0, err := GetSeedHash(0)
if err != nil {
t.Errorf("Failed to get seedHash for block 0: %v", err)
}
if bytes.Compare(seed0, make([]byte, 32)) != 0 {
log.Printf("seedHash for block 0 should be 0s, was: %v\n", seed0)
}
seed1, err := GetSeedHash(30000)
if err != nil {
t.Error(err)
}

// From python:
// > from pyethash import get_seedhash
// > get_seedhash(30000)
expectedSeed1, err := hex.DecodeString("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
if err != nil {
t.Error(err)
}

if bytes.Compare(seed1, expectedSeed1) != 0 {
log.Printf("seedHash for block 1 should be: %v,\nactual value: %v\n", expectedSeed1, seed1)
}

}

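TestGetSeedHash pins the epoch-1 seed to a pyethash value; that value is just Keccak-256 applied once to 32 zero bytes, which is exactly what makeSeedHash computes. A standalone check of the same relation; it assumes the golang.org/x/crypto/sha3 package, which is not a dependency of this commit:

package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/sha3" // assumed for illustration only
)

func main() {
	// Epoch 0 seed is 32 zero bytes; each later epoch hashes the previous
	// seed once more with Keccak-256, so GetSeedHash(30000) (epoch 1)
	// should equal Keccak-256 of 32 zero bytes.
	seed := make([]byte, 32)
	h := sha3.NewLegacyKeccak256()
	h.Write(seed)
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
	// expected: 290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563
}
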
Godeps/_workspace/src/github.com/ethereum/ethash/ethashc.go  (25 lines changed; generated; vendored; new file)

@@ -0,0 +1,25 @@
package ethash

/*
#cgo CFLAGS: -std=gnu99 -Wall
#cgo windows CFLAGS: -mno-stack-arg-probe
#cgo LDFLAGS: -lm

#include "src/libethash/internal.c"
#include "src/libethash/sha3.c"
#include "src/libethash/io.c"

#ifdef _WIN32
# include "src/libethash/util_win32.c"
# include "src/libethash/io_win32.c"
# include "src/libethash/mmap_win32.c"
#else
# include "src/libethash/io_posix.c"
#endif

// 'gateway function' for calling back into go.
extern int ethashGoCallback(unsigned);
int ethashGoCallback_cgo(unsigned percent) { return ethashGoCallback(percent); }

*/
import "C"

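The 'gateway function' exists because C code cannot call an exported Go function through a plain function pointer directly: the C shim ethashGoCallback_cgo is what gets handed to ethash_full_new_internal, and it forwards into the //export'ed Go callback. A minimal two-file sketch of the same pattern with hypothetical names; the definitions live in a file without //export, since cgo forbids definitions in the preamble of files that export Go functions:

// file: gateway.go
package progressdemo

/*
typedef int (*progress_cb)(unsigned);
// Toy stand-in for a C library call that takes a callback.
static int run_with_callback(progress_cb cb) { return cb(42); }

extern int progressGoCallback(unsigned);
// Gateway: a real C function whose address can be passed to C code.
int progressGoCallback_cgo(unsigned percent) { return progressGoCallback(percent); }
*/
import "C"
import "unsafe"

// Run invokes the C routine, which reports back into Go via the gateway.
func Run() {
	C.run_with_callback((C.progress_cb)(unsafe.Pointer(C.progressGoCallback_cgo)))
}

// file: callback.go
package progressdemo

import "C"
import "fmt"

//export progressGoCallback
func progressGoCallback(percent C.unsigned) C.int {
	fmt.Println("progress:", percent)
	return 0
}
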
Godeps/_workspace/src/github.com/ethereum/ethash/setup.py  (47 lines changed; generated; vendored)

@@ -1,23 +1,36 @@
#!/usr/bin/env python
import os
from distutils.core import setup, Extension

sources = [
'src/python/core.c',
'src/libethash/io.c',
'src/libethash/internal.c',
'src/libethash/sha3.c']
if os.name == 'nt':
sources += [
'src/libethash/util_win32.c',
'src/libethash/io_win32.c',
'src/libethash/mmap_win32.c',
]
else:
sources += [
'src/libethash/io_posix.c'
]
depends = [
'src/libethash/ethash.h',
'src/libethash/compiler.h',
'src/libethash/data_sizes.h',
'src/libethash/endian.h',
'src/libethash/ethash.h',
'src/libethash/io.h',
'src/libethash/fnv.h',
'src/libethash/internal.h',
'src/libethash/sha3.h',
'src/libethash/util.h',
]
pyethash = Extension('pyethash',
sources=[
'src/python/core.c',
'src/libethash/util.c',
'src/libethash/internal.c',
'src/libethash/sha3.c'],
depends=[
'src/libethash/ethash.h',
'src/libethash/compiler.h',
'src/libethash/data_sizes.h',
'src/libethash/endian.h',
'src/libethash/ethash.h',
'src/libethash/fnv.h',
'src/libethash/internal.h',
'src/libethash/sha3.h',
'src/libethash/util.h'
],
sources=sources,
depends=depends,
extra_compile_args=["-Isrc/", "-std=gnu99", "-Wall"])

setup(

Godeps/_workspace/src/github.com/ethereum/ethash/src/benchmark/CMakeLists.txt  (44 lines changed; generated; vendored)

@@ -3,56 +3,56 @@ include_directories(..)
set(CMAKE_BUILD_TYPE Release)

if (MSVC)
add_definitions("/openmp")
add_definitions("/openmp")
endif()

# enable C++11, should probably be a bit more specific about compiler
if (NOT MSVC)
SET(CMAKE_CXX_FLAGS "-std=c++11")
SET(CMAKE_CXX_FLAGS "-std=c++11")
endif()

if (NOT MPI_FOUND)
find_package(MPI)
find_package(MPI)
endif()

if (NOT CRYPTOPP_FOUND)
find_package(CryptoPP 5.6.2)
find_package(CryptoPP 5.6.2)
endif()

if (CRYPTOPP_FOUND)
add_definitions(-DWITH_CRYPTOPP)
add_definitions(-DWITH_CRYPTOPP)
find_package (Threads REQUIRED)
endif()

if (NOT OpenCL_FOUND)
find_package(OpenCL)
find_package(OpenCL)
endif()
if (OpenCL_FOUND)
add_definitions(-DWITH_OPENCL)
include_directories(${OpenCL_INCLUDE_DIRS})
list(APPEND FILES ethash_cl_miner.cpp ethash_cl_miner.h)
add_definitions(-DWITH_OPENCL)
include_directories(${OpenCL_INCLUDE_DIRS})
list(APPEND FILES ethash_cl_miner.cpp ethash_cl_miner.h)
endif()

if (MPI_FOUND)
include_directories(${MPI_INCLUDE_PATH})
add_executable (Benchmark_MPI_FULL benchmark.cpp)
target_link_libraries (Benchmark_MPI_FULL ${ETHHASH_LIBS} ${MPI_LIBRARIES})
SET_TARGET_PROPERTIES(Benchmark_MPI_FULL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} ${MPI_COMPILE_FLAGS} -DFULL -DMPI")
include_directories(${MPI_INCLUDE_PATH})
add_executable (Benchmark_MPI_FULL benchmark.cpp)
target_link_libraries (Benchmark_MPI_FULL ${ETHHASH_LIBS} ${MPI_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
SET_TARGET_PROPERTIES(Benchmark_MPI_FULL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} ${MPI_COMPILE_FLAGS} -DFULL -DMPI")

add_executable (Benchmark_MPI_LIGHT benchmark.cpp)
target_link_libraries (Benchmark_MPI_LIGHT ${ETHHASH_LIBS} ${MPI_LIBRARIES})
SET_TARGET_PROPERTIES(Benchmark_MPI_LIGHT PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} ${MPI_COMPILE_FLAGS} -DMPI")
add_executable (Benchmark_MPI_LIGHT benchmark.cpp)
target_link_libraries (Benchmark_MPI_LIGHT ${ETHHASH_LIBS} ${MPI_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
SET_TARGET_PROPERTIES(Benchmark_MPI_LIGHT PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} ${MPI_COMPILE_FLAGS} -DMPI")
endif()

add_executable (Benchmark_FULL benchmark.cpp)
target_link_libraries (Benchmark_FULL ${ETHHASH_LIBS})
target_link_libraries (Benchmark_FULL ${ETHHASH_LIBS} ${CMAKE_THREAD_LIBS_INIT})
SET_TARGET_PROPERTIES(Benchmark_FULL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} -DFULL")

add_executable (Benchmark_LIGHT benchmark.cpp)
target_link_libraries (Benchmark_LIGHT ${ETHHASH_LIBS})
target_link_libraries (Benchmark_LIGHT ${ETHHASH_LIBS} ${CMAKE_THREAD_LIBS_INIT})

if (OpenCL_FOUND)
add_executable (Benchmark_CL benchmark.cpp)
target_link_libraries (Benchmark_CL ${ETHHASH_LIBS} ethash-cl)
SET_TARGET_PROPERTIES(Benchmark_CL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} -DOPENCL")
add_executable (Benchmark_CL benchmark.cpp)
target_link_libraries (Benchmark_CL ${ETHHASH_LIBS} ethash-cl ${CMAKE_THREAD_LIBS_INIT})
SET_TARGET_PROPERTIES(Benchmark_CL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} -DOPENCL")
endif()

Godeps/_workspace/src/github.com/ethereum/ethash/src/benchmark/benchmark.cpp  (55 lines changed; generated; vendored)

@@ -96,6 +96,11 @@ static std::string bytesToHexString(uint8_t const* bytes, unsigned size)
return str;
}

static std::string bytesToHexString(ethash_h256_t const *hash, unsigned size)
{
return bytesToHexString((uint8_t*)hash, size);
}

extern "C" int main(void)
{
// params for ethash
@@ -106,8 +111,8 @@ extern "C" int main(void)
//params.full_size = 8209 * 4096; // 8MBish;
//params.cache_size = 8209*4096;
//params.cache_size = 2053*4096;
ethash_blockhash_t seed;
ethash_blockhash_t previous_hash;
ethash_h256_t seed;
ethash_h256_t previous_hash;

memcpy(&seed, hexStringToBytes("9410b944535a83d9adf6bbdcc80e051f30676173c16ca0d32d6f1263fc246466").data(), 32);
memcpy(&previous_hash, hexStringToBytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").data(), 32);
@@ -126,20 +131,20 @@ extern "C" int main(void)
// compute cache or full data
{
auto startTime = high_resolution_clock::now();
ethash_mkcache(&cache, &params, seed);
ethash_mkcache(&cache, &params, &seed);
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();

ethash_blockhash_t cache_hash;
ethash_h256_t cache_hash;
SHA3_256(&cache_hash, (uint8_t const*)cache_mem, params.cache_size);
debugf("ethash_mkcache: %ums, sha3: %s\n", (unsigned)((time*1000)/CLOCKS_PER_SEC), bytesToHexString(cache_hash,sizeof(cache_hash)).data());
debugf("ethash_mkcache: %ums, sha3: %s\n", (unsigned)((time*1000)/CLOCKS_PER_SEC), bytesToHexString(&cache_hash, sizeof(cache_hash)).data());

// print a couple of test hashes
{
auto startTime = high_resolution_clock::now();
ethash_return_value hash;
ethash_light(&hash, &cache, &params, previous_hash, 0);
ethash_light(&hash, &cache, &params, &previous_hash, 0);
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
debugf("ethash_light test: %ums, %s\n", (unsigned)time, bytesToHexString(hash.result, 32).data());
debugf("ethash_light test: %ums, %s\n", (unsigned)time, bytesToHexString(&hash.result, 32).data());
}

#ifdef FULL
@@ -154,34 +159,34 @@ extern "C" int main(void)
ethash_cl_miner miner;
{
auto startTime = high_resolution_clock::now();
if (!miner.init(params, seed))
if (!miner.init(params, &seed))
exit(-1);
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
debugf("ethash_cl_miner init: %ums\n", (unsigned)time);
debugf("ethash_cl_miner init: %ums\n", (unsigned)time);
}
#endif


#ifdef FULL
{
auto startTime = high_resolution_clock::now();
{
auto startTime = high_resolution_clock::now();
ethash_return_value hash;
ethash_full(&hash, full_mem, &params, previous_hash, 0);
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
debugf("ethash_full test: %uns, %s\n", (unsigned)time);
}
ethash_full(&hash, full_mem, &params, &previous_hash, 0);
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
debugf("ethash_full test: %uns\n", (unsigned)time);
}
#endif

#ifdef OPENCL
// validate 1024 hashes against CPU
miner.hash(g_hashes, previous_hash, 0, 1024);
miner.hash(g_hashes, (uint8_t*)&previous_hash, 0, 1024);
for (unsigned i = 0; i != 1024; ++i)
{
ethash_return_value hash;
ethash_light(&hash, &cache, &params, previous_hash, i);
if (memcmp(hash.result, g_hashes + 32*i, 32) != 0)
ethash_light(&hash, &cache, &params, &previous_hash, i);
if (memcmp(&hash.result, g_hashes + 32*i, 32) != 0)
{
debugf("nonce %u failed: %s %s\n", i, bytesToHexString(g_hashes + 32*i, 32).c_str(), bytesToHexString(hash.result, 32).c_str());
debugf("nonce %u failed: %s %s\n", i, bytesToHexString(g_hashes + 32*i, 32).c_str(), bytesToHexString(&hash.result, 32).c_str());
static unsigned c = 0;
if (++c == 16)
{
@@ -220,14 +225,14 @@ extern "C" int main(void)
search_hook hook;
hook.hash_count = 0;

miner.search(previous_hash, 0x000000ffffffffff, hook);
miner.search((uint8_t*)&previous_hash, 0x000000ffffffffff, hook);

for (unsigned i = 0; i != hook.nonce_vec.size(); ++i)
{
uint64_t nonce = hook.nonce_vec[i];
ethash_return_value hash;
ethash_light(&hash, &cache, &params, previous_hash, nonce);
debugf("found: %.8x%.8x -> %s\n", unsigned(nonce>>32), unsigned(nonce), bytesToHexString(hash.result, 32).c_str());
ethash_light(&hash, &cache, &params, &previous_hash, nonce);
debugf("found: %.8x%.8x -> %s\n", unsigned(nonce>>32), unsigned(nonce), bytesToHexString(&hash.result, 32).c_str());
}

hash_count = hook.hash_count;
@@ -239,9 +244,9 @@ extern "C" int main(void)
{
ethash_return_value hash;
#ifdef FULL
ethash_full(&hash, full_mem, &params, previous_hash, nonce);
ethash_full(&hash, full_mem, &params, &previous_hash, nonce);
#else
ethash_light(&hash, &cache, &params, previous_hash, nonce);
ethash_light(&hash, &cache, &params, &previous_hash, nonce);
#endif // FULL
}
}
@@ -249,7 +254,7 @@ extern "C" int main(void)
auto time = std::chrono::duration_cast<std::chrono::microseconds>(high_resolution_clock::now() - startTime).count();
debugf("Search took: %ums\n", (unsigned)time/1000);

unsigned read_size = ACCESSES * MIX_BYTES;
unsigned read_size = ETHASH_ACCESSES * ETHASH_MIX_BYTES;
#if defined(OPENCL) || defined(FULL)
debugf(
"hashrate: %8.2f Mh/s, bw: %8.2f GB/s\n",

Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash-cl/CMakeLists.txt  (2 lines changed; generated; vendored)

@@ -13,7 +13,7 @@ if (NOT MSVC)
set(CMAKE_CXX_FLAGS "-Wall -std=c++11")
set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g")
set(CMAKE_CXX_FLAGS_MINSIZEREL "-Os -DNDEBUG")
set(CMAKE_CXX_FLAGS_RELEASE "-O4 -DNDEBUG")
set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O2 -g")

# Compiler-specific C++11 activation.

Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash-cl/ethash_cl_miner.cpp  (141 lines changed; generated; vendored)

@@ -24,12 +24,15 @@

#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <assert.h>
#include <queue>
#include <vector>
#include <libethash/util.h>
#include <libethash/ethash.h>
#include <libethash/internal.h>
#include "ethash_cl_miner.h"
#include "ethash_cl_miner_kernel.h"
#include <libethash/util.h>

#define ETHASH_BYTES 32

@@ -42,6 +45,8 @@
#undef min
#undef max

using namespace std;

static void add_definition(std::string& source, char const* id, unsigned value)
{
char buf[256];
@@ -49,52 +54,108 @@ static void add_definition(std::string& source, char const* id, unsigned value)
source.insert(source.begin(), buf, buf + strlen(buf));
}

ethash_cl_miner::search_hook::~search_hook() {}

ethash_cl_miner::ethash_cl_miner()
: m_opencl_1_1()
{
}

bool ethash_cl_miner::init(ethash_params const& params, ethash_blockhash_t const *seed, unsigned workgroup_size)
std::string ethash_cl_miner::platform_info(unsigned _platformId, unsigned _deviceId)
{
// store params
m_params = params;

// get all platforms
std::vector<cl::Platform> platforms;
cl::Platform::get(&platforms);
std::vector<cl::Platform> platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
debugf("No OpenCL platforms found.\n");
return false;
cout << "No OpenCL platforms found." << endl;
return std::string();
}

// use default platform
debugf("Using platform: %s\n", platforms[0].getInfo<CL_PLATFORM_NAME>().c_str());

// get GPU device of the default platform
std::vector<cl::Device> devices;
platforms[0].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
// get GPU device of the selected platform
std::vector<cl::Device> devices;
unsigned platform_num = std::min<unsigned>(_platformId, platforms.size() - 1);
platforms[platform_num].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
{
debugf("No OpenCL devices found.\n");
return false;
cout << "No OpenCL devices found." << endl;
return std::string();
}

// use default device
unsigned device_num = 0;
// use selected default device
unsigned device_num = std::min<unsigned>(_deviceId, devices.size() - 1);
cl::Device& device = devices[device_num];
std::string device_version = device.getInfo<CL_DEVICE_VERSION>();
debugf("Using device: %s (%s)\n", device.getInfo<CL_DEVICE_NAME>().c_str(),device_version.c_str());

return "{ \"platform\": \"" + platforms[platform_num].getInfo<CL_PLATFORM_NAME>() + "\", \"device\": \"" + device.getInfo<CL_DEVICE_NAME>() + "\", \"version\": \"" + device_version + "\" }";
}

unsigned ethash_cl_miner::get_num_devices(unsigned _platformId)
{
std::vector<cl::Platform> platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
cout << "No OpenCL platforms found." << endl;
return 0;
}

std::vector<cl::Device> devices;
unsigned platform_num = std::min<unsigned>(_platformId, platforms.size() - 1);
platforms[platform_num].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
{
cout << "No OpenCL devices found." << endl;
return 0;
}
return devices.size();
}

void ethash_cl_miner::finish()
{
if (m_queue())
m_queue.finish();
}

bool ethash_cl_miner::init(uint64_t block_number, std::function<void(void*)> _fillDAG, unsigned workgroup_size, unsigned _platformId, unsigned _deviceId)
{
// store params
m_fullSize = ethash_get_datasize(block_number);

// get all platforms
std::vector<cl::Platform> platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
cout << "No OpenCL platforms found." << endl;
return false;
}

// use selected platform
_platformId = std::min<unsigned>(_platformId, platforms.size() - 1);

cout << "Using platform: " << platforms[_platformId].getInfo<CL_PLATFORM_NAME>().c_str() << endl;

// get GPU device of the default platform
std::vector<cl::Device> devices;
platforms[_platformId].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
{
cout << "No OpenCL devices found." << endl;
return false;
}

// use selected device
cl::Device& device = devices[std::min<unsigned>(_deviceId, devices.size() - 1)];
std::string device_version = device.getInfo<CL_DEVICE_VERSION>();
cout << "Using device: " << device.getInfo<CL_DEVICE_NAME>().c_str() << "(" << device_version.c_str() << ")" << endl;

if (strncmp("OpenCL 1.0", device_version.c_str(), 10) == 0)
{
debugf("OpenCL 1.0 is not supported.\n");
cout << "OpenCL 1.0 is not supported." << endl;
return false;
}
if (strncmp("OpenCL 1.1", device_version.c_str(), 10) == 0)
{
m_opencl_1_1 = true;
}

// create context
m_context = cl::Context(std::vector<cl::Device>(&device, &device + 1));
@@ -106,8 +167,8 @@ bool ethash_cl_miner::init(ethash_params const& params, ethash_blockhash_t const
// patch source code
std::string code(ETHASH_CL_MINER_KERNEL, ETHASH_CL_MINER_KERNEL + ETHASH_CL_MINER_KERNEL_SIZE);
add_definition(code, "GROUP_SIZE", m_workgroup_size);
add_definition(code, "DAG_SIZE", (unsigned)(params.full_size / MIX_BYTES));
add_definition(code, "ACCESSES", ACCESSES);
add_definition(code, "DAG_SIZE", (unsigned)(m_fullSize / ETHASH_MIX_BYTES));
add_definition(code, "ACCESSES", ETHASH_ACCESSES);
add_definition(code, "MAX_OUTPUTS", c_max_search_results);
//debugf("%s", code.c_str());

@@ -122,31 +183,25 @@ bool ethash_cl_miner::init(ethash_params const& params, ethash_blockhash_t const
}
catch (cl::Error err)
{
debugf("%s\n", program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(device).c_str());
cout << program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(device).c_str();
return false;
}
m_hash_kernel = cl::Kernel(program, "ethash_hash");
m_search_kernel = cl::Kernel(program, "ethash_search");

// create buffer for dag
m_dag = cl::Buffer(m_context, CL_MEM_READ_ONLY, params.full_size);
m_dag = cl::Buffer(m_context, CL_MEM_READ_ONLY, m_fullSize);

// create buffer for header
m_header = cl::Buffer(m_context, CL_MEM_READ_ONLY, 32);

// compute dag on CPU
{
void* cache_mem = malloc(params.cache_size + 63);
ethash_cache cache;
cache.mem = (void*)(((uintptr_t)cache_mem + 63) & ~63);
ethash_mkcache(&cache, &params, seed);

// if this throws then it's because we probably need to subdivide the dag uploads for compatibility
void* dag_ptr = m_queue.enqueueMapBuffer(m_dag, true, m_opencl_1_1 ? CL_MAP_WRITE : CL_MAP_WRITE_INVALIDATE_REGION, 0, params.full_size);
ethash_compute_full_data(dag_ptr, &params, &cache);
void* dag_ptr = m_queue.enqueueMapBuffer(m_dag, true, m_opencl_1_1 ? CL_MAP_WRITE : CL_MAP_WRITE_INVALIDATE_REGION, 0, m_fullSize);
// memcpying 1GB: horrible... really. horrible. but necessary since we can't mmap *and* gpumap.
_fillDAG(dag_ptr);
m_queue.enqueueUnmapMemObject(m_dag, dag_ptr);

free(cache_mem);
}

// create mining buffers
@@ -191,8 +246,8 @@ void ethash_cl_miner::hash(uint8_t* ret, uint8_t const* header, uint64_t nonce,
// how many this batch
if (i < count)
{
unsigned const this_count = std::min(count - i, c_hash_batch_size);
unsigned const batch_count = std::max(this_count, m_workgroup_size);
unsigned const this_count = std::min<unsigned>(count - i, c_hash_batch_size);
unsigned const batch_count = std::max<unsigned>(this_count, m_workgroup_size);

// supply output hash buffer to kernel
m_hash_kernel.setArg(0, m_hash_buf[buf]);
@@ -245,7 +300,7 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
m_queue.enqueueWriteBuffer(m_search_buf[i], false, 0, 4, &c_zero);
}

#if CL_VERSION_1_2
#if CL_VERSION_1_2 && 0
cl::Event pre_return_event;
if (!m_opencl_1_1)
{
@@ -295,7 +350,7 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook

// could use pinned host pointer instead
uint32_t* results = (uint32_t*)m_queue.enqueueMapBuffer(m_search_buf[batch.buf], true, CL_MAP_READ, 0, (1+c_max_search_results) * sizeof(uint32_t));
unsigned num_found = std::min(results[0], c_max_search_results);
unsigned num_found = std::min<unsigned>(results[0], c_max_search_results);

uint64_t nonces[c_max_search_results];
for (unsigned i = 0; i != num_found; ++i)
@@ -319,7 +374,7 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
}

// not safe to return until this is ready
#if CL_VERSION_1_2
#if CL_VERSION_1_2 && 0
if (!m_opencl_1_1)
{
pre_return_event.wait();

24 Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash-cl/ethash_cl_miner.h (generated, vendored)
@ -2,8 +2,18 @@
|
||||
|
||||
#define __CL_ENABLE_EXCEPTIONS
|
||||
#define CL_USE_DEPRECATED_OPENCL_2_0_APIS
|
||||
|
||||
#if defined(__clang__)
|
||||
#pragma clang diagnostic push
|
||||
#pragma clang diagnostic ignored "-Wunused-parameter"
|
||||
#include "cl.hpp"
|
||||
#pragma clang diagnostic pop
|
||||
#else
|
||||
#include "cl.hpp"
|
||||
#endif
|
||||
|
||||
#include <time.h>
|
||||
#include <functional>
|
||||
#include <libethash/ethash.h>
|
||||
|
||||
class ethash_cl_miner
|
||||
@ -11,6 +21,8 @@ class ethash_cl_miner
|
||||
public:
|
||||
struct search_hook
|
||||
{
|
||||
virtual ~search_hook(); // always a virtual destructor for a class with virtuals.
|
||||
|
||||
// reports progress, return true to abort
|
||||
virtual bool found(uint64_t const* nonces, uint32_t count) = 0;
|
||||
virtual bool searched(uint64_t start_nonce, uint32_t count) = 0;
|
||||
@ -19,19 +31,19 @@ public:
|
||||
public:
|
||||
ethash_cl_miner();
|
||||
|
||||
bool init(ethash_params const& params, ethash_blockhash_t const *seed, unsigned workgroup_size = 64);
|
||||
bool init(uint64_t block_number, std::function<void(void*)> _fillDAG, unsigned workgroup_size = 64, unsigned _platformId = 0, unsigned _deviceId = 0);
|
||||
static std::string platform_info(unsigned _platformId = 0, unsigned _deviceId = 0);
|
||||
static unsigned get_num_devices(unsigned _platformId = 0);
|
||||
|
||||
|
||||
void finish();
|
||||
void hash(uint8_t* ret, uint8_t const* header, uint64_t nonce, unsigned count);
|
||||
void search(uint8_t const* header, uint64_t target, search_hook& hook);
|
||||
|
||||
private:
|
||||
static unsigned const c_max_search_results = 63;
|
||||
static unsigned const c_num_buffers = 2;
|
||||
static unsigned const c_hash_batch_size = 1024;
|
||||
static unsigned const c_search_batch_size = 1024*256;
|
||||
enum { c_max_search_results = 63, c_num_buffers = 2, c_hash_batch_size = 1024, c_search_batch_size = 1024*256 };
|
||||
|
||||
ethash_params m_params;
|
||||
uint64_t m_fullSize;
|
||||
cl::Context m_context;
|
||||
cl::CommandQueue m_queue;
|
||||
cl::Kernel m_hash_kernel;
|
||||
|
5 Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash-cl/ethash_cl_miner_kernel.cl (generated, vendored)
@ -415,8 +415,7 @@ __kernel void ethash_search_simple(
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash_simple(g_header, g_dag, start_nonce + gid, isolate);
|
||||
|
||||
if (hash.ulongs[countof(hash.ulongs)-1] < target)
|
||||
if (as_ulong(as_uchar8(hash.ulongs[0]).s76543210) < target)
|
||||
{
|
||||
uint slot = min(MAX_OUTPUTS, atomic_inc(&g_output[0]) + 1);
|
||||
g_output[slot] = gid;
|
||||
@ -453,7 +452,7 @@ __kernel void ethash_search(
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash(share, g_header, g_dag, start_nonce + gid, isolate);
|
||||
|
||||
if (hash.ulongs[countof(hash.ulongs)-1] < target)
|
||||
if (as_ulong(as_uchar8(hash.ulongs[0]).s76543210) < target)
|
||||
{
|
||||
uint slot = min(MAX_OUTPUTS, atomic_inc(&g_output[0]) + 1);
|
||||
g_output[slot] = gid;
|
||||
|
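The updated comparison reinterprets the first 8 bytes of the hash as a big-endian number before testing it against the 64-bit target. A hedged host-side C equivalent of that check, assuming the target holds the upper bits of the difficulty boundary:

```c
#include <stdbool.h>
#include <stdint.h>

/* Treat bytes 0..7 of the 32-byte hash as a big-endian integer and compare
 * it with the 64-bit target. Function name is illustrative. */
static bool hash_meets_target(uint8_t const hash[32], uint64_t target)
{
	uint64_t upper = 0;
	for (int i = 0; i < 8; i++) {
		upper = (upper << 8) | hash[i]; /* big-endian read of bytes 0..7 */
	}
	return upper < target;
}
```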
5 Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/CMakeLists.txt (generated, vendored)
@ -10,8 +10,7 @@ if (NOT MSVC)
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99")
|
||||
endif()
|
||||
|
||||
set(FILES util.c
|
||||
util.h
|
||||
set(FILES util.h
|
||||
io.c
|
||||
internal.c
|
||||
ethash.h
|
||||
@ -21,7 +20,7 @@ set(FILES util.c
|
||||
data_sizes.h)
|
||||
|
||||
if (MSVC)
|
||||
list(APPEND FILES io_win32.c)
|
||||
list(APPEND FILES util_win32.c io_win32.c mmap_win32.c)
|
||||
else()
|
||||
list(APPEND FILES io_posix.c)
|
||||
endif()
|
||||
|
1474 Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/data_sizes.h (generated, vendored)
File diff suppressed because it is too large
72 Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h (generated, vendored)
@ -3,38 +3,6 @@
|
||||
#include <stdint.h>
|
||||
#include "compiler.h"
|
||||
|
||||
static const uint8_t BitReverseTable256[] =
|
||||
{
|
||||
0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
|
||||
0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
|
||||
0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
|
||||
0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
|
||||
0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
|
||||
0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
|
||||
0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
|
||||
0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
|
||||
0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
|
||||
0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
|
||||
0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
|
||||
0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
|
||||
0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
|
||||
0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
|
||||
0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
|
||||
0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
|
||||
};
|
||||
|
||||
static inline uint32_t bitfn_swap32(uint32_t a) {
|
||||
return (BitReverseTable256[a & 0xff] << 24) |
|
||||
(BitReverseTable256[(a >> 8) & 0xff] << 16) |
|
||||
(BitReverseTable256[(a >> 16) & 0xff] << 8) |
|
||||
(BitReverseTable256[(a >> 24) & 0xff]);
|
||||
}
|
||||
|
||||
static inline uint64_t bitfn_swap64(uint64_t a) {
|
||||
return ((uint64_t) bitfn_swap32((uint32_t) (a >> 32))) |
|
||||
(((uint64_t) bitfn_swap32((uint32_t) a)) << 32);
|
||||
}
|
||||
|
||||
#if defined(__MINGW32__) || defined(_WIN32)
|
||||
# define LITTLE_ENDIAN 1234
|
||||
# define BYTE_ORDER LITTLE_ENDIAN
|
||||
@ -53,21 +21,51 @@ static inline uint64_t bitfn_swap64(uint64_t a) {
|
||||
# define BIG_ENDIAN 1234
|
||||
# define BYTE_ORDER BIG_ENDIAN
|
||||
#else
|
||||
|
||||
# include <endian.h>
|
||||
#endif
|
||||
|
||||
#if defined(_WIN32)
|
||||
#include <stdlib.h>
|
||||
#define ethash_swap_u32(input_) _byteswap_ulong(input_)
|
||||
#define ethash_swap_u64(input_) _byteswap_uint64(input_)
|
||||
#elif defined(__APPLE__)
|
||||
#include <libkern/OSByteOrder.h>
|
||||
#define ethash_swap_u32(input_) OSSwapInt32(input_)
|
||||
#define ethash_swap_u64(input_) OSSwapInt64(input_)
|
||||
#else // posix
|
||||
#include <byteswap.h>
|
||||
#define ethash_swap_u32(input_) __bswap_32(input_)
|
||||
#define ethash_swap_u64(input_) __bswap_64(input_)
|
||||
#endif
|
||||
|
||||
|
||||
#if LITTLE_ENDIAN == BYTE_ORDER
|
||||
|
||||
#define fix_endian32(x) (x)
|
||||
#define fix_endian64(x) (x)
|
||||
#define fix_endian32(dst_, src_) dst_ = src_
|
||||
#define fix_endian32_same(val_)
|
||||
#define fix_endian64(dst_, src_) dst_ = src_
|
||||
#define fix_endian64_same(val_)
|
||||
#define fix_endian_arr32(arr_, size_)
|
||||
#define fix_endian_arr64(arr_, size_)
|
||||
|
||||
#elif BIG_ENDIAN == BYTE_ORDER
|
||||
|
||||
#define fix_endian32(x) bitfn_swap32(x)
|
||||
#define fix_endian64(x) bitfn_swap64(x)
|
||||
#define fix_endian32(dst_, src_) dst_ = ethash_swap_u32(src_)
|
||||
#define fix_endian32_same(val_) val_ = ethash_swap_u32(val_)
|
||||
#define fix_endian64(dst_, src_) dst_ = ethash_swap_u64(src_)
|
||||
#define fix_endian64_same(val_) val_ = ethash_swap_u64(val_)
|
||||
#define fix_endian_arr32(arr_, size_) \
do { \
	for (unsigned i_ = 0; i_ < (size_); ++i_) { \
		arr_[i_] = ethash_swap_u32(arr_[i_]); \
	} \
} while (0)
#define fix_endian_arr64(arr_, size_) \
do { \
	for (unsigned i_ = 0; i_ < (size_); ++i_) { \
		arr_[i_] = ethash_swap_u64(arr_[i_]); \
	} \
} while (0)
|
||||
|
||||
#else
|
||||
# error "endian not supported"
|
||||
|
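A small hedged illustration of what the two-argument fix_endian64 macro is used for, mirroring how internal.c packs the nonce behind the header hash so the 40-byte seed has the same byte layout on little- and big-endian hosts:

```c
#include <stdint.h>
#include <string.h>
#include "endian.h"

/* Illustration only: build the 40-byte seed (header hash + nonce). */
static void pack_seed(uint8_t buf[40], uint8_t const header_hash[32], uint64_t nonce)
{
	uint64_t packed;
	memcpy(buf, header_hash, 32);
	fix_endian64(packed, nonce); /* a plain assignment on little-endian hosts */
	memcpy(buf + 32, &packed, 8);
}
```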
207 Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/ethash.h (generated, vendored)
@ -26,124 +26,121 @@
|
||||
#include <stddef.h>
|
||||
#include "compiler.h"
|
||||
|
||||
#define REVISION 23
|
||||
#define DATASET_BYTES_INIT 1073741824U // 2**30
|
||||
#define DATASET_BYTES_GROWTH 8388608U // 2**23
|
||||
#define CACHE_BYTES_INIT 1073741824U // 2**24
|
||||
#define CACHE_BYTES_GROWTH 131072U // 2**17
|
||||
#define EPOCH_LENGTH 30000U
|
||||
#define MIX_BYTES 128
|
||||
#define HASH_BYTES 64
|
||||
#define DATASET_PARENTS 256
|
||||
#define CACHE_ROUNDS 3
|
||||
#define ACCESSES 64
|
||||
#define ETHASH_REVISION 23
|
||||
#define ETHASH_DATASET_BYTES_INIT 1073741824U // 2**30
|
||||
#define ETHASH_DATASET_BYTES_GROWTH 8388608U // 2**23
|
||||
#define ETHASH_CACHE_BYTES_INIT 1073741824U // 2**24
|
||||
#define ETHASH_CACHE_BYTES_GROWTH 131072U // 2**17
|
||||
#define ETHASH_EPOCH_LENGTH 30000U
|
||||
#define ETHASH_MIX_BYTES 128
|
||||
#define ETHASH_HASH_BYTES 64
|
||||
#define ETHASH_DATASET_PARENTS 256
|
||||
#define ETHASH_CACHE_ROUNDS 3
|
||||
#define ETHASH_ACCESSES 64
|
||||
#define ETHASH_DAG_MAGIC_NUM_SIZE 8
|
||||
#define ETHASH_DAG_MAGIC_NUM 0xFEE1DEADBADDCAFE
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct ethash_params {
|
||||
uint64_t full_size; // Size of full data set (in bytes, multiple of mix size (128)).
|
||||
uint64_t cache_size; // Size of compute cache (in bytes, multiple of node size (64)).
|
||||
} ethash_params;
|
||||
/// Type of a seedhash/blockhash etc.
|
||||
typedef struct ethash_h256 { uint8_t b[32]; } ethash_h256_t;
|
||||
|
||||
/// Type of a blockhash
|
||||
typedef struct ethash_blockhash { uint8_t b[32]; } ethash_blockhash_t;
|
||||
static inline uint8_t ethash_blockhash_get(ethash_blockhash_t const* hash, unsigned int i)
|
||||
{
|
||||
return hash->b[i];
|
||||
}
|
||||
// convenience macro to statically initialize an h256_t
|
||||
// usage:
|
||||
// ethash_h256_t a = ethash_h256_static_init(1, 2, 3, ... )
|
||||
// You do not have to provide all 32 values; any you omit will
// simply be uninitialized (not guaranteed to be 0)
|
||||
#define ethash_h256_static_init(...) \
|
||||
{ {__VA_ARGS__} }
|
||||
|
||||
static inline void ethash_blockhash_set(ethash_blockhash_t *hash, unsigned int i, uint8_t v)
|
||||
{
|
||||
hash->b[i] = v;
|
||||
}
|
||||
|
||||
static inline void ethash_blockhash_reset(ethash_blockhash_t *hash)
|
||||
{
|
||||
memset(hash, 0, 32);
|
||||
}
|
||||
struct ethash_light;
|
||||
typedef struct ethash_light* ethash_light_t;
|
||||
struct ethash_full;
|
||||
typedef struct ethash_full* ethash_full_t;
|
||||
typedef int(*ethash_callback_t)(unsigned);
|
||||
|
||||
typedef struct ethash_return_value {
|
||||
ethash_blockhash_t result;
|
||||
ethash_blockhash_t mix_hash;
|
||||
} ethash_return_value;
|
||||
ethash_h256_t result;
|
||||
ethash_h256_t mix_hash;
|
||||
bool success;
|
||||
} ethash_return_value_t;
|
||||
|
||||
uint64_t ethash_get_datasize(const uint32_t block_number);
|
||||
uint64_t ethash_get_cachesize(const uint32_t block_number);
|
||||
/**
|
||||
* Allocate and initialize a new ethash_light handler
|
||||
*
|
||||
* @param block_number The block number for which to create the handler
|
||||
* @return Newly allocated ethash_light handler or NULL in case of
|
||||
* ERRNOMEM or invalid parameters used for @ref ethash_compute_cache_nodes()
|
||||
*/
|
||||
ethash_light_t ethash_light_new(uint64_t block_number);
|
||||
/**
|
||||
* Frees a previously allocated ethash_light handler
|
||||
* @param light The light handler to free
|
||||
*/
|
||||
void ethash_light_delete(ethash_light_t light);
|
||||
/**
|
||||
* Calculate the light client data
|
||||
*
|
||||
* @param light The light client handler
|
||||
* @param header_hash The header hash to pack into the mix
|
||||
* @param nonce The nonce to pack into the mix
|
||||
* @return an object of ethash_return_value_t holding the return values
|
||||
*/
|
||||
ethash_return_value_t ethash_light_compute(
|
||||
ethash_light_t light,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t nonce
|
||||
);
|
||||
|
||||
// initialize the parameters
|
||||
static inline void ethash_params_init(ethash_params *params, const uint32_t block_number) {
|
||||
params->full_size = ethash_get_datasize(block_number);
|
||||
params->cache_size = ethash_get_cachesize(block_number);
|
||||
}
|
||||
/**
|
||||
* Allocate and initialize a new ethash_full handler
|
||||
*
|
||||
* @param light The light handler containing the cache.
|
||||
* @param callback A callback function with signature of @ref ethash_callback_t
|
||||
* It accepts an unsigned with which a progress of DAG calculation
|
||||
* can be displayed. If all goes well the callback should return 0.
|
||||
* If a non-zero value is returned then DAG generation will stop.
|
||||
* Be advised. A progress value of 100 means that DAG creation is
|
||||
* almost complete and that this function will soon return successfully.
* It does not mean that the function has already returned successfully.
|
||||
* @return Newly allocated ethash_full handler or NULL in case of
|
||||
* ERRNOMEM or invalid parameters used for @ref ethash_compute_full_data()
|
||||
*/
|
||||
ethash_full_t ethash_full_new(ethash_light_t light, ethash_callback_t callback);
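A possible callback matching ethash_callback_t, shown only as an illustration of the contract described above:

```c
#include <stdio.h>

/* Report DAG generation progress and return 0 so generation continues;
 * any non-zero return value aborts it. */
static int dag_progress(unsigned percent)
{
	printf("DAG generation: %u%%\n", percent);
	return 0;
}

/* usage: ethash_full_t full = ethash_full_new(light, dag_progress); */
```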
|
||||
|
||||
typedef struct ethash_cache {
|
||||
void *mem;
|
||||
} ethash_cache;
|
||||
/**
|
||||
* Frees a previously allocated ethash_full handler
|
||||
* @param full The full handler to free
|
||||
*/
|
||||
void ethash_full_delete(ethash_full_t full);
|
||||
/**
|
||||
* Calculate the full client data
|
||||
*
|
||||
* @param full The full client handler
|
||||
* @param header_hash The header hash to pack into the mix
|
||||
* @param nonce The nonce to pack into the mix
|
||||
* @return An object of ethash_return_value to hold the return value
|
||||
*/
|
||||
ethash_return_value_t ethash_full_compute(
|
||||
ethash_full_t full,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t nonce
|
||||
);
|
||||
/**
|
||||
* Get a pointer to the full DAG data
|
||||
*/
|
||||
void const* ethash_full_dag(ethash_full_t full);
|
||||
/**
|
||||
* Get the size of the DAG data
|
||||
*/
|
||||
uint64_t ethash_full_dag_size(ethash_full_t full);
|
||||
|
||||
void ethash_mkcache(ethash_cache *cache, ethash_params const *params, ethash_blockhash_t const *seed);
|
||||
void ethash_compute_full_data(void *mem, ethash_params const *params, ethash_cache const *cache);
|
||||
void ethash_full(ethash_return_value *ret,
|
||||
void const *full_mem,
|
||||
ethash_params const *params,
|
||||
ethash_blockhash_t const *header_hash,
|
||||
const uint64_t nonce);
|
||||
void ethash_light(ethash_return_value *ret,
|
||||
ethash_cache const *cache,
|
||||
ethash_params const *params,
|
||||
ethash_blockhash_t const *header_hash,
|
||||
const uint64_t nonce);
|
||||
void ethash_get_seedhash(ethash_blockhash_t *seedhash, const uint32_t block_number);
|
||||
|
||||
static inline void ethash_prep_light(void *cache, ethash_params const *params, ethash_blockhash_t const* seed)
|
||||
{
|
||||
ethash_cache c;
|
||||
c.mem = cache;
|
||||
ethash_mkcache(&c, params, seed);
|
||||
}
|
||||
|
||||
static inline void ethash_compute_light(ethash_return_value *ret, void const *cache, ethash_params const *params, ethash_blockhash_t const *header_hash, const uint64_t nonce)
|
||||
{
|
||||
ethash_cache c;
|
||||
c.mem = (void *) cache;
|
||||
ethash_light(ret, &c, params, header_hash, nonce);
|
||||
}
|
||||
|
||||
static inline void ethash_prep_full(void *full, ethash_params const *params, void const *cache)
|
||||
{
|
||||
ethash_cache c;
|
||||
c.mem = (void *) cache;
|
||||
ethash_compute_full_data(full, params, &c);
|
||||
}
|
||||
|
||||
static inline void ethash_compute_full(ethash_return_value *ret,
|
||||
void const *full,
|
||||
ethash_params const *params,
|
||||
ethash_blockhash_t const *header_hash,
|
||||
const uint64_t nonce)
|
||||
{
|
||||
ethash_full(ret, full, params, header_hash, nonce);
|
||||
}
|
||||
|
||||
// Returns if hash is less than or equal to difficulty
|
||||
static inline int ethash_check_difficulty(ethash_blockhash_t const *hash,
|
||||
ethash_blockhash_t const *difficulty)
|
||||
{
|
||||
// Difficulty is big endian
|
||||
for (int i = 0; i < 32; i++) {
|
||||
if (ethash_blockhash_get(hash, i) == ethash_blockhash_get(difficulty, i)) {
|
||||
continue;
|
||||
}
|
||||
return ethash_blockhash_get(hash, i) < ethash_blockhash_get(difficulty, i);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
int ethash_quick_check_difficulty(ethash_blockhash_t const *header_hash,
|
||||
const uint64_t nonce,
|
||||
ethash_blockhash_t const *mix_hash,
|
||||
ethash_blockhash_t const *difficulty);
|
||||
/**
|
||||
* Calculate the seedhash for a given block number
|
||||
*/
|
||||
ethash_h256_t ethash_get_seedhash(uint64_t block_number);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
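A minimal usage sketch of the new light-client API declared above; error handling is reduced to the essentials and the function name is illustrative:

```c
#include <libethash/ethash.h>

/* Recompute the PoW for a given block number, header hash and nonce. */
int compute_pow_light(uint64_t block_number, ethash_h256_t header_hash, uint64_t nonce, ethash_return_value_t* out)
{
	ethash_light_t light = ethash_light_new(block_number);
	if (!light) {
		return -1;
	}
	*out = ethash_light_compute(light, header_hash, nonce);
	ethash_light_delete(light);
	/* out->result and out->mix_hash can now be checked against the block's
	 * boundary and mix digest */
	return out->success ? 0 : -1;
}
```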
5 Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/fnv.h (generated, vendored)
@ -29,8 +29,9 @@ extern "C" {
|
||||
|
||||
#define FNV_PRIME 0x01000193
|
||||
|
||||
static inline uint32_t fnv_hash(const uint32_t x, const uint32_t y) {
|
||||
return x*FNV_PRIME ^ y;
|
||||
static inline uint32_t fnv_hash(uint32_t const x, uint32_t const y)
|
||||
{
|
||||
return x * FNV_PRIME ^ y;
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
644 Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/internal.c (generated, vendored)
@ -8,11 +8,11 @@
|
||||
|
||||
ethash is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/** @file internal.c
|
||||
* @author Tim Hughes <tim@twistedfury.com>
|
||||
@ -23,11 +23,15 @@
|
||||
#include <assert.h>
|
||||
#include <inttypes.h>
|
||||
#include <stddef.h>
|
||||
#include <errno.h>
|
||||
#include <math.h>
|
||||
#include "mmap.h"
|
||||
#include "ethash.h"
|
||||
#include "fnv.h"
|
||||
#include "endian.h"
|
||||
#include "internal.h"
|
||||
#include "data_sizes.h"
|
||||
#include "io.h"
|
||||
|
||||
#ifdef WITH_CRYPTOPP
|
||||
|
||||
@ -37,274 +41,456 @@
|
||||
#include "sha3.h"
|
||||
#endif // WITH_CRYPTOPP
|
||||
|
||||
uint64_t ethash_get_datasize(const uint32_t block_number) {
|
||||
assert(block_number / EPOCH_LENGTH < 2048);
|
||||
return dag_sizes[block_number / EPOCH_LENGTH];
|
||||
uint64_t ethash_get_datasize(uint64_t const block_number)
|
||||
{
|
||||
assert(block_number / ETHASH_EPOCH_LENGTH < 2048);
|
||||
return dag_sizes[block_number / ETHASH_EPOCH_LENGTH];
|
||||
}
|
||||
|
||||
uint64_t ethash_get_cachesize(const uint32_t block_number) {
|
||||
assert(block_number / EPOCH_LENGTH < 2048);
|
||||
return cache_sizes[block_number / EPOCH_LENGTH];
|
||||
uint64_t ethash_get_cachesize(uint64_t const block_number)
|
||||
{
|
||||
assert(block_number / ETHASH_EPOCH_LENGTH < 2048);
|
||||
return cache_sizes[block_number / ETHASH_EPOCH_LENGTH];
|
||||
}
|
||||
|
||||
// Follows Sergio's "STRICT MEMORY HARD HASHING FUNCTIONS" (2014)
|
||||
// https://bitslog.files.wordpress.com/2013/12/memohash-v0-3.pdf
|
||||
// SeqMemoHash(s, R, N)
|
||||
void static ethash_compute_cache_nodes(node *const nodes,
|
||||
ethash_params const *params,
|
||||
ethash_blockhash_t const* seed)
|
||||
bool static ethash_compute_cache_nodes(
|
||||
node* const nodes,
|
||||
uint64_t cache_size,
|
||||
ethash_h256_t const* seed
|
||||
)
|
||||
{
|
||||
assert((params->cache_size % sizeof(node)) == 0);
|
||||
uint32_t const num_nodes = (uint32_t) (params->cache_size / sizeof(node));
|
||||
if (cache_size % sizeof(node) != 0) {
|
||||
return false;
|
||||
}
|
||||
uint32_t const num_nodes = (uint32_t) (cache_size / sizeof(node));
|
||||
|
||||
SHA3_512(nodes[0].bytes, (uint8_t*)seed, 32);
|
||||
SHA3_512(nodes[0].bytes, (uint8_t*)seed, 32);
|
||||
|
||||
for (unsigned i = 1; i != num_nodes; ++i) {
|
||||
SHA3_512(nodes[i].bytes, nodes[i - 1].bytes, 64);
|
||||
}
|
||||
for (uint32_t i = 1; i != num_nodes; ++i) {
|
||||
SHA3_512(nodes[i].bytes, nodes[i - 1].bytes, 64);
|
||||
}
|
||||
|
||||
for (unsigned j = 0; j != CACHE_ROUNDS; j++) {
|
||||
for (unsigned i = 0; i != num_nodes; i++) {
|
||||
uint32_t const idx = nodes[i].words[0] % num_nodes;
|
||||
node data;
|
||||
data = nodes[(num_nodes - 1 + i) % num_nodes];
|
||||
for (unsigned w = 0; w != NODE_WORDS; ++w) {
|
||||
data.words[w] ^= nodes[idx].words[w];
|
||||
}
|
||||
SHA3_512(nodes[i].bytes, data.bytes, sizeof(data));
|
||||
}
|
||||
}
|
||||
for (uint32_t j = 0; j != ETHASH_CACHE_ROUNDS; j++) {
|
||||
for (uint32_t i = 0; i != num_nodes; i++) {
|
||||
uint32_t const idx = nodes[i].words[0] % num_nodes;
|
||||
node data;
|
||||
data = nodes[(num_nodes - 1 + i) % num_nodes];
|
||||
for (uint32_t w = 0; w != NODE_WORDS; ++w) {
|
||||
data.words[w] ^= nodes[idx].words[w];
|
||||
}
|
||||
SHA3_512(nodes[i].bytes, data.bytes, sizeof(data));
|
||||
}
|
||||
}
|
||||
|
||||
// now perform endian conversion
|
||||
#if BYTE_ORDER != LITTLE_ENDIAN
|
||||
for (unsigned w = 0; w != (num_nodes*NODE_WORDS); ++w)
|
||||
{
|
||||
nodes->words[w] = fix_endian32(nodes->words[w]);
|
||||
}
|
||||
// now perform endian conversion
|
||||
fix_endian_arr32(nodes->words, num_nodes * NODE_WORDS);
|
||||
return true;
|
||||
}
|
||||
|
||||
void ethash_calculate_dag_item(
|
||||
node* const ret,
|
||||
uint32_t node_index,
|
||||
ethash_light_t const light
|
||||
)
|
||||
{
|
||||
uint32_t num_parent_nodes = (uint32_t) (light->cache_size / sizeof(node));
|
||||
node const* cache_nodes = (node const *) light->cache;
|
||||
node const* init = &cache_nodes[node_index % num_parent_nodes];
|
||||
memcpy(ret, init, sizeof(node));
|
||||
ret->words[0] ^= node_index;
|
||||
SHA3_512(ret->bytes, ret->bytes, sizeof(node));
|
||||
#if defined(_M_X64) && ENABLE_SSE
|
||||
__m128i const fnv_prime = _mm_set1_epi32(FNV_PRIME);
|
||||
__m128i xmm0 = ret->xmm[0];
|
||||
__m128i xmm1 = ret->xmm[1];
|
||||
__m128i xmm2 = ret->xmm[2];
|
||||
__m128i xmm3 = ret->xmm[3];
|
||||
#endif
|
||||
}
|
||||
|
||||
void ethash_mkcache(ethash_cache *cache,
|
||||
ethash_params const *params,
|
||||
ethash_blockhash_t const* seed)
|
||||
{
|
||||
node *nodes = (node *) cache->mem;
|
||||
ethash_compute_cache_nodes(nodes, params, seed);
|
||||
}
|
||||
|
||||
void ethash_calculate_dag_item(node *const ret,
|
||||
const unsigned node_index,
|
||||
const struct ethash_params *params,
|
||||
const struct ethash_cache *cache)
|
||||
{
|
||||
uint32_t num_parent_nodes = (uint32_t) (params->cache_size / sizeof(node));
|
||||
node const *cache_nodes = (node const *) cache->mem;
|
||||
node const *init = &cache_nodes[node_index % num_parent_nodes];
|
||||
|
||||
memcpy(ret, init, sizeof(node));
|
||||
ret->words[0] ^= node_index;
|
||||
SHA3_512(ret->bytes, ret->bytes, sizeof(node));
|
||||
for (uint32_t i = 0; i != ETHASH_DATASET_PARENTS; ++i) {
|
||||
uint32_t parent_index = fnv_hash(node_index ^ i, ret->words[i % NODE_WORDS]) % num_parent_nodes;
|
||||
node const *parent = &cache_nodes[parent_index];
|
||||
|
||||
#if defined(_M_X64) && ENABLE_SSE
|
||||
__m128i const fnv_prime = _mm_set1_epi32(FNV_PRIME);
|
||||
__m128i xmm0 = ret->xmm[0];
|
||||
__m128i xmm1 = ret->xmm[1];
|
||||
__m128i xmm2 = ret->xmm[2];
|
||||
__m128i xmm3 = ret->xmm[3];
|
||||
#endif
|
||||
{
|
||||
xmm0 = _mm_mullo_epi32(xmm0, fnv_prime);
|
||||
xmm1 = _mm_mullo_epi32(xmm1, fnv_prime);
|
||||
xmm2 = _mm_mullo_epi32(xmm2, fnv_prime);
|
||||
xmm3 = _mm_mullo_epi32(xmm3, fnv_prime);
|
||||
xmm0 = _mm_xor_si128(xmm0, parent->xmm[0]);
|
||||
xmm1 = _mm_xor_si128(xmm1, parent->xmm[1]);
|
||||
xmm2 = _mm_xor_si128(xmm2, parent->xmm[2]);
|
||||
xmm3 = _mm_xor_si128(xmm3, parent->xmm[3]);
|
||||
|
||||
for (unsigned i = 0; i != DATASET_PARENTS; ++i) {
|
||||
uint32_t parent_index = ((node_index ^ i) * FNV_PRIME ^ ret->words[i % NODE_WORDS]) % num_parent_nodes;
|
||||
node const *parent = &cache_nodes[parent_index];
|
||||
// have to write to ret as values are used to compute index
|
||||
ret->xmm[0] = xmm0;
|
||||
ret->xmm[1] = xmm1;
|
||||
ret->xmm[2] = xmm2;
|
||||
ret->xmm[3] = xmm3;
|
||||
}
|
||||
#else
|
||||
{
|
||||
for (unsigned w = 0; w != NODE_WORDS; ++w) {
|
||||
ret->words[w] = fnv_hash(ret->words[w], parent->words[w]);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
SHA3_512(ret->bytes, ret->bytes, sizeof(node));
|
||||
}
|
||||
|
||||
bool ethash_compute_full_data(
|
||||
void* mem,
|
||||
uint64_t full_size,
|
||||
ethash_light_t const light,
|
||||
ethash_callback_t callback
|
||||
)
|
||||
{
|
||||
if (full_size % (sizeof(uint32_t) * MIX_WORDS) != 0 ||
|
||||
(full_size % sizeof(node)) != 0) {
|
||||
return false;
|
||||
}
|
||||
uint32_t const max_n = (uint32_t)(full_size / sizeof(node));
|
||||
node* full_nodes = mem;
|
||||
double const progress_change = 1.0f / max_n;
|
||||
double progress = 0.0f;
|
||||
// now compute full nodes
|
||||
for (uint32_t n = 0; n != max_n; ++n) {
|
||||
if (callback &&
|
||||
n % (max_n / 100) == 0 &&
|
||||
callback((unsigned int)(ceil(progress * 100.0f))) != 0) {
|
||||
|
||||
return false;
|
||||
}
|
||||
progress += progress_change;
|
||||
ethash_calculate_dag_item(&(full_nodes[n]), n, light);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool ethash_hash(
|
||||
ethash_return_value_t* ret,
|
||||
node const* full_nodes,
|
||||
ethash_light_t const light,
|
||||
uint64_t full_size,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t const nonce
|
||||
)
|
||||
{
|
||||
if (full_size % MIX_WORDS != 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// pack hash and nonce together into first 40 bytes of s_mix
|
||||
assert(sizeof(node) * 8 == 512);
|
||||
node s_mix[MIX_NODES + 1];
|
||||
memcpy(s_mix[0].bytes, &header_hash, 32);
|
||||
fix_endian64(s_mix[0].double_words[4], nonce);
|
||||
|
||||
// compute sha3-512 hash and replicate across mix
|
||||
SHA3_512(s_mix->bytes, s_mix->bytes, 40);
|
||||
fix_endian_arr32(s_mix[0].words, 16);
|
||||
|
||||
node* const mix = s_mix + 1;
|
||||
for (uint32_t w = 0; w != MIX_WORDS; ++w) {
|
||||
mix->words[w] = s_mix[0].words[w % NODE_WORDS];
|
||||
}
|
||||
|
||||
unsigned const page_size = sizeof(uint32_t) * MIX_WORDS;
|
||||
unsigned const num_full_pages = (unsigned) (full_size / page_size);
|
||||
|
||||
for (unsigned i = 0; i != ETHASH_ACCESSES; ++i) {
|
||||
uint32_t const index = fnv_hash(s_mix->words[0] ^ i, mix->words[i % MIX_WORDS]) % num_full_pages;
|
||||
|
||||
for (unsigned n = 0; n != MIX_NODES; ++n) {
|
||||
node const* dag_node;
|
||||
if (full_nodes) {
|
||||
dag_node = &full_nodes[MIX_NODES * index + n];
|
||||
} else {
|
||||
node tmp_node;
|
||||
ethash_calculate_dag_item(&tmp_node, index * MIX_NODES + n, light);
|
||||
dag_node = &tmp_node;
|
||||
}
|
||||
|
||||
#if defined(_M_X64) && ENABLE_SSE
|
||||
{
|
||||
xmm0 = _mm_mullo_epi32(xmm0, fnv_prime);
|
||||
xmm1 = _mm_mullo_epi32(xmm1, fnv_prime);
|
||||
xmm2 = _mm_mullo_epi32(xmm2, fnv_prime);
|
||||
xmm3 = _mm_mullo_epi32(xmm3, fnv_prime);
|
||||
xmm0 = _mm_xor_si128(xmm0, parent->xmm[0]);
|
||||
xmm1 = _mm_xor_si128(xmm1, parent->xmm[1]);
|
||||
xmm2 = _mm_xor_si128(xmm2, parent->xmm[2]);
|
||||
xmm3 = _mm_xor_si128(xmm3, parent->xmm[3]);
|
||||
|
||||
// have to write to ret as values are used to compute index
|
||||
ret->xmm[0] = xmm0;
|
||||
ret->xmm[1] = xmm1;
|
||||
ret->xmm[2] = xmm2;
|
||||
ret->xmm[3] = xmm3;
|
||||
}
|
||||
#else
|
||||
{
|
||||
for (unsigned w = 0; w != NODE_WORDS; ++w) {
|
||||
ret->words[w] = fnv_hash(ret->words[w], parent->words[w]);
|
||||
}
|
||||
}
|
||||
{
|
||||
__m128i fnv_prime = _mm_set1_epi32(FNV_PRIME);
|
||||
__m128i xmm0 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[0]);
|
||||
__m128i xmm1 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[1]);
|
||||
__m128i xmm2 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[2]);
|
||||
__m128i xmm3 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[3]);
|
||||
mix[n].xmm[0] = _mm_xor_si128(xmm0, dag_node->xmm[0]);
|
||||
mix[n].xmm[1] = _mm_xor_si128(xmm1, dag_node->xmm[1]);
|
||||
mix[n].xmm[2] = _mm_xor_si128(xmm2, dag_node->xmm[2]);
|
||||
mix[n].xmm[3] = _mm_xor_si128(xmm3, dag_node->xmm[3]);
|
||||
}
|
||||
#else
|
||||
{
|
||||
for (unsigned w = 0; w != NODE_WORDS; ++w) {
|
||||
mix[n].words[w] = fnv_hash(mix[n].words[w], dag_node->words[w]);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
SHA3_512(ret->bytes, ret->bytes, sizeof(node));
|
||||
}
|
||||
|
||||
// compress mix
|
||||
for (uint32_t w = 0; w != MIX_WORDS; w += 4) {
|
||||
uint32_t reduction = mix->words[w + 0];
|
||||
reduction = reduction * FNV_PRIME ^ mix->words[w + 1];
|
||||
reduction = reduction * FNV_PRIME ^ mix->words[w + 2];
|
||||
reduction = reduction * FNV_PRIME ^ mix->words[w + 3];
|
||||
mix->words[w / 4] = reduction;
|
||||
}
|
||||
|
||||
fix_endian_arr32(mix->words, MIX_WORDS / 4);
|
||||
memcpy(&ret->mix_hash, mix->bytes, 32);
|
||||
// final Keccak hash
|
||||
SHA3_256(&ret->result, s_mix->bytes, 64 + 32); // Keccak-256(s + compressed_mix)
|
||||
return true;
|
||||
}
|
||||
|
||||
void ethash_compute_full_data(
|
||||
void *mem,
|
||||
ethash_params const *params,
|
||||
ethash_cache const *cache) {
|
||||
assert((params->full_size % (sizeof(uint32_t) * MIX_WORDS)) == 0);
|
||||
assert((params->full_size % sizeof(node)) == 0);
|
||||
node *full_nodes = mem;
|
||||
|
||||
// now compute full nodes
|
||||
for (unsigned n = 0; n != (params->full_size / sizeof(node)); ++n) {
|
||||
ethash_calculate_dag_item(&(full_nodes[n]), n, params, cache);
|
||||
}
|
||||
void ethash_quick_hash(
|
||||
ethash_h256_t* return_hash,
|
||||
ethash_h256_t const* header_hash,
|
||||
uint64_t const nonce,
|
||||
ethash_h256_t const* mix_hash
|
||||
)
|
||||
{
|
||||
uint8_t buf[64 + 32];
|
||||
memcpy(buf, header_hash, 32);
|
||||
fix_endian64_same(nonce);
|
||||
memcpy(&(buf[32]), &nonce, 8);
|
||||
SHA3_512(buf, buf, 40);
|
||||
memcpy(&(buf[64]), mix_hash, 32);
|
||||
SHA3_256(return_hash, buf, 64 + 32);
|
||||
}
|
||||
|
||||
static void ethash_hash(ethash_return_value *ret,
|
||||
node const *full_nodes,
|
||||
ethash_cache const *cache,
|
||||
ethash_params const *params,
|
||||
ethash_blockhash_t const *header_hash,
|
||||
const uint64_t nonce)
|
||||
ethash_h256_t ethash_get_seedhash(uint64_t block_number)
|
||||
{
|
||||
ethash_h256_t ret;
|
||||
ethash_h256_reset(&ret);
|
||||
uint64_t const epochs = block_number / ETHASH_EPOCH_LENGTH;
|
||||
for (uint32_t i = 0; i < epochs; ++i)
|
||||
SHA3_256(&ret, (uint8_t*)&ret, 32);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ethash_quick_check_difficulty(
|
||||
ethash_h256_t const* header_hash,
|
||||
uint64_t const nonce,
|
||||
ethash_h256_t const* mix_hash,
|
||||
ethash_h256_t const* difficulty
|
||||
)
|
||||
{
|
||||
|
||||
assert((params->full_size % MIX_WORDS) == 0);
|
||||
|
||||
// pack hash and nonce together into first 40 bytes of s_mix
|
||||
assert(sizeof(node) * 8 == 512);
|
||||
node s_mix[MIX_NODES + 1];
|
||||
memcpy(s_mix[0].bytes, header_hash, 32);
|
||||
|
||||
#if BYTE_ORDER != LITTLE_ENDIAN
|
||||
s_mix[0].double_words[4] = fix_endian64(nonce);
|
||||
#else
|
||||
s_mix[0].double_words[4] = nonce;
|
||||
#endif
|
||||
|
||||
// compute sha3-512 hash and replicate across mix
|
||||
SHA3_512(s_mix->bytes, s_mix->bytes, 40);
|
||||
|
||||
#if BYTE_ORDER != LITTLE_ENDIAN
|
||||
for (unsigned w = 0; w != 16; ++w) {
|
||||
s_mix[0].words[w] = fix_endian32(s_mix[0].words[w]);
|
||||
}
|
||||
#endif
|
||||
|
||||
node *const mix = s_mix + 1;
|
||||
for (unsigned w = 0; w != MIX_WORDS; ++w) {
|
||||
mix->words[w] = s_mix[0].words[w % NODE_WORDS];
|
||||
}
|
||||
|
||||
unsigned const
|
||||
page_size = sizeof(uint32_t) * MIX_WORDS,
|
||||
num_full_pages = (unsigned) (params->full_size / page_size);
|
||||
|
||||
|
||||
for (unsigned i = 0; i != ACCESSES; ++i) {
|
||||
uint32_t const index = ((s_mix->words[0] ^ i) * FNV_PRIME ^ mix->words[i % MIX_WORDS]) % num_full_pages;
|
||||
|
||||
for (unsigned n = 0; n != MIX_NODES; ++n) {
|
||||
const node *dag_node = &full_nodes[MIX_NODES * index + n];
|
||||
|
||||
if (!full_nodes) {
|
||||
node tmp_node;
|
||||
ethash_calculate_dag_item(&tmp_node, index * MIX_NODES + n, params, cache);
|
||||
dag_node = &tmp_node;
|
||||
}
|
||||
|
||||
#if defined(_M_X64) && ENABLE_SSE
|
||||
{
|
||||
__m128i fnv_prime = _mm_set1_epi32(FNV_PRIME);
|
||||
__m128i xmm0 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[0]);
|
||||
__m128i xmm1 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[1]);
|
||||
__m128i xmm2 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[2]);
|
||||
__m128i xmm3 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[3]);
|
||||
mix[n].xmm[0] = _mm_xor_si128(xmm0, dag_node->xmm[0]);
|
||||
mix[n].xmm[1] = _mm_xor_si128(xmm1, dag_node->xmm[1]);
|
||||
mix[n].xmm[2] = _mm_xor_si128(xmm2, dag_node->xmm[2]);
|
||||
mix[n].xmm[3] = _mm_xor_si128(xmm3, dag_node->xmm[3]);
|
||||
}
|
||||
#else
|
||||
{
|
||||
for (unsigned w = 0; w != NODE_WORDS; ++w) {
|
||||
mix[n].words[w] = fnv_hash(mix[n].words[w], dag_node->words[w]);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// compress mix
|
||||
for (unsigned w = 0; w != MIX_WORDS; w += 4) {
|
||||
uint32_t reduction = mix->words[w + 0];
|
||||
reduction = reduction * FNV_PRIME ^ mix->words[w + 1];
|
||||
reduction = reduction * FNV_PRIME ^ mix->words[w + 2];
|
||||
reduction = reduction * FNV_PRIME ^ mix->words[w + 3];
|
||||
mix->words[w / 4] = reduction;
|
||||
}
|
||||
|
||||
#if BYTE_ORDER != LITTLE_ENDIAN
|
||||
for (unsigned w = 0; w != MIX_WORDS/4; ++w) {
|
||||
mix->words[w] = fix_endian32(mix->words[w]);
|
||||
}
|
||||
#endif
|
||||
|
||||
memcpy(&ret->mix_hash, mix->bytes, 32);
|
||||
// final Keccak hash
|
||||
SHA3_256(&ret->result, s_mix->bytes, 64 + 32); // Keccak-256(s + compressed_mix)
|
||||
ethash_h256_t return_hash;
|
||||
ethash_quick_hash(&return_hash, header_hash, nonce, mix_hash);
|
||||
return ethash_check_difficulty(&return_hash, difficulty);
|
||||
}
|
||||
|
||||
void ethash_quick_hash(ethash_blockhash_t *return_hash,
|
||||
ethash_blockhash_t const *header_hash,
|
||||
const uint64_t nonce,
|
||||
ethash_blockhash_t const *mix_hash)
|
||||
ethash_light_t ethash_light_new_internal(uint64_t cache_size, ethash_h256_t const* seed)
|
||||
{
|
||||
struct ethash_light *ret;
|
||||
ret = calloc(sizeof(*ret), 1);
|
||||
if (!ret) {
|
||||
return NULL;
|
||||
}
|
||||
ret->cache = malloc((size_t)cache_size);
|
||||
if (!ret->cache) {
|
||||
goto fail_free_light;
|
||||
}
|
||||
node* nodes = (node*)ret->cache;
|
||||
if (!ethash_compute_cache_nodes(nodes, cache_size, seed)) {
|
||||
goto fail_free_cache_mem;
|
||||
}
|
||||
ret->cache_size = cache_size;
|
||||
return ret;
|
||||
|
||||
uint8_t buf[64 + 32];
|
||||
memcpy(buf, header_hash, 32);
|
||||
#if BYTE_ORDER != LITTLE_ENDIAN
|
||||
nonce = fix_endian64(nonce);
|
||||
#endif
|
||||
memcpy(&(buf[32]), &nonce, 8);
|
||||
SHA3_512(buf, buf, 40);
|
||||
memcpy(&(buf[64]), mix_hash, 32);
|
||||
SHA3_256(return_hash, buf, 64 + 32);
|
||||
fail_free_cache_mem:
|
||||
free(ret->cache);
|
||||
fail_free_light:
|
||||
free(ret);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void ethash_get_seedhash(ethash_blockhash_t *seedhash, const uint32_t block_number)
|
||||
ethash_light_t ethash_light_new(uint64_t block_number)
|
||||
{
|
||||
ethash_blockhash_reset(seedhash);
|
||||
const uint32_t epochs = block_number / EPOCH_LENGTH;
|
||||
for (uint32_t i = 0; i < epochs; ++i)
|
||||
SHA3_256(seedhash, (uint8_t*)seedhash, 32);
|
||||
ethash_h256_t seedhash = ethash_get_seedhash(block_number);
|
||||
ethash_light_t ret;
|
||||
ret = ethash_light_new_internal(ethash_get_cachesize(block_number), &seedhash);
|
||||
ret->block_number = block_number;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ethash_quick_check_difficulty(ethash_blockhash_t const *header_hash,
|
||||
const uint64_t nonce,
|
||||
ethash_blockhash_t const *mix_hash,
|
||||
ethash_blockhash_t const *difficulty)
|
||||
void ethash_light_delete(ethash_light_t light)
|
||||
{
|
||||
|
||||
ethash_blockhash_t return_hash;
|
||||
ethash_quick_hash(&return_hash, header_hash, nonce, mix_hash);
|
||||
return ethash_check_difficulty(&return_hash, difficulty);
|
||||
if (light->cache) {
|
||||
free(light->cache);
|
||||
}
|
||||
free(light);
|
||||
}
|
||||
|
||||
void ethash_full(ethash_return_value *ret,
|
||||
void const *full_mem,
|
||||
ethash_params const *params,
|
||||
ethash_blockhash_t const *header_hash,
|
||||
const uint64_t nonce)
|
||||
ethash_return_value_t ethash_light_compute_internal(
|
||||
ethash_light_t light,
|
||||
uint64_t full_size,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t nonce
|
||||
)
|
||||
{
|
||||
ethash_hash(ret, (node const *) full_mem, NULL, params, header_hash, nonce);
|
||||
ethash_return_value_t ret;
|
||||
ret.success = true;
|
||||
if (!ethash_hash(&ret, NULL, light, full_size, header_hash, nonce)) {
|
||||
ret.success = false;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void ethash_light(ethash_return_value *ret,
|
||||
ethash_cache const *cache,
|
||||
ethash_params const *params,
|
||||
ethash_blockhash_t const *header_hash,
|
||||
const uint64_t nonce)
|
||||
ethash_return_value_t ethash_light_compute(
|
||||
ethash_light_t light,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t nonce
|
||||
)
|
||||
{
|
||||
ethash_hash(ret, NULL, cache, params, header_hash, nonce);
|
||||
uint64_t full_size = ethash_get_datasize(light->block_number);
|
||||
return ethash_light_compute_internal(light, full_size, header_hash, nonce);
|
||||
}
|
||||
|
||||
static bool ethash_mmap(struct ethash_full* ret, FILE* f)
|
||||
{
|
||||
int fd;
|
||||
char* mmapped_data;
|
||||
ret->file = f;
|
||||
if ((fd = ethash_fileno(ret->file)) == -1) {
|
||||
return false;
|
||||
}
|
||||
mmapped_data = mmap(
|
||||
NULL,
|
||||
(size_t)ret->file_size + ETHASH_DAG_MAGIC_NUM_SIZE,
|
||||
PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED,
|
||||
fd,
|
||||
0
|
||||
);
|
||||
if (mmapped_data == MAP_FAILED) {
|
||||
return false;
|
||||
}
|
||||
ret->data = (node*)(mmapped_data + ETHASH_DAG_MAGIC_NUM_SIZE);
|
||||
return true;
|
||||
}
|
||||
|
||||
ethash_full_t ethash_full_new_internal(
|
||||
char const* dirname,
|
||||
ethash_h256_t const seed_hash,
|
||||
uint64_t full_size,
|
||||
ethash_light_t const light,
|
||||
ethash_callback_t callback
|
||||
)
|
||||
{
|
||||
struct ethash_full* ret;
|
||||
FILE *f = NULL;
|
||||
ret = calloc(sizeof(*ret), 1);
|
||||
if (!ret) {
|
||||
return NULL;
|
||||
}
|
||||
ret->file_size = (size_t)full_size;
|
||||
switch (ethash_io_prepare(dirname, seed_hash, &f, (size_t)full_size, false)) {
|
||||
case ETHASH_IO_FAIL:
|
||||
goto fail_free_full;
|
||||
case ETHASH_IO_MEMO_MATCH:
|
||||
if (!ethash_mmap(ret, f)) {
|
||||
goto fail_close_file;
|
||||
}
|
||||
return ret;
|
||||
case ETHASH_IO_MEMO_SIZE_MISMATCH:
|
||||
// if a DAG of same filename but unexpected size is found, silently force new file creation
|
||||
if (ethash_io_prepare(dirname, seed_hash, &f, (size_t)full_size, true) != ETHASH_IO_MEMO_MISMATCH) {
|
||||
goto fail_free_full;
|
||||
}
|
||||
// fallthrough to the mismatch case here, DO NOT go through match
|
||||
case ETHASH_IO_MEMO_MISMATCH:
|
||||
if (!ethash_mmap(ret, f)) {
|
||||
goto fail_close_file;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (!ethash_compute_full_data(ret->data, full_size, light, callback)) {
|
||||
goto fail_free_full_data;
|
||||
}
|
||||
|
||||
// after the DAG has been filled we finalize it by writing the magic number at the beginning
|
||||
if (fseek(f, 0, SEEK_SET) != 0) {
|
||||
goto fail_free_full_data;
|
||||
}
|
||||
uint64_t const magic_num = ETHASH_DAG_MAGIC_NUM;
|
||||
if (fwrite(&magic_num, ETHASH_DAG_MAGIC_NUM_SIZE, 1, f) != 1) {
|
||||
goto fail_free_full_data;
|
||||
}
|
||||
fflush(f); // make sure the magic number IS there
|
||||
return ret;
|
||||
|
||||
fail_free_full_data:
|
||||
// could check that munmap(..) == 0, but even if it failed we can't really do anything here
|
||||
munmap(ret->data, (size_t)full_size);
|
||||
fail_close_file:
|
||||
fclose(ret->file);
|
||||
fail_free_full:
|
||||
free(ret);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ethash_full_t ethash_full_new(ethash_light_t light, ethash_callback_t callback)
|
||||
{
|
||||
char strbuf[256];
|
||||
if (!ethash_get_default_dirname(strbuf, 256)) {
|
||||
return NULL;
|
||||
}
|
||||
uint64_t full_size = ethash_get_datasize(light->block_number);
|
||||
ethash_h256_t seedhash = ethash_get_seedhash(light->block_number);
|
||||
return ethash_full_new_internal(strbuf, seedhash, full_size, light, callback);
|
||||
}
|
||||
|
||||
void ethash_full_delete(ethash_full_t full)
|
||||
{
|
||||
// could check that munmap(..) == 0, but even if it failed we can't really do anything here
|
||||
munmap(full->data, (size_t)full->file_size);
|
||||
if (full->file) {
|
||||
fclose(full->file);
|
||||
}
|
||||
free(full);
|
||||
}
|
||||
|
||||
ethash_return_value_t ethash_full_compute(
|
||||
ethash_full_t full,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t nonce
|
||||
)
|
||||
{
|
||||
ethash_return_value_t ret;
|
||||
ret.success = true;
|
||||
if (!ethash_hash(
|
||||
&ret,
|
||||
(node const*)full->data,
|
||||
NULL,
|
||||
full->file_size,
|
||||
header_hash,
|
||||
nonce)) {
|
||||
ret.success = false;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void const* ethash_full_dag(ethash_full_t full)
|
||||
{
|
||||
return full->data;
|
||||
}
|
||||
|
||||
uint64_t ethash_full_dag_size(ethash_full_t full)
|
||||
{
|
||||
return full->file_size;
|
||||
}
|
||||
|
149 Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/internal.h (generated, vendored)
@ -2,6 +2,7 @@
|
||||
#include "compiler.h"
|
||||
#include "endian.h"
|
||||
#include "ethash.h"
|
||||
#include <stdio.h>
|
||||
|
||||
#define ENABLE_SSE 0
|
||||
|
||||
@ -15,14 +16,14 @@ extern "C" {
|
||||
|
||||
// compile time settings
|
||||
#define NODE_WORDS (64/4)
|
||||
#define MIX_WORDS (MIX_BYTES/4)
|
||||
#define MIX_WORDS (ETHASH_MIX_BYTES/4)
|
||||
#define MIX_NODES (MIX_WORDS / NODE_WORDS)
|
||||
#include <stdint.h>
|
||||
|
||||
typedef union node {
|
||||
uint8_t bytes[NODE_WORDS * 4];
|
||||
uint32_t words[NODE_WORDS];
|
||||
uint64_t double_words[NODE_WORDS / 2];
|
||||
uint8_t bytes[NODE_WORDS * 4];
|
||||
uint32_t words[NODE_WORDS];
|
||||
uint64_t double_words[NODE_WORDS / 2];
|
||||
|
||||
#if defined(_M_X64) && ENABLE_SSE
|
||||
__m128i xmm[NODE_WORDS/4];
|
||||
@ -30,15 +31,139 @@ typedef union node {
|
||||
|
||||
} node;
|
||||
|
||||
void ethash_calculate_dag_item(node *const ret,
|
||||
const unsigned node_index,
|
||||
ethash_params const *params,
|
||||
ethash_cache const *cache);
|
||||
static inline uint8_t ethash_h256_get(ethash_h256_t const* hash, unsigned int i)
|
||||
{
|
||||
return hash->b[i];
|
||||
}
|
||||
|
||||
void ethash_quick_hash(ethash_blockhash_t *return_hash,
|
||||
ethash_blockhash_t const *header_hash,
|
||||
const uint64_t nonce,
|
||||
ethash_blockhash_t const *mix_hash);
|
||||
static inline void ethash_h256_set(ethash_h256_t* hash, unsigned int i, uint8_t v)
|
||||
{
|
||||
hash->b[i] = v;
|
||||
}
|
||||
|
||||
static inline void ethash_h256_reset(ethash_h256_t* hash)
|
||||
{
|
||||
memset(hash, 0, 32);
|
||||
}
|
||||
|
||||
// Returns if hash is less than or equal to difficulty
|
||||
static inline int ethash_check_difficulty(
|
||||
ethash_h256_t const* hash,
|
||||
ethash_h256_t const* difficulty
|
||||
)
|
||||
{
|
||||
// Difficulty is big endian
|
||||
for (int i = 0; i < 32; i++) {
|
||||
if (ethash_h256_get(hash, i) == ethash_h256_get(difficulty, i)) {
|
||||
continue;
|
||||
}
|
||||
return ethash_h256_get(hash, i) < ethash_h256_get(difficulty, i);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
int ethash_quick_check_difficulty(
|
||||
ethash_h256_t const* header_hash,
|
||||
uint64_t const nonce,
|
||||
ethash_h256_t const* mix_hash,
|
||||
ethash_h256_t const* difficulty
|
||||
);
|
||||
|
||||
struct ethash_light {
|
||||
void* cache;
|
||||
uint64_t cache_size;
|
||||
uint64_t block_number;
|
||||
};
|
||||
|
||||
/**
|
||||
* Allocate and initialize a new ethash_light handler. Internal version
|
||||
*
|
||||
* @param cache_size The size of the cache in bytes
|
||||
* @param seed Block seedhash to be used during the computation of the
|
||||
* cache nodes
|
||||
* @return Newly allocated ethash_light handler or NULL in case of
|
||||
* ERRNOMEM or invalid parameters used for @ref ethash_compute_cache_nodes()
|
||||
*/
|
||||
ethash_light_t ethash_light_new_internal(uint64_t cache_size, ethash_h256_t const* seed);
|
||||
|
||||
/**
|
||||
* Calculate the light client data. Internal version.
|
||||
*
|
||||
* @param light The light client handler
|
||||
* @param full_size The size of the full data in bytes.
|
||||
* @param header_hash The header hash to pack into the mix
|
||||
* @param nonce The nonce to pack into the mix
|
||||
* @return The resulting hash.
|
||||
*/
|
||||
ethash_return_value_t ethash_light_compute_internal(
|
||||
ethash_light_t light,
|
||||
uint64_t full_size,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t nonce
|
||||
);
|
||||
|
||||
struct ethash_full {
|
||||
FILE* file;
|
||||
uint64_t file_size;
|
||||
node* data;
|
||||
};
|
||||
|
||||
/**
|
||||
* Allocate and initialize a new ethash_full handler. Internal version.
|
||||
*
|
||||
* @param dirname The directory in which to put the DAG file.
|
||||
* @param seedhash The seed hash of the block. Used in the DAG file naming.
|
||||
* @param full_size The size of the full data in bytes.
|
||||
* @param cache A cache object to use that was allocated with @ref ethash_cache_new().
|
||||
* Iff this function succeeds the ethash_full_t will take memory
* ownership of the cache and free it at deletion. If not, the user
* still has to handle freeing of the cache himself.
|
||||
* @param callback A callback function with signature of @ref ethash_callback_t
|
||||
* It accepts an unsigned with which a progress of DAG calculation
|
||||
* can be displayed. If all goes well the callback should return 0.
|
||||
* If a non-zero value is returned then DAG generation will stop.
|
||||
* @return Newly allocated ethash_full handler or NULL in case of
|
||||
* ERRNOMEM or invalid parameters used for @ref ethash_compute_full_data()
|
||||
*/
|
||||
ethash_full_t ethash_full_new_internal(
|
||||
char const* dirname,
|
||||
ethash_h256_t const seed_hash,
|
||||
uint64_t full_size,
|
||||
ethash_light_t const light,
|
||||
ethash_callback_t callback
|
||||
);
|
||||
|
||||
void ethash_calculate_dag_item(
|
||||
node* const ret,
|
||||
uint32_t node_index,
|
||||
ethash_light_t const cache
|
||||
);
|
||||
|
||||
void ethash_quick_hash(
|
||||
ethash_h256_t* return_hash,
|
||||
ethash_h256_t const* header_hash,
|
||||
const uint64_t nonce,
|
||||
ethash_h256_t const* mix_hash
|
||||
);
|
||||
|
||||
uint64_t ethash_get_datasize(uint64_t const block_number);
|
||||
uint64_t ethash_get_cachesize(uint64_t const block_number);
|
||||
|
||||
/**
|
||||
* Compute the memory data for a full node's memory
|
||||
*
|
||||
* @param mem A pointer to an ethash full's memory
|
||||
* @param full_size The size of the full data in bytes
|
||||
* @param cache A cache object to use in the calculation
|
||||
* @param callback The callback function. Check @ref ethash_full_new() for details.
|
||||
* @return true if all went fine and false for invalid parameters
|
||||
*/
|
||||
bool ethash_compute_full_data(
|
||||
void* mem,
|
||||
uint64_t full_size,
|
||||
ethash_light_t const light,
|
||||
ethash_callback_t callback
|
||||
);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
127 Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/io.c (generated, vendored)
@ -22,68 +22,81 @@
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
|
||||
// silly macro to save some typing
|
||||
#define PASS_ARR(c_) (c_), sizeof(c_)
|
||||
|
||||
static bool ethash_io_write_file(char const *dirname,
|
||||
char const* filename,
|
||||
size_t filename_length,
|
||||
void const* data,
|
||||
size_t data_size)
|
||||
enum ethash_io_rc ethash_io_prepare(
|
||||
char const* dirname,
|
||||
ethash_h256_t const seedhash,
|
||||
FILE** output_file,
|
||||
uint64_t file_size,
|
||||
bool force_create
|
||||
)
|
||||
{
|
||||
bool ret = false;
|
||||
char *fullname = ethash_io_create_filename(dirname, filename, filename_length);
|
||||
if (!fullname) {
|
||||
return false;
|
||||
}
|
||||
FILE *f = fopen(fullname, "wb");
|
||||
if (!f) {
|
||||
goto free_name;
|
||||
}
|
||||
if (data_size != fwrite(data, 1, data_size, f)) {
|
||||
goto close;
|
||||
}
|
||||
char mutable_name[DAG_MUTABLE_NAME_MAX_SIZE];
|
||||
enum ethash_io_rc ret = ETHASH_IO_FAIL;
|
||||
|
||||
ret = true;
|
||||
close:
|
||||
fclose(f);
|
||||
free_name:
|
||||
free(fullname);
|
||||
return ret;
|
||||
}
|
||||
// assert directory exists
|
||||
if (!ethash_mkdir(dirname)) {
|
||||
goto end;
|
||||
}
|
||||
|
||||
bool ethash_io_write(char const *dirname,
|
||||
ethash_params const* params,
|
||||
ethash_blockhash_t seedhash,
|
||||
void const* cache,
|
||||
uint8_t **data,
|
||||
uint64_t *data_size)
|
||||
{
|
||||
char info_buffer[DAG_MEMO_BYTESIZE];
|
||||
// allocate the bytes
|
||||
uint8_t *temp_data_ptr = malloc((size_t)params->full_size);
|
||||
if (!temp_data_ptr) {
|
||||
goto end;
|
||||
}
|
||||
ethash_compute_full_data(temp_data_ptr, params, cache);
|
||||
ethash_io_mutable_name(ETHASH_REVISION, &seedhash, mutable_name);
|
||||
char* tmpfile = ethash_io_create_filename(dirname, mutable_name, strlen(mutable_name));
|
||||
if (!tmpfile) {
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (!ethash_io_write_file(dirname, PASS_ARR(DAG_FILE_NAME), temp_data_ptr, (size_t)params->full_size)) {
|
||||
goto fail_free;
|
||||
}
|
||||
FILE *f;
|
||||
if (!force_create) {
|
||||
// try to open the file
|
||||
f = ethash_fopen(tmpfile, "rb+");
|
||||
if (f) {
|
||||
size_t found_size;
|
||||
if (!ethash_file_size(f, &found_size)) {
|
||||
fclose(f);
|
||||
goto free_memo;
|
||||
}
|
||||
if (file_size != found_size - ETHASH_DAG_MAGIC_NUM_SIZE) {
|
||||
fclose(f);
|
||||
ret = ETHASH_IO_MEMO_SIZE_MISMATCH;
|
||||
goto free_memo;
|
||||
}
|
||||
// compare the magic number, no need to care about endianness since it's local
|
||||
uint64_t magic_num;
|
||||
if (fread(&magic_num, ETHASH_DAG_MAGIC_NUM_SIZE, 1, f) != 1) {
|
||||
// I/O error
|
||||
fclose(f);
|
||||
ret = ETHASH_IO_MEMO_SIZE_MISMATCH;
|
||||
goto free_memo;
|
||||
}
|
||||
if (magic_num != ETHASH_DAG_MAGIC_NUM) {
|
||||
fclose(f);
|
||||
ret = ETHASH_IO_MEMO_SIZE_MISMATCH;
|
||||
goto free_memo;
|
||||
}
|
||||
ret = ETHASH_IO_MEMO_MATCH;
|
||||
goto set_file;
|
||||
}
|
||||
}
|
||||
|
||||
ethash_io_serialize_info(REVISION, seedhash, info_buffer);
|
||||
if (!ethash_io_write_file(dirname, PASS_ARR(DAG_MEMO_NAME), info_buffer, DAG_MEMO_BYTESIZE)) {
|
||||
goto fail_free;
|
||||
}
|
||||
// file does not exist, will need to be created
|
||||
f = ethash_fopen(tmpfile, "wb+");
|
||||
if (!f) {
|
||||
goto free_memo;
|
||||
}
|
||||
// make sure it's of the proper size
|
||||
if (fseek(f, (long int)(file_size + ETHASH_DAG_MAGIC_NUM_SIZE - 1), SEEK_SET) != 0) {
|
||||
fclose(f);
|
||||
goto free_memo;
|
||||
}
|
||||
fputc('\n', f);
|
||||
fflush(f);
|
||||
ret = ETHASH_IO_MEMO_MISMATCH;
|
||||
goto set_file;
|
||||
|
||||
*data = temp_data_ptr;
|
||||
*data_size = params->full_size;
|
||||
return true;
|
||||
|
||||
fail_free:
|
||||
free(temp_data_ptr);
|
||||
ret = ETHASH_IO_MEMO_MATCH;
|
||||
set_file:
|
||||
*output_file = f;
|
||||
free_memo:
|
||||
free(tmpfile);
|
||||
end:
|
||||
return false;
|
||||
return ret;
|
||||
}
|
||||
|
||||
#undef PASS_ARR
|
||||
|
207 Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/io.h (generated, vendored)
@ -22,94 +22,163 @@
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
#ifdef __cplusplus
|
||||
#define __STDC_FORMAT_MACROS 1
|
||||
#endif
|
||||
#include <inttypes.h>
|
||||
#include "endian.h"
|
||||
#include "ethash.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
static const char DAG_FILE_NAME[] = "full";
|
||||
static const char DAG_MEMO_NAME[] = "full.info";
|
||||
// MSVC thinks that "static const unsigned int" is not a compile time variable. Sorry for the #define :(
|
||||
#define DAG_MEMO_BYTESIZE 36
|
||||
|
||||
// Maximum size for mutable part of DAG file name
|
||||
// 10 is for maximum number of digits of a uint32_t (for REVISION)
|
||||
// 1 is for _ and 16 is for the first 16 hex digits for first 8 bytes of
|
||||
// the seedhash and last 1 is for the null terminating character
|
||||
// Reference: https://github.com/ethereum/wiki/wiki/Ethash-DAG
|
||||
#define DAG_MUTABLE_NAME_MAX_SIZE (10 + 1 + 16 + 1)
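A hedged sketch of the name layout this limit accounts for: up to 10 digits of the revision, an underscore, and the first 8 bytes of the seedhash as 16 hex digits. The actual ethash_io_mutable_name() helper may format it differently.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustration only: buf must hold at least 10 + 1 + 16 + 1 characters. */
static void example_mutable_name(char* buf, uint32_t revision, uint8_t const seedhash[32])
{
	uint64_t prefix = 0;
	for (int i = 0; i < 8; i++) {
		prefix = (prefix << 8) | seedhash[i]; /* first 8 bytes, big-endian */
	}
	snprintf(buf, 10 + 1 + 16 + 1, "%u_%016llx", revision, (unsigned long long)prefix);
}
```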
|
||||
/// Possible return values of @see ethash_io_prepare
|
||||
enum ethash_io_rc {
|
||||
ETHASH_IO_FAIL = 0, ///< There has been an IO failure
|
||||
ETHASH_IO_MEMO_MISMATCH, ///< Memo file either did not exist or there was content mismatch
|
||||
ETHASH_IO_MEMO_MATCH, ///< Memo file existed and contents matched. No need to do anything
|
||||
ETHASH_IO_FAIL = 0, ///< There has been an IO failure
|
||||
ETHASH_IO_MEMO_SIZE_MISMATCH, ///< DAG with revision/hash match, but file size was wrong.
|
||||
ETHASH_IO_MEMO_MISMATCH, ///< The DAG file did not exist or there was revision/hash mismatch
|
||||
ETHASH_IO_MEMO_MATCH, ///< DAG file existed and revision/hash matched. No need to do anything
|
||||
};
|
||||
|
||||
// small hack for Windows. I don't feel I should use va_args and forward just
|
||||
// to have this one function properly cross-platform abstracted
|
||||
#if defined(_WIN32) && !defined(__GNUC__)
|
||||
#define snprintf(...) sprintf_s(__VA_ARGS__)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Prepares io for ethash
|
||||
*
|
||||
* Create the DAG directory if it does not exist, and check if the memo file matches.
|
||||
* If it does not match then it's deleted to pave the way for @ref ethash_io_write()
|
||||
*
|
||||
* @param dirname A null terminated c-string of the path of the ethash
|
||||
* data directory. If it does not exist it's created.
|
||||
* @param seedhash The seedhash of the current block number
|
||||
* @return For possible return values @see enum ethash_io_rc
|
||||
*/
|
||||
enum ethash_io_rc ethash_io_prepare(char const *dirname, ethash_blockhash_t seedhash);
|
||||
|
||||
/**
|
||||
* Fully computes data and writes it to the file on disk.
|
||||
*
|
||||
* This function should be called after @see ethash_io_prepare() and only if
|
||||
* its return value is @c ETHASH_IO_MEMO_MISMATCH. Will write both the full data
|
||||
* and the memo file.
|
||||
* Create the DAG directory and the DAG file if they don't exist.
|
||||
*
|
||||
* @param[in] dirname A null terminated c-string of the path of the ethash
|
||||
* data directory. Has to exist.
|
||||
* @param[in] params An ethash_params object containing the full size
|
||||
* and the cache size
|
||||
* @param[in] seedhash The seedhash of the current block number
|
||||
* @param[in] cache      The cache data. Would usually have been calculated by
|
||||
* @see ethash_prep_light().
|
||||
* @param[out] data Pass a pointer to uint8_t by reference here. If the
|
||||
*                       function is successful then this will point to the allocated
|
||||
* data calculated by @see ethash_prep_full(). Memory
|
||||
*                       ownership is transferred to the caller. Remember that
|
||||
* you eventually need to free this with a call to free().
|
||||
* @param[out] data_size Pass a uint64_t by reference. If the function is successful
|
||||
* then this will contain the number of bytes allocated
|
||||
* for @a data.
|
||||
* @return True for success and false in case of failure.
|
||||
* data directory. If it does not exist it's created.
|
||||
* @param[in] seedhash The seedhash of the current block number, used in the
|
||||
* naming of the file as can be seen from the spec at:
|
||||
* https://github.com/ethereum/wiki/wiki/Ethash-DAG
|
||||
* @param[out] output_file If there was no failure then this will point to an open
|
||||
* file descriptor. User is responsible for closing it.
|
||||
*                        In the case of a memo match the file is opened in read
|
||||
*                        mode, while in the case of a mismatch a new file is created
|
||||
*                        in write mode
|
||||
* @param[in] file_size The size that the DAG file should have on disk
|
||||
* @param[in] force_create If true then there is no check to see if the file
|
||||
* already exists
|
||||
* @return For possible return values @see enum ethash_io_rc
|
||||
*/
|
||||
bool ethash_io_write(char const *dirname,
|
||||
ethash_params const* params,
|
||||
ethash_blockhash_t seedhash,
|
||||
void const* cache,
|
||||
uint8_t **data,
|
||||
uint64_t *data_size);
|
||||
enum ethash_io_rc ethash_io_prepare(
|
||||
char const* dirname,
|
||||
ethash_h256_t const seedhash,
|
||||
FILE** output_file,
|
||||
uint64_t file_size,
|
||||
bool force_create
|
||||
);
|
||||
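A minimal caller sketch, not part of this header: it assumes ethash_get_default_dirname() from below and the ethash_get_datasize()/ethash_get_seedhash() helpers used elsewhere in this commit, with error handling trimmed.

FILE* open_dag_file(uint64_t const block_number)
{
	char dirname[256];
	FILE* f = NULL;
	if (!ethash_get_default_dirname(dirname, sizeof(dirname))) {
		return NULL;
	}
	uint64_t const size = ethash_get_datasize(block_number);      // full DAG size in bytes
	ethash_h256_t const seed = ethash_get_seedhash(block_number); // determines the file name
	switch (ethash_io_prepare(dirname, seed, &f, size, false)) {
	case ETHASH_IO_MEMO_MATCH:    // DAG already on disk; f is open for reading
	case ETHASH_IO_MEMO_MISMATCH: // a fresh file of the right size was created; fill it in
		return f;
	default:                      // ETHASH_IO_FAIL or ETHASH_IO_MEMO_SIZE_MISMATCH
		return NULL;
	}
}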
|
||||
static inline void ethash_io_serialize_info(uint32_t revision,
|
||||
ethash_blockhash_t seed_hash,
|
||||
char *output)
|
||||
/**
|
||||
* An fopen wrapper for warning-free cross-platform fopen.
|
||||
*
|
||||
* Msvc compiler considers fopen to be insecure and suggests to use their
|
||||
* alternative. This is a wrapper for this alternative. Another way is to
|
||||
* #define _CRT_SECURE_NO_WARNINGS, but disabling all security warnings does
|
||||
* not sound like a good idea.
|
||||
*
|
||||
* @param file_name The path to the file to open
|
||||
* @param mode Opening mode. Check fopen()
|
||||
* @return The FILE* or NULL in failure
|
||||
*/
|
||||
FILE* ethash_fopen(char const* file_name, char const* mode);
|
||||
|
||||
/**
|
||||
* A strncat wrapper for warning-free cross-platform strncat.
|
||||
*
|
||||
* Msvc compiler considers strncat to be insecure and suggests to use their
|
||||
* alternative. This is a wrapper for this alternative. Another way is to
|
||||
* #define _CRT_SECURE_NO_WARNINGS, but disabling all security warnings does
|
||||
* not sound like a good idea.
|
||||
*
|
||||
* @param des Destination buffer
|
||||
* @param dest_size Maximum size of the destination buffer. This is the
|
||||
* extra argument for the MSVC secure strncat
|
||||
* @param src       Source buffer
|
||||
* @param count Number of bytes to copy from source
|
||||
* @return If all is well returns the dest buffer. If there is an
|
||||
* error returns NULL
|
||||
*/
|
||||
char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count);
|
||||
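For illustration only (the directory path is hypothetical), this is the intended calling pattern; on overflow the wrapper returns NULL instead of silently truncating:

	char const* dirname = "/home/user/.ethash"; // hypothetical path
	char buf[256];
	buf[0] = '\0';
	if (!ethash_strncat(buf, sizeof(buf), dirname, strlen(dirname)) ||
		!ethash_strncat(buf, sizeof(buf), DAG_MEMO_NAME, sizeof(DAG_MEMO_NAME))) {
		// the concatenation would not fit into buf
	}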
|
||||
/**
|
||||
* A cross-platform mkdir wrapper to create a directory or assert it's there
|
||||
*
|
||||
* @param dirname The full path of the directory to create
|
||||
* @return true if the directory was created or if it already
|
||||
* existed
|
||||
*/
|
||||
bool ethash_mkdir(char const* dirname);
|
||||
|
||||
/**
|
||||
* Get a file's size
|
||||
*
|
||||
* @param[in] f The open file stream whose size to get
|
||||
* @param[out] size Pass a size_t by reference to contain the file size
|
||||
* @return true in success and false if there was a failure
|
||||
*/
|
||||
bool ethash_file_size(FILE* f, size_t* ret_size);
|
||||
|
||||
/**
|
||||
* Get a file descriptor number from a FILE stream
|
||||
*
|
||||
* @param f The file stream whose fd to get
|
||||
* @return Platform specific fd handler
|
||||
*/
|
||||
int ethash_fileno(FILE* f);
|
||||
|
||||
/**
|
||||
* Create the filename for the DAG.
|
||||
*
|
||||
* @param dirname The directory name in which the DAG file should reside
|
||||
* If it does not end with a directory separator it is appended.
|
||||
* @param filename The actual name of the file
|
||||
* @param filename_length The length of the filename in bytes
|
||||
* @return A char* containing the full name. User must deallocate.
|
||||
*/
|
||||
char* ethash_io_create_filename(
|
||||
char const* dirname,
|
||||
char const* filename,
|
||||
size_t filename_length
|
||||
);
|
||||
|
||||
/**
|
||||
* Gets the default directory name for the DAG depending on the system
|
||||
*
|
||||
* The spec defining this directory is here: https://github.com/ethereum/wiki/wiki/Ethash-DAG
|
||||
*
|
||||
* @param[out] strbuf A string buffer of sufficient size to keep the
|
||||
*                     null-terminated string of the directory name
|
||||
* @param[in] buffsize Size of @a strbuf in bytes
|
||||
* @return true for success and false otherwise
|
||||
*/
|
||||
bool ethash_get_default_dirname(char* strbuf, size_t buffsize);
|
||||
|
||||
static inline bool ethash_io_mutable_name(
|
||||
uint32_t revision,
|
||||
ethash_h256_t const* seed_hash,
|
||||
char* output
|
||||
)
|
||||
{
|
||||
// if .info is only consumed locally we don't really care about endianness
|
||||
memcpy(output, &revision, 4);
|
||||
memcpy(output + 4, &seed_hash, 32);
|
||||
uint64_t hash = *((uint64_t*)seed_hash);
|
||||
#if LITTLE_ENDIAN == BYTE_ORDER
|
||||
hash = ethash_swap_u64(hash);
|
||||
#endif
|
||||
return snprintf(output, DAG_MUTABLE_NAME_MAX_SIZE, "%u_%016" PRIx64, revision, hash) >= 0;
|
||||
}
|
||||
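A short usage sketch (block_number is a placeholder; ETHASH_REVISION is assumed to be the revision constant from ethash.h as used elsewhere in this commit):

	uint64_t const block_number = 0; // placeholder
	char mutable_name[DAG_MUTABLE_NAME_MAX_SIZE];
	ethash_h256_t const seed = ethash_get_seedhash(block_number);
	if (ethash_io_mutable_name(ETHASH_REVISION, &seed, mutable_name)) {
		// mutable_name now holds "<revision>_<first 8 bytes of the seed hash as 16 hex digits>"
	}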
|
||||
static inline char *ethash_io_create_filename(char const *dirname,
|
||||
char const* filename,
|
||||
size_t filename_length)
|
||||
{
|
||||
// in C the cast is not needed, but a C++ compiler will complain about an invalid conversion
|
||||
char *name = (char*)malloc(strlen(dirname) + filename_length);
|
||||
if (!name) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
name[0] = '\0';
|
||||
strcat(name, dirname);
|
||||
strcat(name, filename);
|
||||
return name;
|
||||
}
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
116
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/io_posix.c
generated
vendored
@ -27,50 +27,76 @@
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
|
||||
enum ethash_io_rc ethash_io_prepare(char const *dirname, ethash_blockhash_t seedhash)
|
||||
FILE* ethash_fopen(char const* file_name, char const* mode)
|
||||
{
|
||||
char read_buffer[DAG_MEMO_BYTESIZE];
|
||||
char expect_buffer[DAG_MEMO_BYTESIZE];
|
||||
enum ethash_io_rc ret = ETHASH_IO_FAIL;
|
||||
|
||||
// assert directory exists, full owner permissions and read/search for others
|
||||
int rc = mkdir(dirname, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
|
||||
if (rc == -1 && errno != EEXIST) {
|
||||
goto end;
|
||||
}
|
||||
|
||||
char *memofile = ethash_io_create_filename(dirname, DAG_MEMO_NAME, sizeof(DAG_MEMO_NAME));
|
||||
if (!memofile) {
|
||||
goto end;
|
||||
}
|
||||
|
||||
// try to open memo file
|
||||
FILE *f = fopen(memofile, "rb");
|
||||
if (!f) {
|
||||
// file does not exist, so no checking happens. All is fine.
|
||||
ret = ETHASH_IO_MEMO_MISMATCH;
|
||||
goto free_memo;
|
||||
}
|
||||
|
||||
if (fread(read_buffer, 1, DAG_MEMO_BYTESIZE, f) != DAG_MEMO_BYTESIZE) {
|
||||
goto close;
|
||||
}
|
||||
|
||||
ethash_io_serialize_info(REVISION, seedhash, expect_buffer);
|
||||
if (memcmp(read_buffer, expect_buffer, DAG_MEMO_BYTESIZE) != 0) {
|
||||
// we have different memo contents so delete the memo file
|
||||
if (unlink(memofile) != 0) {
|
||||
goto close;
|
||||
}
|
||||
ret = ETHASH_IO_MEMO_MISMATCH;
|
||||
}
|
||||
|
||||
ret = ETHASH_IO_MEMO_MATCH;
|
||||
|
||||
close:
|
||||
fclose(f);
|
||||
free_memo:
|
||||
free(memofile);
|
||||
end:
|
||||
return ret;
|
||||
return fopen(file_name, mode);
|
||||
}
|
||||
|
||||
char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count)
|
||||
{
|
||||
return strlen(dest) + count + 1 <= dest_size ? strncat(dest, src, count) : NULL;
|
||||
}
|
||||
|
||||
bool ethash_mkdir(char const* dirname)
|
||||
{
|
||||
int rc = mkdir(dirname, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
|
||||
return rc != -1 || errno == EEXIST;
|
||||
}
|
||||
|
||||
int ethash_fileno(FILE *f)
|
||||
{
|
||||
return fileno(f);
|
||||
}
|
||||
|
||||
char* ethash_io_create_filename(
|
||||
char const* dirname,
|
||||
char const* filename,
|
||||
size_t filename_length
|
||||
)
|
||||
{
|
||||
size_t dirlen = strlen(dirname);
|
||||
size_t dest_size = dirlen + filename_length + 1;
|
||||
if (dirname[dirlen] != '/') {
|
||||
dest_size += 1;
|
||||
}
|
||||
char* name = malloc(dest_size);
|
||||
if (!name) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
name[0] = '\0';
|
||||
ethash_strncat(name, dest_size, dirname, dirlen);
|
||||
if (dirname[dirlen] != '/') {
|
||||
ethash_strncat(name, dest_size, "/", 1);
|
||||
}
|
||||
ethash_strncat(name, dest_size, filename, filename_length);
|
||||
return name;
|
||||
}
|
||||
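As in the old prepare code above, a call such as the following joins the directory and the memo file name, inserting a separator when needed; the caller owns and frees the result:

	char* memofile = ethash_io_create_filename(dirname, DAG_MEMO_NAME, sizeof(DAG_MEMO_NAME));
	if (memofile) {
		// e.g. "/home/user/.ethash" + "full.info" -> "/home/user/.ethash/full.info"
		free(memofile);
	}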
|
||||
bool ethash_file_size(FILE* f, size_t* ret_size)
|
||||
{
|
||||
struct stat st;
|
||||
int fd;
|
||||
if ((fd = fileno(f)) == -1 || fstat(fd, &st) != 0) {
|
||||
return false;
|
||||
}
|
||||
*ret_size = st.st_size;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ethash_get_default_dirname(char* strbuf, size_t buffsize)
|
||||
{
|
||||
static const char dir_suffix[] = ".ethash/";
|
||||
strbuf[0] = '\0';
|
||||
char* home_dir = getenv("HOME");
|
||||
size_t len = strlen(home_dir);
|
||||
if (!ethash_strncat(strbuf, buffsize, home_dir, len)) {
|
||||
return false;
|
||||
}
|
||||
if (home_dir[len] != '/') {
|
||||
if (!ethash_strncat(strbuf, buffsize, "/", 1)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return ethash_strncat(strbuf, buffsize, dir_suffix, sizeof(dir_suffix));
|
||||
}
|
||||
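A minimal sketch (the buffer size is chosen arbitrarily); on a typical Linux account this produces something like "/home/<user>/.ethash/":

	char dirbuf[256];
	if (!ethash_get_default_dirname(dirbuf, sizeof(dirbuf))) {
		// the path did not fit into the buffer
	}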
|
117
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/io_win32.c
generated
vendored
@ -23,51 +23,78 @@
|
||||
#include <direct.h>
|
||||
#include <errno.h>
|
||||
#include <stdio.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <Shlobj.h>
|
||||
|
||||
enum ethash_io_rc ethash_io_prepare(char const *dirname, ethash_blockhash_t seedhash)
|
||||
FILE* ethash_fopen(char const* file_name, char const* mode)
|
||||
{
|
||||
char read_buffer[DAG_MEMO_BYTESIZE];
|
||||
char expect_buffer[DAG_MEMO_BYTESIZE];
|
||||
enum ethash_io_rc ret = ETHASH_IO_FAIL;
|
||||
|
||||
// assert directory exists
|
||||
int rc = _mkdir(dirname);
|
||||
if (rc == -1 && errno != EEXIST) {
|
||||
goto end;
|
||||
}
|
||||
|
||||
char *memofile = ethash_io_create_filename(dirname, DAG_MEMO_NAME, sizeof(DAG_MEMO_NAME));
|
||||
if (!memofile) {
|
||||
goto end;
|
||||
}
|
||||
|
||||
// try to open memo file
|
||||
FILE *f = fopen(memofile, "rb");
|
||||
if (!f) {
|
||||
// file does not exist, so no checking happens. All is fine.
|
||||
ret = ETHASH_IO_MEMO_MISMATCH;
|
||||
goto free_memo;
|
||||
}
|
||||
|
||||
if (fread(read_buffer, 1, DAG_MEMO_BYTESIZE, f) != DAG_MEMO_BYTESIZE) {
|
||||
goto close;
|
||||
}
|
||||
|
||||
ethash_io_serialize_info(REVISION, seedhash, expect_buffer);
|
||||
if (memcmp(read_buffer, expect_buffer, DAG_MEMO_BYTESIZE) != 0) {
|
||||
// we have different memo contents so delete the memo file
|
||||
if (_unlink(memofile) != 0) {
|
||||
goto close;
|
||||
}
|
||||
ret = ETHASH_IO_MEMO_MISMATCH;
|
||||
}
|
||||
|
||||
ret = ETHASH_IO_MEMO_MATCH;
|
||||
|
||||
close:
|
||||
fclose(f);
|
||||
free_memo:
|
||||
free(memofile);
|
||||
end:
|
||||
return ret;
|
||||
FILE* f;
|
||||
return fopen_s(&f, file_name, mode) == 0 ? f : NULL;
|
||||
}
|
||||
|
||||
char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count)
|
||||
{
|
||||
return strncat_s(dest, dest_size, src, count) == 0 ? dest : NULL;
|
||||
}
|
||||
|
||||
bool ethash_mkdir(char const* dirname)
|
||||
{
|
||||
int rc = _mkdir(dirname);
|
||||
return rc != -1 || errno == EEXIST;
|
||||
}
|
||||
|
||||
int ethash_fileno(FILE* f)
|
||||
{
|
||||
return _fileno(f);
|
||||
}
|
||||
|
||||
char* ethash_io_create_filename(
|
||||
char const* dirname,
|
||||
char const* filename,
|
||||
size_t filename_length
|
||||
)
|
||||
{
|
||||
size_t dirlen = strlen(dirname);
|
||||
size_t dest_size = dirlen + filename_length + 1;
|
||||
if (dirname[dirlen] != '\\' && dirname[dirlen] != '/') {
|
||||
dest_size += 1;
|
||||
}
|
||||
char* name = malloc(dest_size);
|
||||
if (!name) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
name[0] = '\0';
|
||||
ethash_strncat(name, dest_size, dirname, dirlen);
|
||||
if (dirname[dirlen] != '\\' && dirname[dirlen] != '/') {
|
||||
ethash_strncat(name, dest_size, "\\", 1);
|
||||
}
|
||||
ethash_strncat(name, dest_size, filename, filename_length);
|
||||
return name;
|
||||
}
|
||||
|
||||
bool ethash_file_size(FILE* f, size_t* ret_size)
|
||||
{
|
||||
struct _stat st;
|
||||
int fd;
|
||||
if ((fd = _fileno(f)) == -1 || _fstat(fd, &st) != 0) {
|
||||
return false;
|
||||
}
|
||||
*ret_size = st.st_size;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ethash_get_default_dirname(char* strbuf, size_t buffsize)
|
||||
{
|
||||
static const char dir_suffix[] = "Appdata\\Ethash\\";
|
||||
strbuf[0] = '\0';
|
||||
if (!SUCCEEDED(SHGetFolderPathW(NULL, CSIDL_PROFILE, NULL, 0, (WCHAR*)strbuf))) {
|
||||
return false;
|
||||
}
|
||||
if (!ethash_strncat(strbuf, buffsize, "\\", 1)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return ethash_strncat(strbuf, buffsize, dir_suffix, sizeof(dir_suffix));
|
||||
}
|
||||
|
47
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/mmap.h
generated
vendored
Normal file
@ -0,0 +1,47 @@
|
||||
/*
|
||||
This file is part of ethash.
|
||||
|
||||
ethash is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
ethash is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with ethash. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/** @file mmap.h
|
||||
* @author Lefteris Karapetsas <lefteris@ethdev.com>
|
||||
* @date 2015
|
||||
*/
|
||||
#pragma once
|
||||
#if defined(__MINGW32__) || defined(_WIN32)
|
||||
#include <sys/types.h>
|
||||
|
||||
#define PROT_READ 0x1
|
||||
#define PROT_WRITE 0x2
|
||||
/* This flag is only available in WinXP+ */
|
||||
#ifdef FILE_MAP_EXECUTE
|
||||
#define PROT_EXEC 0x4
|
||||
#else
|
||||
#define PROT_EXEC 0x0
|
||||
#define FILE_MAP_EXECUTE 0
|
||||
#endif
|
||||
|
||||
#define MAP_SHARED 0x01
|
||||
#define MAP_PRIVATE 0x02
|
||||
#define MAP_ANONYMOUS 0x20
|
||||
#define MAP_ANON MAP_ANONYMOUS
|
||||
#define MAP_FAILED ((void *) -1)
|
||||
|
||||
void* mmap(void* start, size_t length, int prot, int flags, int fd, off_t offset);
|
||||
void munmap(void* addr, size_t length);
|
||||
#else // posix, yay! ^_^
|
||||
#include <sys/mman.h>
|
||||
#endif
|
||||
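A hedged sketch of how this shim is typically driven together with the io layer; f and file_size stand in for values obtained from ethash_io_prepare():

	FILE* f;            // assumed: open DAG file returned by ethash_io_prepare()
	uint64_t file_size; // assumed: DAG size including ETHASH_DAG_MAGIC_NUM_SIZE
	void* data = mmap(NULL, (size_t)file_size, PROT_READ | PROT_WRITE, MAP_SHARED, ethash_fileno(f), 0);
	if (data == MAP_FAILED) {
		// fall back to plain allocation plus fread/fwrite
	}
	// ... use the mapping ...
	munmap(data, (size_t)file_size);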
|
||||
|
84
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/mmap_win32.c
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
/* mmap() replacement for Windows
|
||||
*
|
||||
* Author: Mike Frysinger <vapier@gentoo.org>
|
||||
* Placed into the public domain
|
||||
*/
|
||||
|
||||
/* References:
|
||||
* CreateFileMapping: http://msdn.microsoft.com/en-us/library/aa366537(VS.85).aspx
|
||||
* CloseHandle: http://msdn.microsoft.com/en-us/library/ms724211(VS.85).aspx
|
||||
* MapViewOfFile: http://msdn.microsoft.com/en-us/library/aa366761(VS.85).aspx
|
||||
* UnmapViewOfFile: http://msdn.microsoft.com/en-us/library/aa366882(VS.85).aspx
|
||||
*/
|
||||
|
||||
#include <io.h>
|
||||
#include <windows.h>
|
||||
#include "mmap.h"
|
||||
|
||||
#ifdef __USE_FILE_OFFSET64
|
||||
# define DWORD_HI(x) (x >> 32)
|
||||
# define DWORD_LO(x) ((x) & 0xffffffff)
|
||||
#else
|
||||
# define DWORD_HI(x) (0)
|
||||
# define DWORD_LO(x) (x)
|
||||
#endif
|
||||
|
||||
void* mmap(void* start, size_t length, int prot, int flags, int fd, off_t offset)
|
||||
{
|
||||
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
|
||||
return MAP_FAILED;
|
||||
if (fd == -1) {
|
||||
if (!(flags & MAP_ANON) || offset)
|
||||
return MAP_FAILED;
|
||||
} else if (flags & MAP_ANON)
|
||||
return MAP_FAILED;
|
||||
|
||||
DWORD flProtect;
|
||||
if (prot & PROT_WRITE) {
|
||||
if (prot & PROT_EXEC)
|
||||
flProtect = PAGE_EXECUTE_READWRITE;
|
||||
else
|
||||
flProtect = PAGE_READWRITE;
|
||||
} else if (prot & PROT_EXEC) {
|
||||
if (prot & PROT_READ)
|
||||
flProtect = PAGE_EXECUTE_READ;
|
||||
else if (prot & PROT_EXEC)
|
||||
flProtect = PAGE_EXECUTE;
|
||||
} else
|
||||
flProtect = PAGE_READONLY;
|
||||
|
||||
off_t end = length + offset;
|
||||
HANDLE mmap_fd, h;
|
||||
if (fd == -1)
|
||||
mmap_fd = INVALID_HANDLE_VALUE;
|
||||
else
|
||||
mmap_fd = (HANDLE)_get_osfhandle(fd);
|
||||
h = CreateFileMapping(mmap_fd, NULL, flProtect, DWORD_HI(end), DWORD_LO(end), NULL);
|
||||
if (h == NULL)
|
||||
return MAP_FAILED;
|
||||
|
||||
DWORD dwDesiredAccess;
|
||||
if (prot & PROT_WRITE)
|
||||
dwDesiredAccess = FILE_MAP_WRITE;
|
||||
else
|
||||
dwDesiredAccess = FILE_MAP_READ;
|
||||
if (prot & PROT_EXEC)
|
||||
dwDesiredAccess |= FILE_MAP_EXECUTE;
|
||||
if (flags & MAP_PRIVATE)
|
||||
dwDesiredAccess |= FILE_MAP_COPY;
|
||||
void *ret = MapViewOfFile(h, dwDesiredAccess, DWORD_HI(offset), DWORD_LO(offset), length);
|
||||
if (ret == NULL) {
|
||||
ret = MAP_FAILED;
|
||||
}
|
||||
// since we are handling the file ourselves with fd, close the Windows Handle here
|
||||
CloseHandle(h);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void munmap(void* addr, size_t length)
|
||||
{
|
||||
UnmapViewOfFile(addr);
|
||||
}
|
||||
|
||||
#undef DWORD_HI
|
||||
#undef DWORD_LO
|
192
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/sha3.c
generated
vendored
@ -17,65 +17,65 @@
|
||||
|
||||
/*** Constants. ***/
|
||||
static const uint8_t rho[24] = \
|
||||
{ 1, 3, 6, 10, 15, 21,
|
||||
28, 36, 45, 55, 2, 14,
|
||||
27, 41, 56, 8, 25, 43,
|
||||
62, 18, 39, 61, 20, 44};
|
||||
{ 1, 3, 6, 10, 15, 21,
|
||||
28, 36, 45, 55, 2, 14,
|
||||
27, 41, 56, 8, 25, 43,
|
||||
62, 18, 39, 61, 20, 44};
|
||||
static const uint8_t pi[24] = \
|
||||
{10, 7, 11, 17, 18, 3,
|
||||
5, 16, 8, 21, 24, 4,
|
||||
15, 23, 19, 13, 12, 2,
|
||||
20, 14, 22, 9, 6, 1};
|
||||
{10, 7, 11, 17, 18, 3,
|
||||
5, 16, 8, 21, 24, 4,
|
||||
15, 23, 19, 13, 12, 2,
|
||||
20, 14, 22, 9, 6, 1};
|
||||
static const uint64_t RC[24] = \
|
||||
{1ULL, 0x8082ULL, 0x800000000000808aULL, 0x8000000080008000ULL,
|
||||
0x808bULL, 0x80000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL,
|
||||
0x8aULL, 0x88ULL, 0x80008009ULL, 0x8000000aULL,
|
||||
0x8000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL,
|
||||
0x8000000000008002ULL, 0x8000000000000080ULL, 0x800aULL, 0x800000008000000aULL,
|
||||
0x8000000080008081ULL, 0x8000000000008080ULL, 0x80000001ULL, 0x8000000080008008ULL};
|
||||
{1ULL, 0x8082ULL, 0x800000000000808aULL, 0x8000000080008000ULL,
|
||||
0x808bULL, 0x80000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL,
|
||||
0x8aULL, 0x88ULL, 0x80008009ULL, 0x8000000aULL,
|
||||
0x8000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL,
|
||||
0x8000000000008002ULL, 0x8000000000000080ULL, 0x800aULL, 0x800000008000000aULL,
|
||||
0x8000000080008081ULL, 0x8000000000008080ULL, 0x80000001ULL, 0x8000000080008008ULL};
|
||||
|
||||
/*** Helper macros to unroll the permutation. ***/
|
||||
#define rol(x, s) (((x) << s) | ((x) >> (64 - s)))
|
||||
#define REPEAT6(e) e e e e e e
|
||||
#define REPEAT24(e) REPEAT6(e e e e)
|
||||
#define REPEAT5(e) e e e e e
|
||||
#define FOR5(v, s, e) \
|
||||
v = 0; \
|
||||
REPEAT5(e; v += s;)
|
||||
#define FOR5(v, s, e) \
|
||||
v = 0; \
|
||||
REPEAT5(e; v += s;)
|
||||
|
||||
/*** Keccak-f[1600] ***/
|
||||
static inline void keccakf(void* state) {
|
||||
uint64_t* a = (uint64_t*)state;
|
||||
uint64_t b[5] = {0};
|
||||
uint64_t t = 0;
|
||||
uint8_t x, y;
|
||||
uint64_t* a = (uint64_t*)state;
|
||||
uint64_t b[5] = {0};
|
||||
uint64_t t = 0;
|
||||
uint8_t x, y;
|
||||
|
||||
for (int i = 0; i < 24; i++) {
|
||||
// Theta
|
||||
FOR5(x, 1,
|
||||
b[x] = 0;
|
||||
FOR5(y, 5,
|
||||
b[x] ^= a[x + y]; ))
|
||||
FOR5(x, 1,
|
||||
FOR5(y, 5,
|
||||
a[y + x] ^= b[(x + 4) % 5] ^ rol(b[(x + 1) % 5], 1); ))
|
||||
// Rho and pi
|
||||
t = a[1];
|
||||
x = 0;
|
||||
REPEAT24(b[0] = a[pi[x]];
|
||||
a[pi[x]] = rol(t, rho[x]);
|
||||
t = b[0];
|
||||
x++; )
|
||||
// Chi
|
||||
FOR5(y,
|
||||
5,
|
||||
FOR5(x, 1,
|
||||
b[x] = a[y + x];)
|
||||
FOR5(x, 1,
|
||||
a[y + x] = b[x] ^ ((~b[(x + 1) % 5]) & b[(x + 2) % 5]); ))
|
||||
// Iota
|
||||
a[0] ^= RC[i];
|
||||
}
|
||||
for (int i = 0; i < 24; i++) {
|
||||
// Theta
|
||||
FOR5(x, 1,
|
||||
b[x] = 0;
|
||||
FOR5(y, 5,
|
||||
b[x] ^= a[x + y]; ))
|
||||
FOR5(x, 1,
|
||||
FOR5(y, 5,
|
||||
a[y + x] ^= b[(x + 4) % 5] ^ rol(b[(x + 1) % 5], 1); ))
|
||||
// Rho and pi
|
||||
t = a[1];
|
||||
x = 0;
|
||||
REPEAT24(b[0] = a[pi[x]];
|
||||
a[pi[x]] = rol(t, rho[x]);
|
||||
t = b[0];
|
||||
x++; )
|
||||
// Chi
|
||||
FOR5(y,
|
||||
5,
|
||||
FOR5(x, 1,
|
||||
b[x] = a[y + x];)
|
||||
FOR5(x, 1,
|
||||
a[y + x] = b[x] ^ ((~b[(x + 1) % 5]) & b[(x + 2) % 5]); ))
|
||||
// Iota
|
||||
a[0] ^= RC[i];
|
||||
}
|
||||
}
|
||||
|
||||
/******** The FIPS202-defined functions. ********/
|
||||
@ -83,20 +83,20 @@ static inline void keccakf(void* state) {
|
||||
/*** Some helper macros. ***/
|
||||
|
||||
#define _(S) do { S } while (0)
|
||||
#define FOR(i, ST, L, S) \
|
||||
_(for (size_t i = 0; i < L; i += ST) { S; })
|
||||
#define mkapply_ds(NAME, S) \
|
||||
static inline void NAME(uint8_t* dst, \
|
||||
const uint8_t* src, \
|
||||
size_t len) { \
|
||||
FOR(i, 1, len, S); \
|
||||
}
|
||||
#define mkapply_sd(NAME, S) \
|
||||
static inline void NAME(const uint8_t* src, \
|
||||
uint8_t* dst, \
|
||||
size_t len) { \
|
||||
FOR(i, 1, len, S); \
|
||||
}
|
||||
#define FOR(i, ST, L, S) \
|
||||
_(for (size_t i = 0; i < L; i += ST) { S; })
|
||||
#define mkapply_ds(NAME, S) \
|
||||
static inline void NAME(uint8_t* dst, \
|
||||
const uint8_t* src, \
|
||||
size_t len) { \
|
||||
FOR(i, 1, len, S); \
|
||||
}
|
||||
#define mkapply_sd(NAME, S) \
|
||||
static inline void NAME(const uint8_t* src, \
|
||||
uint8_t* dst, \
|
||||
size_t len) { \
|
||||
FOR(i, 1, len, S); \
|
||||
}
|
||||
|
||||
mkapply_ds(xorin, dst[i] ^= src[i]) // xorin
|
||||
mkapply_sd(setout, dst[i] = src[i]) // setout
|
||||
@ -105,46 +105,46 @@ mkapply_sd(setout, dst[i] = src[i]) // setout
|
||||
#define Plen 200
|
||||
|
||||
// Fold P*F over the full blocks of an input.
|
||||
#define foldP(I, L, F) \
|
||||
while (L >= rate) { \
|
||||
F(a, I, rate); \
|
||||
P(a); \
|
||||
I += rate; \
|
||||
L -= rate; \
|
||||
}
|
||||
#define foldP(I, L, F) \
|
||||
while (L >= rate) { \
|
||||
F(a, I, rate); \
|
||||
P(a); \
|
||||
I += rate; \
|
||||
L -= rate; \
|
||||
}
|
||||
|
||||
/** The sponge-based hash construction. **/
|
||||
static inline int hash(uint8_t* out, size_t outlen,
|
||||
const uint8_t* in, size_t inlen,
|
||||
size_t rate, uint8_t delim) {
|
||||
if ((out == NULL) || ((in == NULL) && inlen != 0) || (rate >= Plen)) {
|
||||
return -1;
|
||||
}
|
||||
uint8_t a[Plen] = {0};
|
||||
// Absorb input.
|
||||
foldP(in, inlen, xorin);
|
||||
// Xor in the DS and pad frame.
|
||||
a[inlen] ^= delim;
|
||||
a[rate - 1] ^= 0x80;
|
||||
// Xor in the last block.
|
||||
xorin(a, in, inlen);
|
||||
// Apply P
|
||||
P(a);
|
||||
// Squeeze output.
|
||||
foldP(out, outlen, setout);
|
||||
setout(a, out, outlen);
|
||||
memset(a, 0, 200);
|
||||
return 0;
|
||||
const uint8_t* in, size_t inlen,
|
||||
size_t rate, uint8_t delim) {
|
||||
if ((out == NULL) || ((in == NULL) && inlen != 0) || (rate >= Plen)) {
|
||||
return -1;
|
||||
}
|
||||
uint8_t a[Plen] = {0};
|
||||
// Absorb input.
|
||||
foldP(in, inlen, xorin);
|
||||
// Xor in the DS and pad frame.
|
||||
a[inlen] ^= delim;
|
||||
a[rate - 1] ^= 0x80;
|
||||
// Xor in the last block.
|
||||
xorin(a, in, inlen);
|
||||
// Apply P
|
||||
P(a);
|
||||
// Squeeze output.
|
||||
foldP(out, outlen, setout);
|
||||
setout(a, out, outlen);
|
||||
memset(a, 0, 200);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define defsha3(bits) \
|
||||
int sha3_##bits(uint8_t* out, size_t outlen, \
|
||||
const uint8_t* in, size_t inlen) { \
|
||||
if (outlen > (bits/8)) { \
|
||||
return -1; \
|
||||
} \
|
||||
return hash(out, outlen, in, inlen, 200 - (bits / 4), 0x01); \
|
||||
}
|
||||
#define defsha3(bits) \
|
||||
int sha3_##bits(uint8_t* out, size_t outlen, \
|
||||
const uint8_t* in, size_t inlen) { \
|
||||
if (outlen > (bits/8)) { \
|
||||
return -1; \
|
||||
} \
|
||||
return hash(out, outlen, in, inlen, 200 - (bits / 4), 0x01); \
|
||||
}
|
||||
|
||||
/*** FIPS202 SHA3 FOFs ***/
|
||||
defsha3(256)
|
||||
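For reference, the generated entry point is used like this (input is placeholder data):

	uint8_t digest[32];
	uint8_t const input[64] = { 0 };
	if (sha3_256(digest, sizeof(digest), input, sizeof(input)) != 0) {
		// non-zero means outlen exceeded 32 bytes or the sponge rejected its arguments
	}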
|
12
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/sha3.h
generated
vendored
@ -8,22 +8,22 @@ extern "C" {
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
struct ethash_blockhash;
|
||||
struct ethash_h256;
|
||||
|
||||
#define decsha3(bits) \
|
||||
int sha3_##bits(uint8_t*, size_t, const uint8_t*, size_t);
|
||||
int sha3_##bits(uint8_t*, size_t, uint8_t const*, size_t);
|
||||
|
||||
decsha3(256)
|
||||
decsha3(512)
|
||||
|
||||
static inline void SHA3_256(struct ethash_blockhash const* ret, uint8_t const *data, const size_t size)
|
||||
static inline void SHA3_256(struct ethash_h256 const* ret, uint8_t const* data, size_t const size)
|
||||
{
|
||||
sha3_256((uint8_t*)ret, 32, data, size);
|
||||
sha3_256((uint8_t*)ret, 32, data, size);
|
||||
}
|
||||
|
||||
static inline void SHA3_512(uint8_t *ret, uint8_t const *data, const size_t size)
|
||||
static inline void SHA3_512(uint8_t* ret, uint8_t const* data, size_t const size)
|
||||
{
|
||||
sha3_512(ret, 64, data, size);
|
||||
sha3_512(ret, 64, data, size);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
14
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/sha3_cryptopp.cpp
generated
vendored
@ -23,13 +23,15 @@
|
||||
#include <cryptopp/sha3.h>
|
||||
|
||||
extern "C" {
|
||||
struct ethash_blockhash;
|
||||
typedef struct ethash_blockhash ethash_blockhash_t;
|
||||
void SHA3_256(ethash_blockhash_t const* ret, const uint8_t *data, size_t size) {
|
||||
CryptoPP::SHA3_256().CalculateDigest((uint8_t*)ret, data, size);
|
||||
struct ethash_h256;
|
||||
typedef struct ethash_h256 ethash_h256_t;
|
||||
void SHA3_256(ethash_h256_t const* ret, uint8_t const* data, size_t size)
|
||||
{
|
||||
CryptoPP::SHA3_256().CalculateDigest((uint8_t*)ret, data, size);
|
||||
}
|
||||
|
||||
void SHA3_512(uint8_t *const ret, const uint8_t *data, size_t size) {
|
||||
CryptoPP::SHA3_512().CalculateDigest(ret, data, size);
|
||||
void SHA3_512(uint8_t* const ret, uint8_t const* data, size_t size)
|
||||
{
|
||||
CryptoPP::SHA3_512().CalculateDigest(ret, data, size);
|
||||
}
|
||||
}
|
||||
|
7
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/sha3_cryptopp.h
generated
vendored
@ -8,11 +8,10 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct ethash_blockhash;
|
||||
typedef struct ethash_blockhash ethash_blockhash_t;
|
||||
struct ethash_h256;
|
||||
|
||||
void SHA3_256(ethash_blockhash_t *const ret, const uint8_t *data, size_t size);
|
||||
void SHA3_512(uint8_t *const ret, const uint8_t *data, size_t size);
|
||||
void SHA3_256(struct ethash_h256 const* ret, uint8_t const* data, size_t size);
|
||||
void SHA3_512(uint8_t* const ret, uint8_t const* data, size_t size);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
10
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/util.h
generated
vendored
@ -26,11 +26,11 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
void debugf(const char *str, ...);
|
||||
#else
|
||||
#define debugf printf
|
||||
#endif
|
||||
//#ifdef _MSC_VER
|
||||
void debugf(char const* str, ...);
|
||||
//#else
|
||||
//#define debugf printf
|
||||
//#endif
|
||||
|
||||
static inline uint32_t min_u32(uint32_t a, uint32_t b)
|
||||
{
|
||||
|
9
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/util.c → Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/util_win32.c
generated
vendored
@ -22,20 +22,17 @@
|
||||
#include <stdio.h>
|
||||
#include "util.h"
|
||||
|
||||
#ifdef _MSC_VER
|
||||
|
||||
// forward declare without all of Windows.h
|
||||
__declspec(dllimport) void __stdcall OutputDebugStringA(const char* lpOutputString);
|
||||
__declspec(dllimport) void __stdcall OutputDebugStringA(char const* lpOutputString);
|
||||
|
||||
void debugf(const char *str, ...)
|
||||
void debugf(char const* str, ...)
|
||||
{
|
||||
va_list args;
|
||||
va_start(args, str);
|
||||
va_start(args, str);
|
||||
|
||||
char buf[1<<16];
|
||||
_vsnprintf_s(buf, sizeof(buf), sizeof(buf), str, args);
|
||||
buf[sizeof(buf)-1] = '\0';
|
||||
OutputDebugStringA(buf);
|
||||
}
|
||||
|
||||
#endif
|
79
Godeps/_workspace/src/github.com/ethereum/ethash/src/python/core.c
generated
vendored
@ -13,16 +13,16 @@
|
||||
#define PY_CONST_STRING_FORMAT "s"
|
||||
#endif
|
||||
|
||||
#define MIX_WORDS (MIX_BYTES/4)
|
||||
#define MIX_WORDS (ETHASH_MIX_BYTES/4)
|
||||
|
||||
static PyObject *
|
||||
get_cache_size(PyObject *self, PyObject *args) {
|
||||
unsigned long block_number;
|
||||
if (!PyArg_ParseTuple(args, "k", &block_number))
|
||||
return 0;
|
||||
if (block_number >= EPOCH_LENGTH * 2048) {
|
||||
if (block_number >= ETHASH_EPOCH_LENGTH * 2048) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "Block number must be less than %i (was %lu)", EPOCH_LENGTH * 2048, block_number);
|
||||
sprintf(error_message, "Block number must be less than %i (was %lu)", ETHASH_EPOCH_LENGTH * 2048, block_number);
|
||||
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
@ -36,9 +36,9 @@ get_full_size(PyObject *self, PyObject *args) {
|
||||
unsigned long block_number;
|
||||
if (!PyArg_ParseTuple(args, "k", &block_number))
|
||||
return 0;
|
||||
if (block_number >= EPOCH_LENGTH * 2048) {
|
||||
if (block_number >= ETHASH_EPOCH_LENGTH * 2048) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "Block number must be less than %i (was %lu)", EPOCH_LENGTH * 2048, block_number);
|
||||
sprintf(error_message, "Block number must be less than %i (was %lu)", ETHASH_EPOCH_LENGTH * 2048, block_number);
|
||||
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
@ -69,7 +69,7 @@ mkcache_bytes(PyObject *self, PyObject *args) {
|
||||
params.cache_size = (size_t) cache_size;
|
||||
ethash_cache cache;
|
||||
cache.mem = malloc(cache_size);
|
||||
ethash_mkcache(&cache, ¶ms, (ethash_blockhash_t *) seed);
|
||||
ethash_mkcache(&cache, ¶ms, (ethash_h256_t *) seed);
|
||||
PyObject * val = Py_BuildValue(PY_STRING_FORMAT, cache.mem, cache_size);
|
||||
free(cache.mem);
|
||||
return val;
|
||||
@ -92,9 +92,9 @@ calc_dataset_bytes(PyObject *self, PyObject *args) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (cache_size % HASH_BYTES != 0) {
|
||||
if (cache_size % ETHASH_HASH_BYTES != 0) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "The size of the cache must be a multiple of %i bytes (was %i)", HASH_BYTES, cache_size);
|
||||
sprintf(error_message, "The size of the cache must be a multiple of %i bytes (was %i)", ETHASH_HASH_BYTES, cache_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
@ -127,9 +127,9 @@ hashimoto_light(PyObject *self, PyObject *args) {
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
if (cache_size % HASH_BYTES != 0) {
|
||||
if (cache_size % ETHASH_HASH_BYTES != 0) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "The size of the cache must be a multiple of %i bytes (was %i)", HASH_BYTES, cache_size);
|
||||
sprintf(error_message, "The size of the cache must be a multiple of %i bytes (was %i)", ETHASH_HASH_BYTES, cache_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
@ -146,7 +146,7 @@ hashimoto_light(PyObject *self, PyObject *args) {
|
||||
params.full_size = (size_t) full_size;
|
||||
ethash_cache cache;
|
||||
cache.mem = (void *) cache_bytes;
|
||||
ethash_light(&out, &cache, ¶ms, (ethash_blockhash_t *) header, nonce);
|
||||
ethash_light(&out, &cache, ¶ms, (ethash_h256_t *) header, nonce);
|
||||
return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "," PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "}",
|
||||
"mix digest", &out.mix_hash, 32,
|
||||
"result", &out.result, 32);
|
||||
@ -181,7 +181,7 @@ hashimoto_full(PyObject *self, PyObject *args) {
|
||||
ethash_return_value out;
|
||||
ethash_params params;
|
||||
params.full_size = (size_t) full_size;
|
||||
ethash_full(&out, (void *) full_bytes, ¶ms, (ethash_blockhash_t *) header, nonce);
|
||||
ethash_full(&out, (void *) full_bytes, ¶ms, (ethash_h256_t *) header, nonce);
|
||||
return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "}",
|
||||
"mix digest", &out.mix_hash, 32,
|
||||
"result", &out.result, 32);
|
||||
@ -227,9 +227,9 @@ mine(PyObject *self, PyObject *args) {
|
||||
|
||||
// TODO: Multi threading?
|
||||
do {
|
||||
ethash_full(&out, (void *) full_bytes, ¶ms, (const ethash_blockhash_t *) header, nonce++);
|
||||
ethash_full(&out, (void *) full_bytes, ¶ms, (const ethash_h256_t *) header, nonce++);
|
||||
// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
|
||||
} while (!ethash_check_difficulty(&out.result, (const ethash_blockhash_t *) difficulty));
|
||||
} while (!ethash_check_difficulty(&out.result, (const ethash_h256_t *) difficulty));
|
||||
|
||||
return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":K}",
|
||||
"mix digest", &out.mix_hash, 32,
|
||||
@ -243,15 +243,14 @@ get_seedhash(PyObject *self, PyObject *args) {
|
||||
unsigned long block_number;
|
||||
if (!PyArg_ParseTuple(args, "k", &block_number))
|
||||
return 0;
|
||||
if (block_number >= EPOCH_LENGTH * 2048) {
|
||||
if (block_number >= ETHASH_EPOCH_LENGTH * 2048) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "Block number must be less than %i (was %lu)", EPOCH_LENGTH * 2048, block_number);
|
||||
sprintf(error_message, "Block number must be less than %i (was %lu)", ETHASH_EPOCH_LENGTH * 2048, block_number);
|
||||
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
ethash_blockhash_t seedhash;
|
||||
ethash_get_seedhash(&seedhash, block_number);
|
||||
ethash_h256_t seedhash = ethash_get_seedhash(block_number);
|
||||
return Py_BuildValue(PY_STRING_FORMAT, (char *) &seedhash, 32);
|
||||
}
|
||||
|
||||
@ -306,17 +305,17 @@ static struct PyModuleDef PyethashModule = {
|
||||
PyMODINIT_FUNC PyInit_pyethash(void) {
|
||||
PyObject *module = PyModule_Create(&PyethashModule);
|
||||
// Following Spec: https://github.com/ethereum/wiki/wiki/Ethash#definitions
|
||||
PyModule_AddIntConstant(module, "REVISION", (long) REVISION);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_INIT", (long) DATASET_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_GROWTH", (long) DATASET_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_INIT", (long) CACHE_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_GROWTH", (long) CACHE_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "EPOCH_LENGTH", (long) EPOCH_LENGTH);
|
||||
PyModule_AddIntConstant(module, "MIX_BYTES", (long) MIX_BYTES);
|
||||
PyModule_AddIntConstant(module, "HASH_BYTES", (long) HASH_BYTES);
|
||||
PyModule_AddIntConstant(module, "DATASET_PARENTS", (long) DATASET_PARENTS);
|
||||
PyModule_AddIntConstant(module, "CACHE_ROUNDS", (long) CACHE_ROUNDS);
|
||||
PyModule_AddIntConstant(module, "ACCESSES", (long) ACCESSES);
|
||||
PyModule_AddIntConstant(module, "REVISION", (long) ETHASH_REVISION);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_INIT", (long) ETHASH_DATASET_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_GROWTH", (long) ETHASH_DATASET_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_INIT", (long) ETHASH_CACHE_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_GROWTH", (long) ETHASH_CACHE_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "EPOCH_LENGTH", (long) ETHASH_EPOCH_LENGTH);
|
||||
PyModule_AddIntConstant(module, "MIX_BYTES", (long) ETHASH_MIX_BYTES);
|
||||
PyModule_AddIntConstant(module, "HASH_BYTES", (long) ETHASH_HASH_BYTES);
|
||||
PyModule_AddIntConstant(module, "DATASET_PARENTS", (long) ETHASH_DATASET_PARENTS);
|
||||
PyModule_AddIntConstant(module, "CACHE_ROUNDS", (long) ETHASH_CACHE_ROUNDS);
|
||||
PyModule_AddIntConstant(module, "ACCESSES", (long) ETHASH_ACCESSES);
|
||||
return module;
|
||||
}
|
||||
#else
|
||||
@ -324,16 +323,16 @@ PyMODINIT_FUNC
|
||||
initpyethash(void) {
|
||||
PyObject *module = Py_InitModule("pyethash", PyethashMethods);
|
||||
// Following Spec: https://github.com/ethereum/wiki/wiki/Ethash#definitions
|
||||
PyModule_AddIntConstant(module, "REVISION", (long) REVISION);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_INIT", (long) DATASET_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_GROWTH", (long) DATASET_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_INIT", (long) CACHE_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_GROWTH", (long) CACHE_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "EPOCH_LENGTH", (long) EPOCH_LENGTH);
|
||||
PyModule_AddIntConstant(module, "MIX_BYTES", (long) MIX_BYTES);
|
||||
PyModule_AddIntConstant(module, "HASH_BYTES", (long) HASH_BYTES);
|
||||
PyModule_AddIntConstant(module, "DATASET_PARENTS", (long) DATASET_PARENTS);
|
||||
PyModule_AddIntConstant(module, "CACHE_ROUNDS", (long) CACHE_ROUNDS);
|
||||
PyModule_AddIntConstant(module, "ACCESSES", (long) ACCESSES);
|
||||
PyModule_AddIntConstant(module, "REVISION", (long) ETHASH_REVISION);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_INIT", (long) ETHASH_DATASET_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_GROWTH", (long) ETHASH_DATASET_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_INIT", (long) ETHASH_CACHE_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_GROWTH", (long) ETHASH_CACHE_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "EPOCH_LENGTH", (long) ETHASH_EPOCH_LENGTH);
|
||||
PyModule_AddIntConstant(module, "MIX_BYTES", (long) ETHASH_MIX_BYTES);
|
||||
PyModule_AddIntConstant(module, "HASH_BYTES", (long) ETHASH_HASH_BYTES);
|
||||
PyModule_AddIntConstant(module, "DATASET_PARENTS", (long) ETHASH_DATASET_PARENTS);
|
||||
PyModule_AddIntConstant(module, "CACHE_ROUNDS", (long) ETHASH_CACHE_ROUNDS);
|
||||
PyModule_AddIntConstant(module, "ACCESSES", (long) ETHASH_ACCESSES);
|
||||
}
|
||||
#endif
|
||||
|
13
Godeps/_workspace/src/github.com/ethereum/ethash/test/c/CMakeLists.txt
generated
vendored
@ -27,13 +27,16 @@ IF( NOT Boost_FOUND )
|
||||
find_package(Boost 1.48.0 COMPONENTS unit_test_framework system filesystem)
|
||||
ENDIF()
|
||||
|
||||
IF( Boost_FOUND )
|
||||
IF (Boost_FOUND)
|
||||
message(STATUS "boost header: ${Boost_INCLUDE_DIRS}")
|
||||
message(STATUS "boost libs : ${Boost_LIBRARIES}")
|
||||
|
||||
include_directories( ${Boost_INCLUDE_DIR} )
|
||||
include_directories(../../src)
|
||||
|
||||
link_directories ( ${Boost_LIBRARY_DIRS} )
|
||||
link_directories(${Boost_LIBRARY_DIRS})
|
||||
file(GLOB HEADERS "*.h")
|
||||
if (NOT MSVC)
|
||||
if ((NOT MSVC) AND (NOT APPLE))
|
||||
ADD_DEFINITIONS(-DBOOST_TEST_DYN_LINK)
|
||||
endif()
|
||||
if (NOT CRYPTOPP_FOUND)
|
||||
@ -48,11 +51,11 @@ IF( Boost_FOUND )
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 ")
|
||||
endif()
|
||||
|
||||
add_executable (Test test.cpp ${HEADERS})
|
||||
add_executable (Test "./test.cpp" ${HEADERS})
|
||||
target_link_libraries(Test ${ETHHASH_LIBS})
|
||||
target_link_libraries(Test ${Boost_FILESYSTEM_LIBRARIES})
|
||||
target_link_libraries(Test ${Boost_SYSTEM_LIBRARIES})
|
||||
target_link_libraries (Test ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
|
||||
target_link_libraries(Test ${Boost_UNIT_TEST_FRAMEWORK_LIBRARIES})
|
||||
|
||||
if (CRYPTOPP_FOUND)
|
||||
TARGET_LINK_LIBRARIES(Test ${CRYPTOPP_LIBRARIES})
|
||||
|
879
Godeps/_workspace/src/github.com/ethereum/ethash/test/c/test.cpp
generated
vendored
@ -12,6 +12,11 @@
|
||||
#include <libethash/sha3.h>
|
||||
#endif // WITH_CRYPTOPP
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <windows.h>
|
||||
#include <Shlobj.h>
|
||||
#endif
|
||||
|
||||
#define BOOST_TEST_MODULE Daggerhashimoto
|
||||
#define BOOST_TEST_MAIN
|
||||
|
||||
@ -22,367 +27,609 @@
|
||||
#include <boost/test/unit_test.hpp>
|
||||
|
||||
using namespace std;
|
||||
using byte = uint8_t;
|
||||
using bytes = std::vector<byte>;
|
||||
namespace fs = boost::filesystem;
|
||||
|
||||
// Just an alloca "wrapper" to silence uint64_t to size_t conversion warnings in windows
|
||||
// consider replacing alloca calls with something better though!
|
||||
#define our_alloca(param__) alloca((size_t)(param__))
|
||||
|
||||
std::string bytesToHexString(const uint8_t *str, const uint64_t s) {
|
||||
std::ostringstream ret;
|
||||
|
||||
for (size_t i = 0; i < s; ++i)
|
||||
ret << std::hex << std::setfill('0') << std::setw(2) << std::nouppercase << (int) str[i];
|
||||
// some functions taken from eth::dev for convenience.
|
||||
std::string bytesToHexString(const uint8_t *str, const uint64_t s)
|
||||
{
|
||||
std::ostringstream ret;
|
||||
|
||||
return ret.str();
|
||||
for (size_t i = 0; i < s; ++i)
|
||||
ret << std::hex << std::setfill('0') << std::setw(2) << std::nouppercase << (int) str[i];
|
||||
|
||||
return ret.str();
|
||||
}
|
||||
|
||||
std::string blockhashToHexString(ethash_blockhash_t *hash) {
|
||||
return bytesToHexString((uint8_t*)hash, 32);
|
||||
std::string blockhashToHexString(ethash_h256_t* _hash)
|
||||
{
|
||||
return bytesToHexString((uint8_t*)_hash, 32);
|
||||
}
|
||||
|
||||
int fromHex(char _i)
|
||||
{
|
||||
if (_i >= '0' && _i <= '9')
|
||||
return _i - '0';
|
||||
if (_i >= 'a' && _i <= 'f')
|
||||
return _i - 'a' + 10;
|
||||
if (_i >= 'A' && _i <= 'F')
|
||||
return _i - 'A' + 10;
|
||||
|
||||
BOOST_REQUIRE_MESSAGE(false, "should never get here");
|
||||
return -1;
|
||||
}
|
||||
|
||||
bytes hexStringToBytes(std::string const& _s)
|
||||
{
|
||||
unsigned s = (_s[0] == '0' && _s[1] == 'x') ? 2 : 0;
|
||||
std::vector<uint8_t> ret;
|
||||
ret.reserve((_s.size() - s + 1) / 2);
|
||||
|
||||
if (_s.size() % 2)
|
||||
try
|
||||
{
|
||||
ret.push_back(fromHex(_s[s++]));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
ret.push_back(0);
|
||||
}
|
||||
for (unsigned i = s; i < _s.size(); i += 2)
|
||||
try
|
||||
{
|
||||
ret.push_back((byte)(fromHex(_s[i]) * 16 + fromHex(_s[i + 1])));
|
||||
}
|
||||
catch (...){
|
||||
ret.push_back(0);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
ethash_h256_t stringToBlockhash(std::string const& _s)
|
||||
{
|
||||
ethash_h256_t ret;
|
||||
bytes b = hexStringToBytes(_s);
|
||||
memcpy(&ret, b.data(), b.size());
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
|
||||
BOOST_AUTO_TEST_CASE(fnv_hash_check) {
|
||||
uint32_t x = 1235U;
|
||||
const uint32_t
|
||||
y = 9999999U,
|
||||
expected = (FNV_PRIME * x) ^y;
|
||||
uint32_t x = 1235U;
|
||||
const uint32_t
|
||||
y = 9999999U,
|
||||
expected = (FNV_PRIME * x) ^y;
|
||||
|
||||
x = fnv_hash(x, y);
|
||||
x = fnv_hash(x, y);
|
||||
|
||||
BOOST_REQUIRE_MESSAGE(x == expected,
|
||||
"\nexpected: " << expected << "\n"
|
||||
<< "actual: " << x << "\n");
|
||||
BOOST_REQUIRE_MESSAGE(x == expected,
|
||||
"\nexpected: " << expected << "\n"
|
||||
<< "actual: " << x << "\n");
|
||||
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(SHA256_check) {
|
||||
ethash_blockhash_t input;
|
||||
ethash_blockhash_t out;
|
||||
memcpy(&input, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
SHA3_256(&out, (uint8_t*)&input, 32);
|
||||
const std::string
|
||||
expected = "2b5ddf6f4d21c23de216f44d5e4bdc68e044b71897837ea74c83908be7037cd7",
|
||||
actual = bytesToHexString((uint8_t*)&out, 32);
|
||||
BOOST_REQUIRE_MESSAGE(expected == actual,
|
||||
"\nexpected: " << expected.c_str() << "\n"
|
||||
<< "actual: " << actual.c_str() << "\n");
|
||||
ethash_h256_t input;
|
||||
ethash_h256_t out;
|
||||
memcpy(&input, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
SHA3_256(&out, (uint8_t*)&input, 32);
|
||||
const std::string
|
||||
expected = "2b5ddf6f4d21c23de216f44d5e4bdc68e044b71897837ea74c83908be7037cd7",
|
||||
actual = bytesToHexString((uint8_t*)&out, 32);
|
||||
BOOST_REQUIRE_MESSAGE(expected == actual,
|
||||
"\nexpected: " << expected.c_str() << "\n"
|
||||
<< "actual: " << actual.c_str() << "\n");
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(SHA512_check) {
|
||||
uint8_t input[64], out[64];
|
||||
memcpy(input, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 64);
|
||||
SHA3_512(out, input, 64);
|
||||
const std::string
|
||||
expected = "0be8a1d334b4655fe58c6b38789f984bb13225684e86b20517a55ab2386c7b61c306f25e0627c60064cecd6d80cd67a82b3890bd1289b7ceb473aad56a359405",
|
||||
actual = bytesToHexString(out, 64);
|
||||
BOOST_REQUIRE_MESSAGE(expected == actual,
|
||||
"\nexpected: " << expected.c_str() << "\n"
|
||||
<< "actual: " << actual.c_str() << "\n");
|
||||
uint8_t input[64], out[64];
|
||||
memcpy(input, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 64);
|
||||
SHA3_512(out, input, 64);
|
||||
const std::string
|
||||
expected = "0be8a1d334b4655fe58c6b38789f984bb13225684e86b20517a55ab2386c7b61c306f25e0627c60064cecd6d80cd67a82b3890bd1289b7ceb473aad56a359405",
|
||||
actual = bytesToHexString(out, 64);
|
||||
BOOST_REQUIRE_MESSAGE(expected == actual,
|
||||
"\nexpected: " << expected.c_str() << "\n"
|
||||
<< "actual: " << actual.c_str() << "\n");
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_swap_endian32) {
|
||||
uint32_t v32 = (uint32_t)0xBAADF00D;
|
||||
v32 = ethash_swap_u32(v32);
|
||||
BOOST_REQUIRE_EQUAL(v32, (uint32_t)0x0DF0ADBA);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_swap_endian64) {
|
||||
uint64_t v64 = (uint64_t)0xFEE1DEADDEADBEEF;
|
||||
v64 = ethash_swap_u64(v64);
|
||||
BOOST_REQUIRE_EQUAL(v64, (uint64_t)0xEFBEADDEADDEE1FE);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(ethash_params_init_genesis_check) {
|
||||
ethash_params params;
|
||||
ethash_params_init(¶ms, 0);
|
||||
BOOST_REQUIRE_MESSAGE(params.full_size < DATASET_BYTES_INIT,
|
||||
"\nfull size: " << params.full_size << "\n"
|
||||
<< "should be less than or equal to: " << DATASET_BYTES_INIT << "\n");
|
||||
BOOST_REQUIRE_MESSAGE(params.full_size + 20 * MIX_BYTES >= DATASET_BYTES_INIT,
|
||||
"\nfull size + 20*MIX_BYTES: " << params.full_size + 20 * MIX_BYTES << "\n"
|
||||
<< "should be greater than or equal to: " << DATASET_BYTES_INIT << "\n");
|
||||
BOOST_REQUIRE_MESSAGE(params.cache_size < DATASET_BYTES_INIT / 32,
|
||||
"\ncache size: " << params.cache_size << "\n"
|
||||
<< "should be less than or equal to: " << DATASET_BYTES_INIT / 32 << "\n");
|
||||
uint64_t full_size = ethash_get_datasize(0);
|
||||
uint64_t cache_size = ethash_get_cachesize(0);
|
||||
BOOST_REQUIRE_MESSAGE(full_size < ETHASH_DATASET_BYTES_INIT,
|
||||
"\nfull size: " << full_size << "\n"
|
||||
<< "should be less than or equal to: " << ETHASH_DATASET_BYTES_INIT << "\n");
|
||||
BOOST_REQUIRE_MESSAGE(full_size + 20 * ETHASH_MIX_BYTES >= ETHASH_DATASET_BYTES_INIT,
|
||||
"\nfull size + 20*MIX_BYTES: " << full_size + 20 * ETHASH_MIX_BYTES << "\n"
|
||||
<< "should be greater than or equal to: " << ETHASH_DATASET_BYTES_INIT << "\n");
|
||||
BOOST_REQUIRE_MESSAGE(cache_size < ETHASH_DATASET_BYTES_INIT / 32,
|
||||
"\ncache size: " << cache_size << "\n"
|
||||
<< "should be less than or equal to: " << ETHASH_DATASET_BYTES_INIT / 32 << "\n");
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(ethash_params_init_genesis_calcifide_check) {
|
||||
ethash_params params;
|
||||
ethash_params_init(¶ms, 0);
|
||||
const uint32_t expected_full_size = 1073739904;
|
||||
const uint32_t expected_cache_size = 16776896;
|
||||
BOOST_REQUIRE_MESSAGE(params.full_size == expected_full_size,
|
||||
"\nexpected: " << expected_cache_size << "\n"
|
||||
<< "actual: " << params.full_size << "\n");
|
||||
BOOST_REQUIRE_MESSAGE(params.cache_size == expected_cache_size,
|
||||
"\nexpected: " << expected_cache_size << "\n"
|
||||
<< "actual: " << params.cache_size << "\n");
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(light_and_full_client_checks) {
|
||||
ethash_params params;
|
||||
ethash_blockhash_t seed;
|
||||
ethash_blockhash_t hash;
|
||||
ethash_blockhash_t difficulty;
|
||||
ethash_return_value light_out;
|
||||
ethash_return_value full_out;
|
||||
memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
|
||||
// Set the difficulty
|
||||
ethash_blockhash_set(&difficulty, 0, 197);
|
||||
ethash_blockhash_set(&difficulty, 1, 90);
|
||||
for (int i = 2; i < 32; i++)
|
||||
ethash_blockhash_set(&difficulty, i, 255);
|
||||
|
||||
ethash_params_init(¶ms, 0);
|
||||
params.cache_size = 1024;
|
||||
params.full_size = 1024 * 32;
|
||||
ethash_cache cache;
|
||||
cache.mem = our_alloca(params.cache_size);
|
||||
ethash_mkcache(&cache, ¶ms, &seed);
|
||||
node *full_mem = (node *) our_alloca(params.full_size);
|
||||
ethash_compute_full_data(full_mem, ¶ms, &cache);
|
||||
|
||||
{
|
||||
const std::string
|
||||
expected = "2da2b506f21070e1143d908e867962486d6b0a02e31d468fd5e3a7143aafa76a14201f63374314e2a6aaf84ad2eb57105dea3378378965a1b3873453bb2b78f9a8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995c259440b89fa3481c2c33171477c305c8e1e421f8d8f6d59585449d0034f3e421808d8da6bbd0b6378f567647cc6c4ba6c434592b198ad444e7284905b7c6adaf70bf43ec2daa7bd5e8951aa609ab472c124cf9eba3d38cff5091dc3f58409edcc386c743c3bd66f92408796ee1e82dd149eaefbf52b00ce33014a6eb3e50625413b072a58bc01da28262f42cbe4f87d4abc2bf287d15618405a1fe4e386fcdafbb171064bd99901d8f81dd6789396ce5e364ac944bbbd75a7827291c70b42d26385910cd53ca535ab29433dd5c5714d26e0dce95514c5ef866329c12e958097e84462197c2b32087849dab33e88b11da61d52f9dbc0b92cc61f742c07dbbf751c49d7678624ee60dfbe62e5e8c47a03d8247643f3d16ad8c8e663953bcda1f59d7e2d4a9bf0768e789432212621967a8f41121ad1df6ae1fa78782530695414c6213942865b2730375019105cae91a4c17a558d4b63059661d9f108362143107babe0b848de412e4da59168cce82bfbff3c99e022dd6ac1e559db991f2e3f7bb910cefd173e65ed00a8d5d416534e2c8416ff23977dbf3eb7180b75c71580d08ce95efeb9b0afe904ea12285a392aff0c8561ff79fca67f694a62b9e52377485c57cc3598d84cac0a9d27960de0cc31ff9bbfe455acaa62c8aa5d2cce96f345da9afe843d258a99c4eaf3650fc62efd81c7b81cd0d534d2d71eeda7a6e315d540b4473c80f8730037dc2ae3e47b986240cfc65ccc565f0d8cde0bc68a57e39a271dda57440b3598bee19f799611d25731a96b5dbbbefdff6f4f656161462633030d62560ea4e9c161cf78fc96a2ca5aaa32453a6c5dea206f766244e8c9d9a8dc61185ce37f1fc804459c5f07434f8ecb34141b8dcae7eae704c950b55556c5f40140c3714b45eddb02637513268778cbf937a33e4e33183685f9deb31ef54e90161e76d969587dd782eaa94e289420e7c2ee908517f5893a26fdb5873d68f92d118d4bcf98d7a4916794d6ab290045e30f9ea00ca547c584b8482b0331ba1539a0f2714fddc3a0b06b0cfbb6a607b8339c39bcfd6640b1f653e9d70ef6c985b",
|
||||
actual = bytesToHexString((uint8_t const *) cache.mem, params.cache_size);
|
||||
|
||||
BOOST_REQUIRE_MESSAGE(expected == actual,
|
||||
"\nexpected: " << expected.c_str() << "\n"
|
||||
<< "actual: " << actual.c_str() << "\n");
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
node node;
|
||||
ethash_calculate_dag_item(&node, 0, ¶ms, &cache);
|
||||
const std::string
|
||||
actual = bytesToHexString((uint8_t const *) &node, sizeof(node)),
|
||||
expected = "b1698f829f90b35455804e5185d78f549fcb1bdce2bee006d4d7e68eb154b596be1427769eb1c3c3e93180c760af75f81d1023da6a0ffbe321c153a7c0103597";
|
||||
BOOST_REQUIRE_MESSAGE(actual == expected,
|
||||
"\n" << "expected: " << expected.c_str() << "\n"
|
||||
<< "actual: " << actual.c_str() << "\n");
|
||||
}
|
||||
|
||||
{
|
||||
for (int i = 0; i < params.full_size / sizeof(node); ++i) {
|
||||
for (uint32_t j = 0; j < 32; ++j) {
|
||||
node expected_node;
|
||||
ethash_calculate_dag_item(&expected_node, j, ¶ms, &cache);
|
||||
const std::string
|
||||
actual = bytesToHexString((uint8_t const *) &(full_mem[j]), sizeof(node)),
|
||||
expected = bytesToHexString((uint8_t const *) &expected_node, sizeof(node));
|
||||
BOOST_REQUIRE_MESSAGE(actual == expected,
|
||||
"\ni: " << j << "\n"
|
||||
<< "expected: " << expected.c_str() << "\n"
|
||||
<< "actual: " << actual.c_str() << "\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
uint64_t nonce = 0x7c7c597c;
|
||||
ethash_full(&full_out, full_mem, ¶ms, &hash, nonce);
|
||||
ethash_light(&light_out, &cache, ¶ms, &hash, nonce);
|
||||
const std::string
|
||||
light_result_string = blockhashToHexString(&light_out.result),
|
||||
full_result_string = blockhashToHexString(&full_out.result);
|
||||
BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string,
|
||||
"\nlight result: " << light_result_string.c_str() << "\n"
|
||||
<< "full result: " << full_result_string.c_str() << "\n");
|
||||
const std::string
|
||||
light_mix_hash_string = blockhashToHexString(&light_out.mix_hash),
|
||||
full_mix_hash_string = blockhashToHexString(&full_out.mix_hash);
|
||||
BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string,
|
||||
"\nlight mix hash: " << light_mix_hash_string.c_str() << "\n"
|
||||
<< "full mix hash: " << full_mix_hash_string.c_str() << "\n");
|
||||
ethash_blockhash_t check_hash;
|
||||
ethash_quick_hash(&check_hash, &hash, nonce, &full_out.mix_hash);
|
||||
const std::string check_hash_string = blockhashToHexString(&check_hash);
|
||||
BOOST_REQUIRE_MESSAGE(check_hash_string == full_result_string,
|
||||
"\ncheck hash string: " << check_hash_string.c_str() << "\n"
|
||||
<< "full result: " << full_result_string.c_str() << "\n");
|
||||
}
|
||||
{
|
||||
ethash_full(&full_out, full_mem, ¶ms, &hash, 5);
|
||||
std::string
|
||||
light_result_string = blockhashToHexString(&light_out.result),
|
||||
full_result_string = blockhashToHexString(&full_out.result);
|
||||
|
||||
BOOST_REQUIRE_MESSAGE(light_result_string != full_result_string,
|
||||
"\nlight result and full result should differ: " << light_result_string.c_str() << "\n");
|
||||
|
||||
ethash_light(&light_out, &cache, ¶ms, &hash, 5);
|
||||
light_result_string = blockhashToHexString(&light_out.result);
|
||||
BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string,
|
||||
"\nlight result and full result should be the same\n"
|
||||
<< "light result: " << light_result_string.c_str() << "\n"
|
||||
<< "full result: " << full_result_string.c_str() << "\n");
|
||||
std::string
|
||||
light_mix_hash_string = blockhashToHexString(&light_out.mix_hash),
|
||||
full_mix_hash_string = blockhashToHexString(&full_out.mix_hash);
|
||||
BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string,
|
||||
"\nlight mix hash: " << light_mix_hash_string.c_str() << "\n"
|
||||
<< "full mix hash: " << full_mix_hash_string.c_str() << "\n");
|
||||
BOOST_REQUIRE_MESSAGE(ethash_check_difficulty(&full_out.result, &difficulty),
|
||||
"ethash_check_difficulty failed"
|
||||
);
|
||||
BOOST_REQUIRE_MESSAGE(ethash_quick_check_difficulty(&hash, 5U, &full_out.mix_hash, &difficulty),
|
||||
"ethash_quick_check_difficulty failed"
|
||||
);
|
||||
}
|
||||
uint64_t full_size = ethash_get_datasize(0);
|
||||
uint64_t cache_size = ethash_get_cachesize(0);
|
||||
const uint32_t expected_full_size = 1073739904;
|
||||
const uint32_t expected_cache_size = 16776896;
|
||||
BOOST_REQUIRE_MESSAGE(full_size == expected_full_size,
|
||||
"\nexpected: " << expected_cache_size << "\n"
|
||||
<< "actual: " << full_size << "\n");
|
||||
BOOST_REQUIRE_MESSAGE(cache_size == expected_cache_size,
|
||||
"\nexpected: " << expected_cache_size << "\n"
|
||||
<< "actual: " << cache_size << "\n");
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(ethash_check_difficulty_check) {
	ethash_h256_t hash;
	ethash_h256_t target;
	memcpy(&hash, "11111111111111111111111111111111", 32);
	memcpy(&target, "22222222222222222222222222222222", 32);
	BOOST_REQUIRE_MESSAGE(
		ethash_check_difficulty(&hash, &target),
		"\nexpected \"" << std::string((char *) &hash, 32).c_str() << "\" to have the same or less difficulty than \"" << std::string((char *) &target, 32).c_str() << "\"\n");
	BOOST_REQUIRE_MESSAGE(
		ethash_check_difficulty(&hash, &hash), "");
	// "\nexpected \"" << hash << "\" to have the same or less difficulty than \"" << hash << "\"\n");
	memcpy(&target, "11111111111111111111111111111112", 32);
	BOOST_REQUIRE_MESSAGE(
		ethash_check_difficulty(&hash, &target), "");
	// "\nexpected \"" << hash << "\" to have the same or less difficulty than \"" << target << "\"\n");
	memcpy(&target, "11111111111111111111111111111110", 32);
	BOOST_REQUIRE_MESSAGE(
		!ethash_check_difficulty(&hash, &target), "");
	// "\nexpected \"" << hash << "\" to have more difficulty than \"" << target << "\"\n");
}
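The fixtures above pin down the comparison semantics: a hash passes when it is less than or equal to the boundary, with both values read as big-endian 256-bit integers. A minimal sketch of that check in Go, for illustration only (this is not the library's C implementation):

```go
package main

import "fmt"

// checkDifficulty returns true when hash <= boundary, comparing the two
// 32-byte values as big-endian integers, which is the behaviour the
// assertions above expect from ethash_check_difficulty.
func checkDifficulty(hash, boundary [32]byte) bool {
	for i := 0; i < 32; i++ {
		if hash[i] != boundary[i] {
			return hash[i] < boundary[i]
		}
	}
	return true // equal values pass, as the second assertion requires
}

func main() {
	var hash, target [32]byte
	copy(hash[:], "11111111111111111111111111111111")
	copy(target[:], "11111111111111111111111111111110")
	fmt.Println(checkDifficulty(hash, target)) // false, matching the last assertion
}
```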
BOOST_AUTO_TEST_CASE(test_ethash_io_mutable_name) {
	char mutable_name[DAG_MUTABLE_NAME_MAX_SIZE];
	// should have at least 8 bytes provided since this is what we test :)
	ethash_h256_t seed1 = ethash_h256_static_init(0, 10, 65, 255, 34, 55, 22, 8);
	ethash_io_mutable_name(1, &seed1, mutable_name);
	BOOST_REQUIRE_EQUAL(0, strcmp(mutable_name, "1_000a41ff22371608"));
	ethash_h256_t seed2 = ethash_h256_static_init(0, 0, 0, 0, 0, 0, 0, 0);
	ethash_io_mutable_name(44, &seed2, mutable_name);
	BOOST_REQUIRE_EQUAL(0, strcmp(mutable_name, "44_0000000000000000"));
}
BOOST_AUTO_TEST_CASE(test_ethash_dir_creation) {
	ethash_h256_t seedhash;
	FILE *f = NULL;
	memset(&seedhash, 0, 32);
	BOOST_REQUIRE_EQUAL(
		ETHASH_IO_MEMO_MISMATCH,
		ethash_io_prepare("./test_ethash_directory/", seedhash, &f, 64, false)
	);
	BOOST_REQUIRE(f);

	// let's make sure that the directory was created
	BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/")));

	// cleanup
	fclose(f);
	fs::remove_all("./test_ethash_directory/");
}
BOOST_AUTO_TEST_CASE(test_ethash_io_write_files_are_created) {
|
||||
ethash_blockhash_t seedhash;
|
||||
static const int blockn = 0;
|
||||
ethash_get_seedhash(&seedhash, blockn);
|
||||
BOOST_REQUIRE_EQUAL(
|
||||
ETHASH_IO_MEMO_MISMATCH,
|
||||
ethash_io_prepare("./test_ethash_directory/", seedhash)
|
||||
);
|
||||
|
||||
// let's make sure that the directory was created
|
||||
BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/")));
|
||||
|
||||
ethash_cache cache;
|
||||
ethash_params params;
|
||||
uint8_t *data;
|
||||
uint64_t size;
|
||||
ethash_params_init(¶ms, blockn);
|
||||
params.cache_size = 1024;
|
||||
params.full_size = 1024 * 32;
|
||||
cache.mem = our_alloca(params.cache_size);
|
||||
ethash_mkcache(&cache, ¶ms, &seedhash);
|
||||
|
||||
BOOST_REQUIRE(
|
||||
ethash_io_write("./test_ethash_directory/", ¶ms, seedhash, &cache, &data, &size)
|
||||
);
|
||||
|
||||
BOOST_REQUIRE(fs::exists(fs::path("./test_ethash_directory/full")));
|
||||
BOOST_REQUIRE(fs::exists(fs::path("./test_ethash_directory/full.info")));
|
||||
|
||||
// cleanup
|
||||
fs::remove_all("./test_ethash_directory/");
|
||||
free(data);
|
||||
// cleanup
|
||||
fclose(f);
|
||||
fs::remove_all("./test_ethash_directory/");
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_ethash_io_memo_file_match) {
|
||||
ethash_blockhash_t seedhash;
|
||||
static const int blockn = 0;
|
||||
ethash_get_seedhash(&seedhash, blockn);
|
||||
BOOST_REQUIRE_EQUAL(
|
||||
ETHASH_IO_MEMO_MISMATCH,
|
||||
ethash_io_prepare("./test_ethash_directory/", seedhash)
|
||||
);
|
||||
uint64_t full_size;
|
||||
uint64_t cache_size;
|
||||
ethash_h256_t seed;
|
||||
ethash_h256_t hash;
|
||||
FILE* f;
|
||||
memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
|
||||
// let's make sure that the directory was created
|
||||
BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/")));
|
||||
cache_size = 1024;
|
||||
full_size = 1024 * 32;
|
||||
|
||||
ethash_cache cache;
|
||||
ethash_params params;
|
||||
uint8_t *data;
|
||||
uint64_t size;
|
||||
ethash_params_init(¶ms, blockn);
|
||||
params.cache_size = 1024;
|
||||
params.full_size = 1024 * 32;
|
||||
cache.mem = our_alloca(params.cache_size);
|
||||
ethash_mkcache(&cache, ¶ms, &seedhash);
|
||||
ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
|
||||
ethash_full_t full = ethash_full_new_internal(
|
||||
"./test_ethash_directory/",
|
||||
seed,
|
||||
full_size,
|
||||
light,
|
||||
NULL
|
||||
);
|
||||
BOOST_ASSERT(full);
|
||||
// let's make sure that the directory was created
|
||||
BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/")));
|
||||
// delete the full here so that memory is properly unmapped and FILE handler freed
|
||||
ethash_full_delete(full);
|
||||
// and check that we have a match when checking again
|
||||
BOOST_REQUIRE_EQUAL(
|
||||
ETHASH_IO_MEMO_MATCH,
|
||||
ethash_io_prepare("./test_ethash_directory/", seed, &f, full_size, false)
|
||||
);
|
||||
BOOST_REQUIRE(f);
|
||||
|
||||
BOOST_REQUIRE(
|
||||
ethash_io_write("./test_ethash_directory/", ¶ms, seedhash, &cache, &data, &size)
|
||||
);
|
||||
|
||||
BOOST_REQUIRE(fs::exists(fs::path("./test_ethash_directory/full")));
|
||||
BOOST_REQUIRE(fs::exists(fs::path("./test_ethash_directory/full.info")));
|
||||
|
||||
BOOST_REQUIRE_EQUAL(
|
||||
ETHASH_IO_MEMO_MATCH,
|
||||
ethash_io_prepare("./test_ethash_directory/", seedhash)
|
||||
);
|
||||
|
||||
// cleanup
|
||||
fs::remove_all("./test_ethash_directory/");
|
||||
free(data);
|
||||
// cleanup
|
||||
fclose(f);
|
||||
ethash_light_delete(light);
|
||||
fs::remove_all("./test_ethash_directory/");
|
||||
}
|
||||
|
||||
// could have used dev::contentsNew but don't wanna try to import
// libdevcore just for one function
static std::vector<char> readFileIntoVector(char const* filename)
{
	ifstream ifs(filename, ios::binary|ios::ate);
	ifstream::pos_type pos = ifs.tellg();

	std::vector<char> result((unsigned int)pos);

	ifs.seekg(0, ios::beg);
	ifs.read(&result[0], pos);

	return result;
}

BOOST_AUTO_TEST_CASE(test_ethash_io_memo_file_size_mismatch) {
	static const int blockn = 0;
	ethash_h256_t seedhash = ethash_get_seedhash(blockn);
	FILE *f = NULL;
	BOOST_REQUIRE_EQUAL(
		ETHASH_IO_MEMO_MISMATCH,
		ethash_io_prepare("./test_ethash_directory/", seedhash, &f, 64, false)
	);
	BOOST_REQUIRE(f);
	fclose(f);

	// let's make sure that the directory was created
	BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/")));
	// and check that we get the size mismatch detected if we request different size
	BOOST_REQUIRE_EQUAL(
		ETHASH_IO_MEMO_SIZE_MISMATCH,
		ethash_io_prepare("./test_ethash_directory/", seedhash, &f, 65, false)
	);

	// cleanup
	fs::remove_all("./test_ethash_directory/");
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_ethash_get_default_dirname) {
	char result[256];
	// this is really not an easy thing to test for in a unit test, so yeah it does look ugly
#ifdef _WIN32
	char homedir[256];
	BOOST_REQUIRE(SUCCEEDED(SHGetFolderPathW(NULL, CSIDL_PROFILE, NULL, 0, (WCHAR*)homedir)));
	BOOST_REQUIRE(ethash_get_default_dirname(result, 256));
	std::string res = std::string(homedir) + std::string("\\Appdata\\Ethash\\");
#else
	char* homedir = getenv("HOME");
	BOOST_REQUIRE(ethash_get_default_dirname(result, 256));
	std::string res = std::string(homedir) + std::string("/.ethash/");
#endif
	BOOST_CHECK_MESSAGE(strcmp(res.c_str(), result) == 0,
		"Expected \"" + res + "\" but got \"" + std::string(result) + "\""
	);
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(light_and_full_client_checks) {
|
||||
uint64_t full_size;
|
||||
uint64_t cache_size;
|
||||
ethash_h256_t seed;
|
||||
ethash_h256_t hash;
|
||||
ethash_h256_t difficulty;
|
||||
ethash_return_value_t light_out;
|
||||
ethash_return_value_t full_out;
|
||||
memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
|
||||
// Set the difficulty
|
||||
ethash_h256_set(&difficulty, 0, 197);
|
||||
ethash_h256_set(&difficulty, 1, 90);
|
||||
for (int i = 2; i < 32; i++)
|
||||
ethash_h256_set(&difficulty, i, 255);
|
||||
|
||||
cache_size = 1024;
|
||||
full_size = 1024 * 32;
|
||||
|
||||
ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
|
||||
ethash_full_t full = ethash_full_new_internal(
|
||||
"./test_ethash_directory/",
|
||||
seed,
|
||||
full_size,
|
||||
light,
|
||||
NULL
|
||||
);
|
||||
BOOST_ASSERT(full);
|
||||
{
|
||||
const std::string
|
||||
expected = "2da2b506f21070e1143d908e867962486d6b0a02e31d468fd5e3a7143aafa76a14201f63374314e2a6aaf84ad2eb57105dea3378378965a1b3873453bb2b78f9a8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995c259440b89fa3481c2c33171477c305c8e1e421f8d8f6d59585449d0034f3e421808d8da6bbd0b6378f567647cc6c4ba6c434592b198ad444e7284905b7c6adaf70bf43ec2daa7bd5e8951aa609ab472c124cf9eba3d38cff5091dc3f58409edcc386c743c3bd66f92408796ee1e82dd149eaefbf52b00ce33014a6eb3e50625413b072a58bc01da28262f42cbe4f87d4abc2bf287d15618405a1fe4e386fcdafbb171064bd99901d8f81dd6789396ce5e364ac944bbbd75a7827291c70b42d26385910cd53ca535ab29433dd5c5714d26e0dce95514c5ef866329c12e958097e84462197c2b32087849dab33e88b11da61d52f9dbc0b92cc61f742c07dbbf751c49d7678624ee60dfbe62e5e8c47a03d8247643f3d16ad8c8e663953bcda1f59d7e2d4a9bf0768e789432212621967a8f41121ad1df6ae1fa78782530695414c6213942865b2730375019105cae91a4c17a558d4b63059661d9f108362143107babe0b848de412e4da59168cce82bfbff3c99e022dd6ac1e559db991f2e3f7bb910cefd173e65ed00a8d5d416534e2c8416ff23977dbf3eb7180b75c71580d08ce95efeb9b0afe904ea12285a392aff0c8561ff79fca67f694a62b9e52377485c57cc3598d84cac0a9d27960de0cc31ff9bbfe455acaa62c8aa5d2cce96f345da9afe843d258a99c4eaf3650fc62efd81c7b81cd0d534d2d71eeda7a6e315d540b4473c80f8730037dc2ae3e47b986240cfc65ccc565f0d8cde0bc68a57e39a271dda57440b3598bee19f799611d25731a96b5dbbbefdff6f4f656161462633030d62560ea4e9c161cf78fc96a2ca5aaa32453a6c5dea206f766244e8c9d9a8dc61185ce37f1fc804459c5f07434f8ecb34141b8dcae7eae704c950b55556c5f40140c3714b45eddb02637513268778cbf937a33e4e33183685f9deb31ef54e90161e76d969587dd782eaa94e289420e7c2ee908517f5893a26fdb5873d68f92d118d4bcf98d7a4916794d6ab290045e30f9ea00ca547c584b8482b0331ba1539a0f2714fddc3a0b06b0cfbb6a607b8339c39bcfd6640b1f653e9d70ef6c985b",
|
||||
actual = bytesToHexString((uint8_t const *) light->cache, cache_size);
|
||||
|
||||
BOOST_REQUIRE_MESSAGE(expected == actual,
|
||||
"\nexpected: " << expected.c_str() << "\n"
|
||||
<< "actual: " << actual.c_str() << "\n");
|
||||
}
|
||||
{
|
||||
node node;
|
||||
ethash_calculate_dag_item(&node, 0, light);
|
||||
const std::string
|
||||
actual = bytesToHexString((uint8_t const *) &node, sizeof(node)),
|
||||
expected = "b1698f829f90b35455804e5185d78f549fcb1bdce2bee006d4d7e68eb154b596be1427769eb1c3c3e93180c760af75f81d1023da6a0ffbe321c153a7c0103597";
|
||||
BOOST_REQUIRE_MESSAGE(actual == expected,
|
||||
"\n" << "expected: " << expected.c_str() << "\n"
|
||||
<< "actual: " << actual.c_str() << "\n");
|
||||
}
|
||||
{
|
||||
for (int i = 0; i < full_size / sizeof(node); ++i) {
|
||||
for (uint32_t j = 0; j < 32; ++j) {
|
||||
node expected_node;
|
||||
ethash_calculate_dag_item(&expected_node, j, light);
|
||||
const std::string
|
||||
actual = bytesToHexString((uint8_t const *) &(full->data[j]), sizeof(node)),
|
||||
expected = bytesToHexString((uint8_t const *) &expected_node, sizeof(node));
|
||||
BOOST_REQUIRE_MESSAGE(actual == expected,
|
||||
"\ni: " << j << "\n"
|
||||
<< "expected: " << expected.c_str() << "\n"
|
||||
<< "actual: " << actual.c_str() << "\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
uint64_t nonce = 0x7c7c597c;
|
||||
full_out = ethash_full_compute(full, hash, nonce);
|
||||
BOOST_REQUIRE(full_out.success);
|
||||
light_out = ethash_light_compute_internal(light, full_size, hash, nonce);
|
||||
BOOST_REQUIRE(light_out.success);
|
||||
const std::string
|
||||
light_result_string = blockhashToHexString(&light_out.result),
|
||||
full_result_string = blockhashToHexString(&full_out.result);
|
||||
BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string,
|
||||
"\nlight result: " << light_result_string.c_str() << "\n"
|
||||
<< "full result: " << full_result_string.c_str() << "\n");
|
||||
const std::string
|
||||
light_mix_hash_string = blockhashToHexString(&light_out.mix_hash),
|
||||
full_mix_hash_string = blockhashToHexString(&full_out.mix_hash);
|
||||
BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string,
|
||||
"\nlight mix hash: " << light_mix_hash_string.c_str() << "\n"
|
||||
<< "full mix hash: " << full_mix_hash_string.c_str() << "\n");
|
||||
ethash_h256_t check_hash;
|
||||
ethash_quick_hash(&check_hash, &hash, nonce, &full_out.mix_hash);
|
||||
const std::string check_hash_string = blockhashToHexString(&check_hash);
|
||||
BOOST_REQUIRE_MESSAGE(check_hash_string == full_result_string,
|
||||
"\ncheck hash string: " << check_hash_string.c_str() << "\n"
|
||||
<< "full result: " << full_result_string.c_str() << "\n");
|
||||
}
|
||||
{
|
||||
full_out = ethash_full_compute(full, hash, 5);
|
||||
BOOST_REQUIRE(full_out.success);
|
||||
std::string
|
||||
light_result_string = blockhashToHexString(&light_out.result),
|
||||
full_result_string = blockhashToHexString(&full_out.result);
|
||||
BOOST_REQUIRE_MESSAGE(light_result_string != full_result_string,
|
||||
"\nlight result and full result should differ: " << light_result_string.c_str() << "\n");
|
||||
|
||||
light_out = ethash_light_compute_internal(light, full_size, hash, 5);
|
||||
BOOST_REQUIRE(light_out.success);
|
||||
light_result_string = blockhashToHexString(&light_out.result);
|
||||
BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string,
|
||||
"\nlight result and full result should be the same\n"
|
||||
<< "light result: " << light_result_string.c_str() << "\n"
|
||||
<< "full result: " << full_result_string.c_str() << "\n");
|
||||
std::string
|
||||
light_mix_hash_string = blockhashToHexString(&light_out.mix_hash),
|
||||
full_mix_hash_string = blockhashToHexString(&full_out.mix_hash);
|
||||
BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string,
|
||||
"\nlight mix hash: " << light_mix_hash_string.c_str() << "\n"
|
||||
<< "full mix hash: " << full_mix_hash_string.c_str() << "\n");
|
||||
BOOST_REQUIRE_MESSAGE(ethash_check_difficulty(&full_out.result, &difficulty),
|
||||
"ethash_check_difficulty failed"
|
||||
);
|
||||
BOOST_REQUIRE_MESSAGE(ethash_quick_check_difficulty(&hash, 5U, &full_out.mix_hash, &difficulty),
|
||||
"ethash_quick_check_difficulty failed"
|
||||
);
|
||||
}
|
||||
ethash_light_delete(light);
|
||||
ethash_full_delete(full);
|
||||
fs::remove_all("./test_ethash_directory/");
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(ethash_full_new_when_dag_exists_with_wrong_size) {
|
||||
uint64_t full_size;
|
||||
uint64_t cache_size;
|
||||
ethash_h256_t seed;
|
||||
ethash_h256_t hash;
|
||||
ethash_return_value_t full_out;
|
||||
ethash_return_value_t light_out;
|
||||
memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
|
||||
cache_size = 1024;
|
||||
full_size = 1024 * 32;
|
||||
|
||||
// first make a DAG file of "wrong size"
|
||||
FILE *f;
|
||||
BOOST_REQUIRE_EQUAL(
|
||||
ETHASH_IO_MEMO_MISMATCH,
|
||||
ethash_io_prepare("./test_ethash_directory/", seed, &f, 64, false)
|
||||
);
|
||||
fclose(f);
|
||||
|
||||
// then create new DAG, which should detect the wrong size and force create a new file
|
||||
ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
|
||||
BOOST_ASSERT(light);
|
||||
ethash_full_t full = ethash_full_new_internal(
|
||||
"./test_ethash_directory/",
|
||||
seed,
|
||||
full_size,
|
||||
light,
|
||||
NULL
|
||||
);
|
||||
BOOST_ASSERT(full);
|
||||
{
|
||||
uint64_t nonce = 0x7c7c597c;
|
||||
full_out = ethash_full_compute(full, hash, nonce);
|
||||
BOOST_REQUIRE(full_out.success);
|
||||
light_out = ethash_light_compute_internal(light, full_size, hash, nonce);
|
||||
BOOST_REQUIRE(light_out.success);
|
||||
const std::string
|
||||
light_result_string = blockhashToHexString(&light_out.result),
|
||||
full_result_string = blockhashToHexString(&full_out.result);
|
||||
BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string,
|
||||
"\nlight result: " << light_result_string.c_str() << "\n"
|
||||
<< "full result: " << full_result_string.c_str() << "\n");
|
||||
const std::string
|
||||
light_mix_hash_string = blockhashToHexString(&light_out.mix_hash),
|
||||
full_mix_hash_string = blockhashToHexString(&full_out.mix_hash);
|
||||
BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string,
|
||||
"\nlight mix hash: " << light_mix_hash_string.c_str() << "\n"
|
||||
<< "full mix hash: " << full_mix_hash_string.c_str() << "\n");
|
||||
ethash_h256_t check_hash;
|
||||
ethash_quick_hash(&check_hash, &hash, nonce, &full_out.mix_hash);
|
||||
const std::string check_hash_string = blockhashToHexString(&check_hash);
|
||||
BOOST_REQUIRE_MESSAGE(check_hash_string == full_result_string,
|
||||
"\ncheck hash string: " << check_hash_string.c_str() << "\n"
|
||||
<< "full result: " << full_result_string.c_str() << "\n");
|
||||
}
|
||||
|
||||
ethash_light_delete(light);
|
||||
ethash_full_delete(full);
|
||||
fs::remove_all("./test_ethash_directory/");
|
||||
}
|
||||
|
||||
static bool g_executed = false;
static unsigned g_prev_progress = 0;
static int test_full_callback(unsigned _progress)
{
	g_executed = true;
	BOOST_CHECK(_progress >= g_prev_progress);
	g_prev_progress = _progress;
	return 0;
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_ethash_io_memo_file_contents) {
|
||||
ethash_blockhash_t seedhash;
|
||||
static const int blockn = 0;
|
||||
ethash_get_seedhash(&seedhash, blockn);
|
||||
BOOST_REQUIRE_EQUAL(
|
||||
ETHASH_IO_MEMO_MISMATCH,
|
||||
ethash_io_prepare("./test_ethash_directory/", seedhash)
|
||||
);
|
||||
|
||||
// let's make sure that the directory was created
|
||||
BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/")));
|
||||
|
||||
ethash_cache cache;
|
||||
ethash_params params;
|
||||
uint8_t *data;
|
||||
uint64_t size;
|
||||
ethash_params_init(¶ms, blockn);
|
||||
params.cache_size = 1024;
|
||||
params.full_size = 1024 * 32;
|
||||
cache.mem = our_alloca(params.cache_size);
|
||||
ethash_mkcache(&cache, ¶ms, &seedhash);
|
||||
|
||||
BOOST_REQUIRE(
|
||||
ethash_io_write("./test_ethash_directory/", ¶ms, seedhash, &cache, &data, &size)
|
||||
);
|
||||
|
||||
BOOST_REQUIRE(fs::exists(fs::path("./test_ethash_directory/full")));
|
||||
BOOST_REQUIRE(fs::exists(fs::path("./test_ethash_directory/full.info")));
|
||||
|
||||
char expect_buffer[DAG_MEMO_BYTESIZE];
|
||||
ethash_io_serialize_info(REVISION, seedhash, expect_buffer);
|
||||
auto vec = readFileIntoVector("./test_ethash_directory/full.info");
|
||||
BOOST_REQUIRE_EQUAL(vec.size(), DAG_MEMO_BYTESIZE);
|
||||
BOOST_REQUIRE(memcmp(expect_buffer, &vec[0], DAG_MEMO_BYTESIZE) == 0);
|
||||
|
||||
// cleanup
|
||||
fs::remove_all("./test_ethash_directory/");
|
||||
free(data);
}

static int test_full_callback_that_fails(unsigned _progress)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int test_full_callback_create_incomplete_dag(unsigned _progress)
{
	if (_progress >= 30) {
		return 1;
	}
	return 0;
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(full_client_callback) {
|
||||
uint64_t full_size;
|
||||
uint64_t cache_size;
|
||||
ethash_h256_t seed;
|
||||
ethash_h256_t hash;
|
||||
memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
|
||||
cache_size = 1024;
|
||||
full_size = 1024 * 32;
|
||||
|
||||
ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
|
||||
ethash_full_t full = ethash_full_new_internal(
|
||||
"./test_ethash_directory/",
|
||||
seed,
|
||||
full_size,
|
||||
light,
|
||||
test_full_callback
|
||||
);
|
||||
BOOST_ASSERT(full);
|
||||
BOOST_CHECK(g_executed);
|
||||
BOOST_REQUIRE_EQUAL(g_prev_progress, 100);
|
||||
|
||||
ethash_full_delete(full);
|
||||
ethash_light_delete(light);
|
||||
fs::remove_all("./test_ethash_directory/");
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(failing_full_client_callback) {
|
||||
uint64_t full_size;
|
||||
uint64_t cache_size;
|
||||
ethash_h256_t seed;
|
||||
ethash_h256_t hash;
|
||||
memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
|
||||
cache_size = 1024;
|
||||
full_size = 1024 * 32;
|
||||
|
||||
ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
|
||||
ethash_full_t full = ethash_full_new_internal(
|
||||
"./test_ethash_directory/",
|
||||
seed,
|
||||
full_size,
|
||||
light,
|
||||
test_full_callback_that_fails
|
||||
);
|
||||
BOOST_ASSERT(!full);
|
||||
ethash_light_delete(light);
|
||||
fs::remove_all("./test_ethash_directory/");
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_incomplete_dag_file) {
|
||||
uint64_t full_size;
|
||||
uint64_t cache_size;
|
||||
ethash_h256_t seed;
|
||||
ethash_h256_t hash;
|
||||
memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
|
||||
|
||||
cache_size = 1024;
|
||||
full_size = 1024 * 32;
|
||||
|
||||
ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
|
||||
// create a full but stop at 30%, so no magic number is written
|
||||
ethash_full_t full = ethash_full_new_internal(
|
||||
"./test_ethash_directory/",
|
||||
seed,
|
||||
full_size,
|
||||
light,
|
||||
test_full_callback_create_incomplete_dag
|
||||
);
|
||||
BOOST_ASSERT(!full);
|
||||
FILE *f = NULL;
|
||||
// confirm that we get a size_mismatch because the magic number is missing
|
||||
BOOST_REQUIRE_EQUAL(
|
||||
ETHASH_IO_MEMO_SIZE_MISMATCH,
|
||||
ethash_io_prepare("./test_ethash_directory/", seed, &f, full_size, false)
|
||||
);
|
||||
ethash_light_delete(light);
|
||||
fs::remove_all("./test_ethash_directory/");
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(test_block_verification) {
|
||||
ethash_light_t light = ethash_light_new(22);
|
||||
ethash_h256_t seedhash = stringToBlockhash("372eca2454ead349c3df0ab5d00b0b706b23e49d469387db91811cee0358fc6d");
|
||||
BOOST_ASSERT(light);
|
||||
ethash_return_value_t ret = ethash_light_compute(
|
||||
light,
|
||||
seedhash,
|
||||
0x495732e0ed7a801c
|
||||
);
|
||||
BOOST_REQUIRE_EQUAL(blockhashToHexString(&ret.result), "00000b184f1fdd88bfd94c86c39e65db0c36144d5e43f745f722196e730cb614");
|
||||
ethash_light_delete(light);
|
||||
}
|
||||
|
||||
// Test of Full DAG creation with the minimal ethash.h API.
|
||||
// Commented out since travis tests would take too much time.
|
||||
// Uncomment and run on your own machine if you want to confirm
|
||||
// it works fine.
|
||||
#if 0
|
||||
static int lef_cb(unsigned _progress)
|
||||
{
|
||||
printf("CREATING DAG. PROGRESS: %u\n", _progress);
|
||||
fflush(stdout);
|
||||
return 0;
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(full_dag_test) {
|
||||
ethash_light_t light = ethash_light_new(55);
|
||||
BOOST_ASSERT(light);
|
||||
ethash_full_t full = ethash_full_new(light, lef_cb);
|
||||
BOOST_ASSERT(full);
|
||||
ethash_light_delete(light);
|
||||
ethash_full_delete(full);
|
||||
}
|
||||
#endif
|
||||
|
13
Godeps/_workspace/src/github.com/ethereum/ethash/test/c/test.sh
generated
vendored
@@ -3,6 +3,13 @@
# Strict mode
set -e

VALGRIND_ARGS="--tool=memcheck"
VALGRIND_ARGS+=" --leak-check=yes"
VALGRIND_ARGS+=" --track-origins=yes"
VALGRIND_ARGS+=" --show-reachable=yes"
VALGRIND_ARGS+=" --num-callers=20"
VALGRIND_ARGS+=" --track-fds=yes"

SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
	DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
@@ -17,3 +24,9 @@ cd $TEST_DIR/build ;
cmake ../../.. > /dev/null
make Test
./test/c/Test

# If we have valgrind also run memory check tests
if hash valgrind 2>/dev/null; then
	echo "======== Running tests under valgrind ========";
	cd $TEST_DIR/build/ && valgrind $VALGRIND_ARGS ./test/c/Test
fi
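Because the valgrind run is guarded by `hash valgrind 2>/dev/null`, machines without valgrind still execute the plain `./test/c/Test` binary and the script exits cleanly; where valgrind is installed, the same binary is re-run under `valgrind --tool=memcheck --leak-check=yes ...` with the flag list assembled at the top of the script.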
82
Godeps/_workspace/src/github.com/ethereum/ethash/test/go/ethash_test.go
generated
vendored
@@ -1,82 +0,0 @@
package ethashTest

import (
	"bytes"
	"crypto/rand"
	"encoding/hex"
	"log"
	"math/big"
	"testing"

	"github.com/ethereum/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/ethdb"
)

func TestEthash(t *testing.T) {
	seedHash := make([]byte, 32)
	_, err := rand.Read(seedHash)
	if err != nil {
		panic(err)
	}

	db, err := ethdb.NewMemDatabase()
	if err != nil {
		panic(err)
	}

	blockProcessor, err := core.NewCanonical(5, db)
	if err != nil {
		panic(err)
	}

	log.Println("Block Number: ", blockProcessor.ChainManager().CurrentBlock().Number())

	e := ethash.New(blockProcessor.ChainManager())

	miningHash := make([]byte, 32)
	if _, err := rand.Read(miningHash); err != nil {
		panic(err)
	}
	diff := big.NewInt(10000)
	log.Println("difficulty", diff)

	nonce := uint64(0)

	ghash_full := e.FullHash(nonce, miningHash)
	log.Printf("ethash full (on nonce): %x %x\n", ghash_full, nonce)

	ghash_light := e.LightHash(nonce, miningHash)
	log.Printf("ethash light (on nonce): %x %x\n", ghash_light, nonce)

	if bytes.Compare(ghash_full, ghash_light) != 0 {
		t.Errorf("full: %x, light: %x", ghash_full, ghash_light)
	}
}

func TestGetSeedHash(t *testing.T) {
	seed0, err := ethash.GetSeedHash(0)
	if err != nil {
		t.Errorf("Failed to get seedHash for block 0: %v", err)
	}
	if bytes.Compare(seed0, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) != 0 {
		log.Printf("seedHash for block 0 should be 0s, was: %v\n", seed0)
	}
	seed1, err := ethash.GetSeedHash(30000)
	if err != nil {
		t.Error(err)
	}

	// From python:
	// > from pyethash import get_seedhash
	// > get_seedhash(30000)
	expectedSeed1, err := hex.DecodeString("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
	if err != nil {
		t.Error(err)
	}

	if bytes.Compare(seed1, expectedSeed1) != 0 {
		log.Printf("seedHash for block 1 should be: %v,\nactual value: %v\n", expectedSeed1, seed1)
	}

}
9
Godeps/_workspace/src/github.com/ethereum/ethash/test/test.sh
generated
vendored
@@ -24,8 +24,9 @@ fi
echo -e "\n################# Testing C ##################"
$TEST_DIR/c/test.sh

echo -e "\n################# Testing Python ##################"
$TEST_DIR/python/test.sh
# Temporarily commenting out python tests until they conform to the API
#echo -e "\n################# Testing Python ##################"
#$TEST_DIR/python/test.sh

#echo "################# Testing Go ##################"
#$TEST_DIR/go/test.sh
echo "################# Testing Go ##################"
cd $TEST_DIR/.. && go test -timeout 9999s
@@ -27,8 +27,10 @@ import (
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/codegangsta/cli"
@@ -601,12 +603,32 @@ func dump(ctx *cli.Context) {
}

func makedag(ctx *cli.Context) {
	chain, _, _ := utils.GetChain(ctx)
	pow := ethash.New(chain)
	fmt.Println("making cache")
	pow.UpdateCache(0, true)
	fmt.Println("making DAG")
	pow.UpdateDAG()
	args := ctx.Args()
	wrongArgs := func() {
		utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`)
	}
	switch {
	case len(args) == 2:
		blockNum, err := strconv.ParseUint(args[0], 0, 64)
		dir := args[1]
		if err != nil {
			wrongArgs()
		} else {
			dir = filepath.Clean(dir)
			// seems to require a trailing slash
			if !strings.HasSuffix(dir, "/") {
				dir = dir + "/"
			}
			_, err = ioutil.ReadDir(dir)
			if err != nil {
				utils.Fatalf("Can't find dir")
			}
			fmt.Println("making DAG, this could take awhile...")
			ethash.MakeDAG(blockNum, dir)
		}
	default:
		wrongArgs()
	}
}
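With this change the DAG parameters come from the command line rather than from the local chain. Assuming the output directory already exists (the command exits with "Can't find dir" otherwise), an invocation would look like `geth makedag 360000 /tmp/dag/`; the block number and path here are illustrative, the block number selects which DAG epoch is generated, and a trailing slash is appended automatically when missing.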

func version(c *cli.Context) {
@@ -316,7 +316,7 @@ func GetChain(ctx *cli.Context) (*core.ChainManager, common.Database, common.Dat

	eventMux := new(event.TypeMux)
	chainManager := core.NewChainManager(blockDb, stateDb, eventMux)
	pow := ethash.New(chainManager)
	pow := ethash.New()
	txPool := core.NewTxPool(eventMux, chainManager.State, chainManager.GasLimit)
	blockProcessor := core.NewBlockProcessor(stateDb, extraDb, pow, txPool, chainManager, eventMux)
	chainManager.SetProcessor(blockProcessor)
@@ -14,8 +14,8 @@ import (
// So we can generate blocks easily
type FakePow struct{}

func (f FakePow) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte, []byte) {
	return 0, nil, nil
func (f FakePow) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte) {
	return 0, nil
}
func (f FakePow) Verify(block pow.Block) bool { return true }
func (f FakePow) GetHashrate() int64 { return 0 }
@@ -220,7 +220,7 @@ func New(config *Config) (*Ethereum, error) {

	eth.chainManager = core.NewChainManager(blockDb, stateDb, eth.EventMux())
	eth.downloader = downloader.New(eth.chainManager.HasBlock, eth.chainManager.GetBlock)
	eth.pow = ethash.New(eth.chainManager)
	eth.pow = ethash.New()
	eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit)
	eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.txPool, eth.chainManager, eth.EventMux())
	eth.chainManager.SetProcessor(eth.blockProcessor)
@@ -318,7 +318,6 @@ func (s *Ethereum) PeersInfo() (peersinfo []*PeerInfo) {

func (s *Ethereum) ResetWithGenesisBlock(gb *types.Block) {
	s.chainManager.ResetWithGenesisBlock(gb)
	s.pow.UpdateCache(0, true)
}

func (s *Ethereum) StartMining() error {
@@ -85,7 +85,7 @@ func (self *CpuMiner) mine(block *types.Block) {
	self.chMu.Unlock()

	// Mine
	nonce, mixDigest, _ := self.pow.Search(block, self.quitCurrentOp)
	nonce, mixDigest := self.pow.Search(block, self.quitCurrentOp)
	if nonce != 0 {
		block.SetNonce(nonce)
		block.Header().MixDigest = common.BytesToHash(mixDigest)
@@ -3,7 +3,6 @@ package miner
import (
	"math/big"

	"github.com/ethereum/ethash"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
@@ -41,13 +40,7 @@ func (self *Miner) Mining() bool {
func (self *Miner) Start(coinbase common.Address) {
	self.mining = true
	self.worker.coinbase = coinbase

	if self.threads > 0 {
		self.pow.(*ethash.Ethash).UpdateDAG()
	}

	self.worker.start()

	self.worker.commitNewWork()
}
@@ -6,8 +6,8 @@ import (
	"math/rand"
	"time"

	"github.com/ethereum/go-ethereum/crypto/sha3"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto/sha3"
	"github.com/ethereum/go-ethereum/logger"
)

@@ -44,7 +44,7 @@ func (dag *Dagger) Find(obj *big.Int, resChan chan int64) {
	resChan <- 0
}

func (dag *Dagger) Search(hash, diff *big.Int) ([]byte, []byte, []byte) {
func (dag *Dagger) Search(hash, diff *big.Int) (uint64, []byte) {
	// TODO fix multi threading. Somehow it results in the wrong nonce
	amountOfRoutines := 1

@@ -69,7 +69,7 @@ func (dag *Dagger) Search(hash, diff *big.Int) ([]byte, []byte, []byte) {
		}
	}

	return big.NewInt(res).Bytes(), nil, nil
	return uint64(res), nil
}

func (dag *Dagger) Verify(hash, diff, nonce *big.Int) bool {
@@ -32,7 +32,7 @@ func (pow *EasyPow) Turbo(on bool) {
	pow.turbo = on
}

func (pow *EasyPow) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte, []byte) {
func (pow *EasyPow) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte) {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	hash := block.HashNoNonce()
	diff := block.Difficulty()
@@ -57,7 +57,7 @@ empty:
	for {
		select {
		case <-stop:
			return 0, nil, nil
			return 0, nil
		default:
			i++

@@ -67,7 +67,7 @@ empty:

			sha := uint64(r.Int63())
			if verify(hash, diff, sha) {
				return sha, nil, nil
				return sha, nil
			}
		}

@@ -76,7 +76,7 @@ empty:
		}
	}

	return 0, nil, nil
	return 0, nil
}

func (pow *EasyPow) Verify(block pow.Block) bool {
@@ -1,7 +1,7 @@
package pow

type PoW interface {
	Search(block Block, stop <-chan struct{}) (uint64, []byte, []byte)
	Search(block Block, stop <-chan struct{}) (uint64, []byte)
	Verify(block Block) bool
	GetHashrate() int64
	Turbo(bool)
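Search now reports just the winning nonce and the mix digest. A minimal sketch of a type satisfying the updated contract, modelled on the FakePow helper above; the name NopPow is illustrative and Block is the pow package's own type:

```go
package pow

// NopPow is a do-nothing implementation of the updated PoW interface,
// mirroring the FakePow test helper shown earlier in this diff.
type NopPow struct{}

// Search returns immediately; a real engine would iterate nonces until one
// satisfies the difficulty or the stop channel is closed.
func (NopPow) Search(block Block, stop <-chan struct{}) (uint64, []byte) {
	return 0, nil
}

func (NopPow) Verify(block Block) bool { return true }
func (NopPow) GetHashrate() int64      { return 0 }
func (NopPow) Turbo(bool)              {}
```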
@@ -13,42 +13,37 @@ import (

// TODO: refactor test setup & execution to better align with vm and tx tests
func TestBcValidBlockTests(t *testing.T) {
	t.Skip("Skipped in lieu of performance fixes.")
	runBlockTestsInFile("files/BlockTests/bcValidBlockTest.json", []string{}, t)
}

func TestBcUncleTests(t *testing.T) {
	t.Skip("Skipped in lieu of performance fixes.")
	t.Skip("Skipped until https://github.com/ethereum/go-ethereum/pull/857 is merged.")
	runBlockTestsInFile("files/BlockTests/bcUncleTest.json", []string{}, t)
}

func TestBcUncleHeaderValidityTests(t *testing.T) {
	t.Skip("Skipped in lieu of performance fixes.")
	t.Skip("Skipped until https://github.com/ethereum/go-ethereum/pull/857 is merged.")
	runBlockTestsInFile("files/BlockTests/bcUncleHeaderValiditiy.json", []string{}, t)
}

func TestBcInvalidHeaderTests(t *testing.T) {
	t.Skip("Skipped in lieu of performance fixes.")
	runBlockTestsInFile("files/BlockTests/bcInvalidHeaderTest.json", []string{}, t)
}

func TestBcInvalidRLPTests(t *testing.T) {
	t.Skip("Skipped in lieu of performance fixes.")
	runBlockTestsInFile("files/BlockTests/bcInvalidRLPTest.json", []string{}, t)
}

func TestBcJSAPITests(t *testing.T) {
	t.Skip("Skipped in lieu of performance fixes.")
	runBlockTestsInFile("files/BlockTests/bcJS_API_Test.json", []string{}, t)
}

func TestBcRPCAPITests(t *testing.T) {
	t.Skip("Skipped in lieu of performance fixes.")
	t.Skip("Skipped until https://github.com/ethereum/go-ethereum/pull/857 is merged.")
	runBlockTestsInFile("files/BlockTests/bcRPC_API_Test.json", []string{}, t)
}

func TestBcForkBlockTests(t *testing.T) {
	t.Skip("Skipped in lieu of performance fixes.")
	runBlockTestsInFile("files/BlockTests/bcForkBlockTest.json", []string{}, t)
}

@@ -71,7 +66,6 @@ func runBlockTestsInFile(filepath string, snafus []string, t *testing.T) {
}

func runBlockTest(name string, test *BlockTest, t *testing.T) {
	t.Log("Running test: ", name)
	cfg := testEthConfig()
	ethereum, err := eth.New(cfg)
	if err != nil {
@@ -100,7 +94,7 @@ func runBlockTest(name string, test *BlockTest, t *testing.T) {
	if err = test.ValidatePostState(statedb); err != nil {
		t.Fatal("post state validation failed: %v", err)
	}
	t.Log("Test passed: ", name)
	t.Log("Test passed: ", name)
}

func testEthConfig() *eth.Config {