Compare commits
19 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
cf87713dd4 | ||
|
|
ac92d7c411 | ||
|
|
d5a79934dc | ||
|
|
0424192e61 | ||
|
|
9c2882b2e5 | ||
|
|
1a0eb903f1 | ||
|
|
0036e2a747 | ||
|
|
727eadacca | ||
|
|
99cba96f26 | ||
|
|
f272879e5a | ||
|
|
72dd51e25a | ||
|
|
799a469000 | ||
|
|
f4d81178d8 | ||
|
|
310d2e7ef4 | ||
|
|
3ecde4e2aa | ||
|
|
a355b401db | ||
|
|
cba33029a8 | ||
|
|
9702badd83 | ||
|
|
067dc2cbf5 |
@@ -182,10 +182,8 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key, err := crypto.ToECDSA(keyBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key := crypto.ToECDSAUnsafe(keyBytes)
|
||||
|
||||
return &Key{
|
||||
Id: uuid.UUID(keyId),
|
||||
Address: crypto.PubkeyToAddress(key.PublicKey),
|
||||
|
||||
@@ -74,10 +74,8 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error
|
||||
return nil, err
|
||||
}
|
||||
ethPriv := crypto.Keccak256(plainText)
|
||||
ecKey, err := crypto.ToECDSA(ethPriv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ecKey := crypto.ToECDSAUnsafe(ethPriv)
|
||||
|
||||
key = &Key{
|
||||
Id: nil,
|
||||
Address: crypto.PubkeyToAddress(ecKey.PublicKey),
|
||||
|
||||
@@ -2,7 +2,7 @@ FROM alpine:3.5
|
||||
|
||||
RUN \
|
||||
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
||||
git clone --depth 1 --branch release/1.5 https://github.com/ethereum/go-ethereum && \
|
||||
git clone --depth 1 --branch release/1.6 https://github.com/ethereum/go-ethereum && \
|
||||
(cd go-ethereum && make geth) && \
|
||||
cp go-ethereum/build/bin/geth /geth && \
|
||||
apk del go git make gcc musl-dev linux-headers && \
|
||||
|
||||
@@ -251,7 +251,7 @@ func (pool *TxPool) resetState() {
|
||||
}
|
||||
// Check the queue and move transactions over to the pending if possible
|
||||
// or remove those that have become invalid
|
||||
pool.promoteExecutables(currentState)
|
||||
pool.promoteExecutables(currentState, nil)
|
||||
}
|
||||
|
||||
// Stop terminates the transaction pool.
|
||||
@@ -339,17 +339,6 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
|
||||
pool.mu.Lock()
|
||||
defer pool.mu.Unlock()
|
||||
|
||||
state, err := pool.currentState()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check queue first
|
||||
pool.promoteExecutables(state)
|
||||
|
||||
// invalidate any txs
|
||||
pool.demoteUnexecutables(state)
|
||||
|
||||
pending := make(map[common.Address]types.Transactions)
|
||||
for addr, list := range pool.pending {
|
||||
pending[addr] = list.Flatten()
|
||||
@@ -551,13 +540,14 @@ func (pool *TxPool) Add(tx *types.Transaction) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
state, err := pool.currentState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If we added a new transaction, run promotion checks and return
|
||||
if !replace {
|
||||
pool.promoteExecutables(state)
|
||||
state, err := pool.currentState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
from, _ := types.Sender(pool.signer, tx) // already validated
|
||||
pool.promoteExecutables(state, []common.Address{from})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -568,24 +558,26 @@ func (pool *TxPool) AddBatch(txs []*types.Transaction) error {
|
||||
defer pool.mu.Unlock()
|
||||
|
||||
// Add the batch of transaction, tracking the accepted ones
|
||||
replaced, added := true, 0
|
||||
dirty := make(map[common.Address]struct{})
|
||||
for _, tx := range txs {
|
||||
if replace, err := pool.add(tx); err == nil {
|
||||
added++
|
||||
if !replace {
|
||||
replaced = false
|
||||
from, _ := types.Sender(pool.signer, tx) // already validated
|
||||
dirty[from] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Only reprocess the internal state if something was actually added
|
||||
if added > 0 {
|
||||
if len(dirty) > 0 {
|
||||
state, err := pool.currentState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !replaced {
|
||||
pool.promoteExecutables(state)
|
||||
addrs := make([]common.Address, 0, len(dirty))
|
||||
for addr, _ := range dirty {
|
||||
addrs = append(addrs, addr)
|
||||
}
|
||||
pool.promoteExecutables(state, addrs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -662,12 +654,23 @@ func (pool *TxPool) removeTx(hash common.Hash) {
|
||||
// promoteExecutables moves transactions that have become processable from the
|
||||
// future queue to the set of pending transactions. During this process, all
|
||||
// invalidated transactions (low nonce, low balance) are deleted.
|
||||
func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
func (pool *TxPool) promoteExecutables(state *state.StateDB, accounts []common.Address) {
|
||||
gaslimit := pool.gasLimit()
|
||||
|
||||
// Gather all the accounts potentially needing updates
|
||||
if accounts == nil {
|
||||
accounts = make([]common.Address, 0, len(pool.queue))
|
||||
for addr, _ := range pool.queue {
|
||||
accounts = append(accounts, addr)
|
||||
}
|
||||
}
|
||||
// Iterate over all accounts and promote any executable transactions
|
||||
queued := uint64(0)
|
||||
for addr, list := range pool.queue {
|
||||
for _, addr := range accounts {
|
||||
list := pool.queue[addr]
|
||||
if list == nil {
|
||||
continue // Just in case someone calls with a non existing account
|
||||
}
|
||||
// Drop all transactions that are deemed too old (low nonce)
|
||||
for _, tx := range list.Forward(state.GetNonce(addr)) {
|
||||
hash := tx.Hash()
|
||||
|
||||
@@ -175,7 +175,7 @@ func TestTransactionQueue(t *testing.T) {
|
||||
pool.resetState()
|
||||
pool.enqueueTx(tx.Hash(), tx)
|
||||
|
||||
pool.promoteExecutables(currentState)
|
||||
pool.promoteExecutables(currentState, []common.Address{from})
|
||||
if len(pool.pending) != 1 {
|
||||
t.Error("expected valid txs to be 1 is", len(pool.pending))
|
||||
}
|
||||
@@ -184,7 +184,7 @@ func TestTransactionQueue(t *testing.T) {
|
||||
from, _ = deriveSender(tx)
|
||||
currentState.SetNonce(from, 2)
|
||||
pool.enqueueTx(tx.Hash(), tx)
|
||||
pool.promoteExecutables(currentState)
|
||||
pool.promoteExecutables(currentState, []common.Address{from})
|
||||
if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok {
|
||||
t.Error("expected transaction to be in tx pool")
|
||||
}
|
||||
@@ -206,7 +206,7 @@ func TestTransactionQueue(t *testing.T) {
|
||||
pool.enqueueTx(tx2.Hash(), tx2)
|
||||
pool.enqueueTx(tx3.Hash(), tx3)
|
||||
|
||||
pool.promoteExecutables(currentState)
|
||||
pool.promoteExecutables(currentState, []common.Address{from})
|
||||
|
||||
if len(pool.pending) != 1 {
|
||||
t.Error("expected tx pool to be 1, got", len(pool.pending))
|
||||
@@ -304,16 +304,16 @@ func TestTransactionDoubleNonce(t *testing.T) {
|
||||
t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace)
|
||||
}
|
||||
state, _ := pool.currentState()
|
||||
pool.promoteExecutables(state)
|
||||
pool.promoteExecutables(state, []common.Address{addr})
|
||||
if pool.pending[addr].Len() != 1 {
|
||||
t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
|
||||
}
|
||||
if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() {
|
||||
t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash())
|
||||
}
|
||||
// Add the thid transaction and ensure it's not saved (smaller price)
|
||||
// Add the third transaction and ensure it's not saved (smaller price)
|
||||
pool.add(tx3)
|
||||
pool.promoteExecutables(state)
|
||||
pool.promoteExecutables(state, []common.Address{addr})
|
||||
if pool.pending[addr].Len() != 1 {
|
||||
t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
|
||||
}
|
||||
@@ -1087,7 +1087,7 @@ func benchmarkFuturePromotion(b *testing.B, size int) {
|
||||
// Benchmark the speed of pool validation
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
pool.promoteExecutables(state)
|
||||
pool.promoteExecutables(state, nil)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -22,41 +22,39 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
var bigMaxUint64 = new(big.Int).SetUint64(^uint64(0))
|
||||
|
||||
// destinations stores one map per contract (keyed by hash of code).
|
||||
// The maps contain an entry for each location of a JUMPDEST
|
||||
// instruction.
|
||||
type destinations map[common.Hash]map[uint64]struct{}
|
||||
type destinations map[common.Hash][]byte
|
||||
|
||||
// has checks whether code has a JUMPDEST at dest.
|
||||
func (d destinations) has(codehash common.Hash, code []byte, dest *big.Int) bool {
|
||||
// PC cannot go beyond len(code) and certainly can't be bigger than 64bits.
|
||||
// PC cannot go beyond len(code) and certainly can't be bigger than 63bits.
|
||||
// Don't bother checking for JUMPDEST in that case.
|
||||
if dest.Cmp(bigMaxUint64) > 0 {
|
||||
udest := dest.Uint64()
|
||||
if dest.BitLen() >= 63 || udest >= uint64(len(code)) {
|
||||
return false
|
||||
}
|
||||
|
||||
m, analysed := d[codehash]
|
||||
if !analysed {
|
||||
m = jumpdests(code)
|
||||
d[codehash] = m
|
||||
}
|
||||
_, ok := m[dest.Uint64()]
|
||||
return ok
|
||||
return (m[udest/8] & (1 << (udest % 8))) != 0
|
||||
}
|
||||
|
||||
// jumpdests creates a map that contains an entry for each
|
||||
// PC location that is a JUMPDEST instruction.
|
||||
func jumpdests(code []byte) map[uint64]struct{} {
|
||||
m := make(map[uint64]struct{})
|
||||
func jumpdests(code []byte) []byte {
|
||||
m := make([]byte, len(code)/8+1)
|
||||
for pc := uint64(0); pc < uint64(len(code)); pc++ {
|
||||
var op OpCode = OpCode(code[pc])
|
||||
switch op {
|
||||
case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32:
|
||||
op := OpCode(code[pc])
|
||||
if op == JUMPDEST {
|
||||
m[pc/8] |= 1 << (pc % 8)
|
||||
} else if op >= PUSH1 && op <= PUSH32 {
|
||||
a := uint64(op) - uint64(PUSH1) + 1
|
||||
pc += a
|
||||
case JUMPDEST:
|
||||
m[pc] = struct{}{}
|
||||
}
|
||||
}
|
||||
return m
|
||||
|
||||
@@ -68,9 +68,6 @@ func Keccak512(data ...[]byte) []byte {
|
||||
return d.Sum(nil)
|
||||
}
|
||||
|
||||
// Deprecated: For backward compatibility as other packages depend on these
|
||||
func Sha3Hash(data ...[]byte) common.Hash { return Keccak256Hash(data...) }
|
||||
|
||||
// Creates an ethereum address given the bytes and the nonce
|
||||
func CreateAddress(b common.Address, nonce uint64) common.Address {
|
||||
data, _ := rlp.EncodeToBytes([]interface{}{b, nonce})
|
||||
@@ -79,9 +76,24 @@ func CreateAddress(b common.Address, nonce uint64) common.Address {
|
||||
|
||||
// ToECDSA creates a private key with the given D value.
|
||||
func ToECDSA(d []byte) (*ecdsa.PrivateKey, error) {
|
||||
return toECDSA(d, true)
|
||||
}
|
||||
|
||||
// ToECDSAUnsafe blidly converts a binary blob to a private key. It should almost
|
||||
// never be used unless you are sure the input is valid and want to avoid hitting
|
||||
// errors due to bad origin encoding (0 prefixes cut off).
|
||||
func ToECDSAUnsafe(d []byte) *ecdsa.PrivateKey {
|
||||
priv, _ := toECDSA(d, false)
|
||||
return priv
|
||||
}
|
||||
|
||||
// toECDSA creates a private key with the given D value. The strict parameter
|
||||
// controls whether the key's length should be enforced at the curve size or
|
||||
// it can also accept legacy encodings (0 prefixes).
|
||||
func toECDSA(d []byte, strict bool) (*ecdsa.PrivateKey, error) {
|
||||
priv := new(ecdsa.PrivateKey)
|
||||
priv.PublicKey.Curve = S256()
|
||||
if 8*len(d) != priv.Params().BitSize {
|
||||
if strict && 8*len(d) != priv.Params().BitSize {
|
||||
return nil, fmt.Errorf("invalid length, need %d bits", priv.Params().BitSize)
|
||||
}
|
||||
priv.D = new(big.Int).SetBytes(d)
|
||||
@@ -89,11 +101,12 @@ func ToECDSA(d []byte) (*ecdsa.PrivateKey, error) {
|
||||
return priv, nil
|
||||
}
|
||||
|
||||
func FromECDSA(prv *ecdsa.PrivateKey) []byte {
|
||||
if prv == nil {
|
||||
// FromECDSA exports a private key into a binary dump.
|
||||
func FromECDSA(priv *ecdsa.PrivateKey) []byte {
|
||||
if priv == nil {
|
||||
return nil
|
||||
}
|
||||
return math.PaddedBigBytes(prv.D, 32)
|
||||
return math.PaddedBigBytes(priv.D, priv.Params().BitSize/8)
|
||||
}
|
||||
|
||||
func ToECDSAPub(pub []byte) *ecdsa.PublicKey {
|
||||
@@ -121,7 +134,6 @@ func HexToECDSA(hexkey string) (*ecdsa.PrivateKey, error) {
|
||||
}
|
||||
|
||||
// LoadECDSA loads a secp256k1 private key from the given file.
|
||||
// The key data is expected to be hex-encoded.
|
||||
func LoadECDSA(file string) (*ecdsa.PrivateKey, error) {
|
||||
buf := make([]byte, 64)
|
||||
fd, err := os.Open(file)
|
||||
|
||||
@@ -36,7 +36,7 @@ var testPrivHex = "289c2857d4598e37fb9647507e47a309d6133539bf21a8b9cb6df88fd5232
|
||||
// These tests are sanity checks.
|
||||
// They should ensure that we don't e.g. use Sha3-224 instead of Sha3-256
|
||||
// and that the sha3 library uses keccak-f permutation.
|
||||
func TestSha3Hash(t *testing.T) {
|
||||
func TestKeccak256Hash(t *testing.T) {
|
||||
msg := []byte("abc")
|
||||
exp, _ := hex.DecodeString("4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45")
|
||||
checkhash(t, "Sha3-256-array", func(in []byte) []byte { h := Keccak256Hash(in); return h[:] }, msg, exp)
|
||||
|
||||
@@ -31,6 +31,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/mclock"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
@@ -119,7 +120,7 @@ func (s *Service) Stop() error {
|
||||
// loop keeps trying to connect to the netstats server, reporting chain events
|
||||
// until termination.
|
||||
func (s *Service) loop() {
|
||||
// Subscribe tso chain events to execute updates on
|
||||
// Subscribe to chain events to execute updates on
|
||||
var emux *event.TypeMux
|
||||
if s.eth != nil {
|
||||
emux = s.eth.EventMux()
|
||||
@@ -132,6 +133,46 @@ func (s *Service) loop() {
|
||||
txSub := emux.Subscribe(core.TxPreEvent{})
|
||||
defer txSub.Unsubscribe()
|
||||
|
||||
// Start a goroutine that exhausts the subsciptions to avoid events piling up
|
||||
var (
|
||||
quitCh = make(chan struct{})
|
||||
headCh = make(chan *types.Block, 1)
|
||||
txCh = make(chan struct{}, 1)
|
||||
)
|
||||
go func() {
|
||||
var lastTx mclock.AbsTime
|
||||
|
||||
for {
|
||||
select {
|
||||
// Notify of chain head events, but drop if too frequent
|
||||
case head, ok := <-headSub.Chan():
|
||||
if !ok { // node stopped
|
||||
close(quitCh)
|
||||
return
|
||||
}
|
||||
select {
|
||||
case headCh <- head.Data.(core.ChainHeadEvent).Block:
|
||||
default:
|
||||
}
|
||||
|
||||
// Notify of new transaction events, but drop if too frequent
|
||||
case _, ok := <-txSub.Chan():
|
||||
if !ok { // node stopped
|
||||
close(quitCh)
|
||||
return
|
||||
}
|
||||
if time.Duration(mclock.Now()-lastTx) < time.Second {
|
||||
continue
|
||||
}
|
||||
lastTx = mclock.Now()
|
||||
|
||||
select {
|
||||
case txCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
// Loop reporting until termination
|
||||
for {
|
||||
// Resolve the URL, defaulting to TLS, but falling back to none too
|
||||
@@ -151,7 +192,7 @@ func (s *Service) loop() {
|
||||
if conf, err = websocket.NewConfig(url, "http://localhost/"); err != nil {
|
||||
continue
|
||||
}
|
||||
conf.Dialer = &net.Dialer{Timeout: 3 * time.Second}
|
||||
conf.Dialer = &net.Dialer{Timeout: 5 * time.Second}
|
||||
if conn, err = websocket.DialConfig(conf); err == nil {
|
||||
break
|
||||
}
|
||||
@@ -181,6 +222,10 @@ func (s *Service) loop() {
|
||||
|
||||
for err == nil {
|
||||
select {
|
||||
case <-quitCh:
|
||||
conn.Close()
|
||||
return
|
||||
|
||||
case <-fullReport.C:
|
||||
if err = s.report(conn); err != nil {
|
||||
log.Warn("Full stats report failed", "err", err)
|
||||
@@ -189,30 +234,14 @@ func (s *Service) loop() {
|
||||
if err = s.reportHistory(conn, list); err != nil {
|
||||
log.Warn("Requested history report failed", "err", err)
|
||||
}
|
||||
case head, ok := <-headSub.Chan():
|
||||
if !ok { // node stopped
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
if err = s.reportBlock(conn, head.Data.(core.ChainHeadEvent).Block); err != nil {
|
||||
case head := <-headCh:
|
||||
if err = s.reportBlock(conn, head); err != nil {
|
||||
log.Warn("Block stats report failed", "err", err)
|
||||
}
|
||||
if err = s.reportPending(conn); err != nil {
|
||||
log.Warn("Post-block transaction stats report failed", "err", err)
|
||||
}
|
||||
case _, ok := <-txSub.Chan():
|
||||
if !ok { // node stopped
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
// Exhaust events to avoid reporting too frequently
|
||||
for exhausted := false; !exhausted; {
|
||||
select {
|
||||
case <-headSub.Chan():
|
||||
default:
|
||||
exhausted = true
|
||||
}
|
||||
}
|
||||
case <-txCh:
|
||||
if err = s.reportPending(conn); err != nil {
|
||||
log.Warn("Transaction stats report failed", "err", err)
|
||||
}
|
||||
@@ -398,7 +427,7 @@ func (s *Service) reportLatency(conn *websocket.Conn) error {
|
||||
select {
|
||||
case <-s.pongCh:
|
||||
// Pong delivered, report the latency
|
||||
case <-time.After(3 * time.Second):
|
||||
case <-time.After(5 * time.Second):
|
||||
// Ping timeout, abort
|
||||
return errors.New("ping timed out")
|
||||
}
|
||||
|
||||
@@ -211,8 +211,9 @@ type PrivateAccountAPI struct {
|
||||
// NewPrivateAccountAPI create a new PrivateAccountAPI.
|
||||
func NewPrivateAccountAPI(b Backend, nonceLock *AddrLocker) *PrivateAccountAPI {
|
||||
return &PrivateAccountAPI{
|
||||
am: b.AccountManager(),
|
||||
b: b,
|
||||
am: b.AccountManager(),
|
||||
nonceLock: nonceLock,
|
||||
b: b,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ import (
|
||||
const (
|
||||
VersionMajor = 1 // Major version component of the current release
|
||||
VersionMinor = 6 // Minor version component of the current release
|
||||
VersionPatch = 2 // Patch version component of the current release
|
||||
VersionPatch = 5 // Patch version component of the current release
|
||||
VersionMeta = "stable" // Version metadata to append to the version string
|
||||
)
|
||||
|
||||
|
||||
2
swarm/dev/.dockerignore
Normal file
2
swarm/dev/.dockerignore
Normal file
@@ -0,0 +1,2 @@
|
||||
bin/*
|
||||
cluster/*
|
||||
2
swarm/dev/.gitignore
vendored
Normal file
2
swarm/dev/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
bin/*
|
||||
cluster/*
|
||||
42
swarm/dev/Dockerfile
Normal file
42
swarm/dev/Dockerfile
Normal file
@@ -0,0 +1,42 @@
|
||||
FROM ubuntu:xenial
|
||||
|
||||
# install build + test dependencies
|
||||
RUN apt-get update && \
|
||||
apt-get install --yes --no-install-recommends \
|
||||
ca-certificates \
|
||||
curl \
|
||||
fuse \
|
||||
g++ \
|
||||
gcc \
|
||||
git \
|
||||
iproute2 \
|
||||
iputils-ping \
|
||||
less \
|
||||
libc6-dev \
|
||||
make \
|
||||
pkg-config \
|
||||
&& \
|
||||
apt-get clean
|
||||
|
||||
# install Go
|
||||
ENV GO_VERSION 1.8.1
|
||||
RUN curl -fSLo golang.tar.gz "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" && \
|
||||
tar -xzf golang.tar.gz -C /usr/local && \
|
||||
rm golang.tar.gz
|
||||
ENV GOPATH /go
|
||||
ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH
|
||||
|
||||
# install docker CLI
|
||||
RUN curl -fSLo docker.tar.gz https://get.docker.com/builds/Linux/x86_64/docker-17.04.0-ce.tgz && \
|
||||
tar -xzf docker.tar.gz -C /usr/local/bin --strip-components=1 docker/docker && \
|
||||
rm docker.tar.gz
|
||||
|
||||
# install jq
|
||||
RUN curl -fSLo /usr/local/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 && \
|
||||
chmod +x /usr/local/bin/jq
|
||||
|
||||
# install govendor
|
||||
RUN go get -u github.com/kardianos/govendor
|
||||
|
||||
# add custom bashrc
|
||||
ADD bashrc /root/.bashrc
|
||||
14
swarm/dev/Makefile
Normal file
14
swarm/dev/Makefile
Normal file
@@ -0,0 +1,14 @@
|
||||
.PHONY: build cluster test
|
||||
|
||||
default: build
|
||||
|
||||
build:
|
||||
go build -o bin/swarm github.com/ethereum/go-ethereum/cmd/swarm
|
||||
go build -o bin/geth github.com/ethereum/go-ethereum/cmd/geth
|
||||
go build -o bin/bootnode github.com/ethereum/go-ethereum/cmd/bootnode
|
||||
|
||||
cluster: build
|
||||
scripts/boot-cluster.sh
|
||||
|
||||
test:
|
||||
go test -v github.com/ethereum/go-ethereum/swarm/...
|
||||
20
swarm/dev/README.md
Normal file
20
swarm/dev/README.md
Normal file
@@ -0,0 +1,20 @@
|
||||
Swarm development environment
|
||||
=============================
|
||||
|
||||
The Swarm development environment is a Linux bash shell which can be run in a
|
||||
Docker container and provides a predictable build and test environment.
|
||||
|
||||
### Start the Docker container
|
||||
|
||||
Run the `run.sh` script to build the Docker image and run it, you will then be
|
||||
at a bash prompt inside the `swarm/dev` directory.
|
||||
|
||||
### Build binaries
|
||||
|
||||
Run `make` to build the `swarm`, `geth` and `bootnode` binaries into the
|
||||
`swarm/dev/bin` directory.
|
||||
|
||||
### Boot a cluster
|
||||
|
||||
Run `make cluster` to start a 3 node Swarm cluster, or run
|
||||
`scripts/boot-cluster.sh --size N` to boot a cluster of size N.
|
||||
21
swarm/dev/bashrc
Normal file
21
swarm/dev/bashrc
Normal file
@@ -0,0 +1,21 @@
|
||||
export ROOT="${GOPATH}/src/github.com/ethereum/go-ethereum"
|
||||
export PATH="${ROOT}/swarm/dev/bin:${PATH}"
|
||||
|
||||
cd "${ROOT}/swarm/dev"
|
||||
|
||||
cat <<WELCOME
|
||||
|
||||
=============================================
|
||||
|
||||
Welcome to the swarm development environment.
|
||||
|
||||
- Run 'make' to build the swarm, geth and bootnode binaries
|
||||
- Run 'make test' to run the swarm unit tests
|
||||
- Run 'make cluster' to start a swarm cluster
|
||||
- Run 'exit' to exit the development environment
|
||||
|
||||
See the 'scripts' directory for some useful scripts.
|
||||
|
||||
=============================================
|
||||
|
||||
WELCOME
|
||||
90
swarm/dev/run.sh
Executable file
90
swarm/dev/run.sh
Executable file
@@ -0,0 +1,90 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# A script to build and run the Swarm development environment using Docker.
|
||||
|
||||
set -e
|
||||
|
||||
ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
|
||||
|
||||
# DEFAULT_NAME is the default name for the Docker image and container
|
||||
DEFAULT_NAME="swarm-dev"
|
||||
|
||||
usage() {
|
||||
cat >&2 <<USAGE
|
||||
usage: $0 [options]
|
||||
|
||||
Build and run the Swarm development environment.
|
||||
|
||||
Depends on Docker being installed locally.
|
||||
|
||||
OPTIONS:
|
||||
-n, --name NAME Docker image and container name [default: ${DEFAULT_NAME}]
|
||||
-d, --docker-args ARGS Custom args to pass to 'docker run' (e.g. '-p 8000:8000' to expose a port)
|
||||
-h, --help Show this message
|
||||
USAGE
|
||||
}
|
||||
|
||||
main() {
|
||||
local name="${DEFAULT_NAME}"
|
||||
local docker_args=""
|
||||
parse_args "$@"
|
||||
build_image
|
||||
run_image
|
||||
}
|
||||
|
||||
parse_args() {
|
||||
while true; do
|
||||
case "$1" in
|
||||
-h | --help)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
-n | --name)
|
||||
if [[ -z "$2" ]]; then
|
||||
echo "ERROR: --name flag requires an argument" >&2
|
||||
exit 1
|
||||
fi
|
||||
name="$2"
|
||||
shift 2
|
||||
;;
|
||||
-d | --docker-args)
|
||||
if [[ -z "$2" ]]; then
|
||||
echo "ERROR: --docker-args flag requires an argument" >&2
|
||||
exit 1
|
||||
fi
|
||||
docker_args="$2"
|
||||
shift 2
|
||||
;;
|
||||
*)
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ $# -ne 0 ]]; then
|
||||
usage
|
||||
echo "ERROR: invalid arguments" >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
build_image() {
|
||||
docker build --tag "${name}" "${ROOT}/swarm/dev"
|
||||
}
|
||||
|
||||
run_image() {
|
||||
exec docker run \
|
||||
--privileged \
|
||||
--interactive \
|
||||
--tty \
|
||||
--rm \
|
||||
--hostname "${name}" \
|
||||
--name "${name}" \
|
||||
--volume "${ROOT}:/go/src/github.com/ethereum/go-ethereum" \
|
||||
--volume "/var/run/docker.sock:/var/run/docker.sock" \
|
||||
${docker_args} \
|
||||
"${name}" \
|
||||
/bin/bash
|
||||
}
|
||||
|
||||
main "$@"
|
||||
288
swarm/dev/scripts/boot-cluster.sh
Executable file
288
swarm/dev/scripts/boot-cluster.sh
Executable file
@@ -0,0 +1,288 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# A script to boot a dev swarm cluster on a Linux host (typically in a Docker
|
||||
# container started with swarm/dev/run.sh).
|
||||
#
|
||||
# The cluster contains a bootnode, a geth node and multiple swarm nodes, with
|
||||
# each node having its own data directory in a base directory passed with the
|
||||
# --dir flag (default is swarm/dev/cluster).
|
||||
#
|
||||
# To avoid using different ports for each node and to make networking more
|
||||
# realistic, each node gets its own network namespace with IPs assigned from
|
||||
# the 192.168.33.0/24 subnet:
|
||||
#
|
||||
# bootnode: 192.168.33.2
|
||||
# geth: 192.168.33.3
|
||||
# swarm: 192.168.33.10{1,2,...,n}
|
||||
|
||||
set -e
|
||||
|
||||
ROOT="$(cd "$(dirname "$0")/../../.." && pwd)"
|
||||
source "${ROOT}/swarm/dev/scripts/util.sh"
|
||||
|
||||
# DEFAULT_BASE_DIR is the default base directory to store node data
|
||||
DEFAULT_BASE_DIR="${ROOT}/swarm/dev/cluster"
|
||||
|
||||
# DEFAULT_CLUSTER_SIZE is the default swarm cluster size
|
||||
DEFAULT_CLUSTER_SIZE=3
|
||||
|
||||
# Linux bridge configuration for connecting the node network namespaces
|
||||
BRIDGE_NAME="swarmbr0"
|
||||
BRIDGE_IP="192.168.33.1"
|
||||
|
||||
# static bootnode configuration
|
||||
BOOTNODE_IP="192.168.33.2"
|
||||
BOOTNODE_PORT="30301"
|
||||
BOOTNODE_KEY="32078f313bea771848db70745225c52c00981589ad6b5b49163f0f5ee852617d"
|
||||
BOOTNODE_PUBKEY="760c4460e5336ac9bbd87952a3c7ec4363fc0a97bd31c86430806e287b437fd1b01abc6e1db640cf3106b520344af1d58b00b57823db3e1407cbc433e1b6d04d"
|
||||
BOOTNODE_URL="enode://${BOOTNODE_PUBKEY}@${BOOTNODE_IP}:${BOOTNODE_PORT}"
|
||||
|
||||
# static geth configuration
|
||||
GETH_IP="192.168.33.3"
|
||||
GETH_RPC_PORT="8545"
|
||||
GETH_RPC_URL="http://${GETH_IP}:${GETH_RPC_PORT}"
|
||||
|
||||
usage() {
|
||||
cat >&2 <<USAGE
|
||||
usage: $0 [options]
|
||||
|
||||
Boot a dev swarm cluster.
|
||||
|
||||
OPTIONS:
|
||||
-d, --dir DIR Base directory to store node data [default: ${DEFAULT_BASE_DIR}]
|
||||
-s, --size SIZE Size of swarm cluster [default: ${DEFAULT_CLUSTER_SIZE}]
|
||||
-h, --help Show this message
|
||||
USAGE
|
||||
}
|
||||
|
||||
main() {
|
||||
local base_dir="${DEFAULT_BASE_DIR}"
|
||||
local cluster_size="${DEFAULT_CLUSTER_SIZE}"
|
||||
|
||||
parse_args "$@"
|
||||
|
||||
local pid_dir="${base_dir}/pids"
|
||||
local log_dir="${base_dir}/logs"
|
||||
mkdir -p "${base_dir}" "${pid_dir}" "${log_dir}"
|
||||
|
||||
stop_cluster
|
||||
create_network
|
||||
start_bootnode
|
||||
start_geth_node
|
||||
start_swarm_nodes
|
||||
}
|
||||
|
||||
parse_args() {
|
||||
while true; do
|
||||
case "$1" in
|
||||
-h | --help)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
-d | --dir)
|
||||
if [[ -z "$2" ]]; then
|
||||
fail "--dir flag requires an argument"
|
||||
fi
|
||||
base_dir="$2"
|
||||
shift 2
|
||||
;;
|
||||
-s | --size)
|
||||
if [[ -z "$2" ]]; then
|
||||
fail "--size flag requires an argument"
|
||||
fi
|
||||
cluster_size="$2"
|
||||
shift 2
|
||||
;;
|
||||
*)
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ $# -ne 0 ]]; then
|
||||
usage
|
||||
fail "ERROR: invalid arguments: $@"
|
||||
fi
|
||||
}
|
||||
|
||||
stop_cluster() {
|
||||
info "stopping existing cluster"
|
||||
"${ROOT}/swarm/dev/scripts/stop-cluster.sh" --dir "${base_dir}"
|
||||
}
|
||||
|
||||
# create_network creates a Linux bridge which is used to connect the node
|
||||
# network namespaces together
|
||||
create_network() {
|
||||
local subnet="${BRIDGE_IP}/24"
|
||||
|
||||
info "creating ${subnet} network on ${BRIDGE_NAME}"
|
||||
ip link add name "${BRIDGE_NAME}" type bridge
|
||||
ip link set dev "${BRIDGE_NAME}" up
|
||||
ip address add "${subnet}" dev "${BRIDGE_NAME}"
|
||||
}
|
||||
|
||||
# start_bootnode starts a bootnode which is used to bootstrap the geth and
|
||||
# swarm nodes
|
||||
start_bootnode() {
|
||||
local key_file="${base_dir}/bootnode.key"
|
||||
echo -n "${BOOTNODE_KEY}" > "${key_file}"
|
||||
|
||||
local args=(
|
||||
--addr "${BOOTNODE_IP}:${BOOTNODE_PORT}"
|
||||
--nodekey "${key_file}"
|
||||
--verbosity "6"
|
||||
)
|
||||
|
||||
start_node "bootnode" "${BOOTNODE_IP}" "$(which bootnode)" ${args[@]}
|
||||
}
|
||||
|
||||
# start_geth_node starts a geth node with --datadir pointing at <base-dir>/geth
|
||||
# and a single, unlocked account with password "geth"
|
||||
start_geth_node() {
|
||||
local dir="${base_dir}/geth"
|
||||
mkdir -p "${dir}"
|
||||
|
||||
local password="geth"
|
||||
echo "${password}" > "${dir}/password"
|
||||
|
||||
# create an account if necessary
|
||||
if [[ ! -e "${dir}/keystore" ]]; then
|
||||
info "creating geth account"
|
||||
create_account "${dir}" "${password}"
|
||||
fi
|
||||
|
||||
# get the account address
|
||||
local address="$(jq --raw-output '.address' ${dir}/keystore/*)"
|
||||
if [[ -z "${address}" ]]; then
|
||||
fail "failed to get geth account address"
|
||||
fi
|
||||
|
||||
local args=(
|
||||
--datadir "${dir}"
|
||||
--networkid "321"
|
||||
--bootnodes "${BOOTNODE_URL}"
|
||||
--unlock "${address}"
|
||||
--password "${dir}/password"
|
||||
--rpc
|
||||
--rpcaddr "${GETH_IP}"
|
||||
--rpcport "${GETH_RPC_PORT}"
|
||||
--verbosity "6"
|
||||
)
|
||||
|
||||
start_node "geth" "${GETH_IP}" "$(which geth)" ${args[@]}
|
||||
}
|
||||
|
||||
start_swarm_nodes() {
|
||||
for i in $(seq 1 ${cluster_size}); do
|
||||
start_swarm_node "${i}"
|
||||
done
|
||||
}
|
||||
|
||||
# start_swarm_node starts a swarm node with a name like "swarmNN" (where NN is
|
||||
# a zero-padded integer like "07"), --datadir pointing at <base-dir>/<name>
|
||||
# (e.g. <base-dir>/swarm07) and a single account with <name> as the password
|
||||
start_swarm_node() {
|
||||
local num=$1
|
||||
local name="swarm$(printf '%02d' ${num})"
|
||||
local ip="192.168.33.1$(printf '%02d' ${num})"
|
||||
|
||||
local dir="${base_dir}/${name}"
|
||||
mkdir -p "${dir}"
|
||||
|
||||
local password="${name}"
|
||||
echo "${password}" > "${dir}/password"
|
||||
|
||||
# create an account if necessary
|
||||
if [[ ! -e "${dir}/keystore" ]]; then
|
||||
info "creating account for ${name}"
|
||||
create_account "${dir}" "${password}"
|
||||
fi
|
||||
|
||||
# get the account address
|
||||
local address="$(jq --raw-output '.address' ${dir}/keystore/*)"
|
||||
if [[ -z "${address}" ]]; then
|
||||
fail "failed to get swarm account address"
|
||||
fi
|
||||
|
||||
local args=(
|
||||
--bootnodes "${BOOTNODE_URL}"
|
||||
--datadir "${dir}"
|
||||
--identity "${name}"
|
||||
--ethapi "${GETH_RPC_URL}"
|
||||
--bzznetworkid "321"
|
||||
--bzzaccount "${address}"
|
||||
--password "${dir}/password"
|
||||
--verbosity "6"
|
||||
)
|
||||
|
||||
start_node "${name}" "${ip}" "$(which swarm)" ${args[@]}
|
||||
}
|
||||
|
||||
# start_node runs the node command as a daemon in a network namespace
|
||||
start_node() {
|
||||
local name="$1"
|
||||
local ip="$2"
|
||||
local path="$3"
|
||||
local cmd_args=${@:4}
|
||||
|
||||
info "starting ${name} with IP ${ip}"
|
||||
|
||||
create_node_network "${name}" "${ip}"
|
||||
|
||||
# add a marker to the log file
|
||||
cat >> "${log_dir}/${name}.log" <<EOF
|
||||
|
||||
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
|
||||
Starting ${name} node - $(date)
|
||||
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
|
||||
|
||||
EOF
|
||||
|
||||
# run the command in the network namespace using start-stop-daemon to
|
||||
# daemonise the process, sending all output to the log file
|
||||
local daemon_args=(
|
||||
--start
|
||||
--background
|
||||
--no-close
|
||||
--make-pidfile
|
||||
--pidfile "${pid_dir}/${name}.pid"
|
||||
--exec "${path}"
|
||||
)
|
||||
if ! ip netns exec "${name}" start-stop-daemon ${daemon_args[@]} -- $cmd_args &>> "${log_dir}/${name}.log"; then
|
||||
fail "could not start ${name}, check ${log_dir}/${name}.log"
|
||||
fi
|
||||
}
|
||||
|
||||
# create_node_network creates a network namespace and connects it to the Linux
|
||||
# bridge using a veth pair
|
||||
create_node_network() {
|
||||
local name="$1"
|
||||
local ip="$2"
|
||||
|
||||
# create the namespace
|
||||
ip netns add "${name}"
|
||||
|
||||
# create the veth pair
|
||||
local veth0="veth${name}0"
|
||||
local veth1="veth${name}1"
|
||||
ip link add name "${veth0}" type veth peer name "${veth1}"
|
||||
|
||||
# add one end to the bridge
|
||||
ip link set dev "${veth0}" master "${BRIDGE_NAME}"
|
||||
ip link set dev "${veth0}" up
|
||||
|
||||
# add the other end to the namespace, rename it eth0 and give it the ip
|
||||
ip link set dev "${veth1}" netns "${name}"
|
||||
ip netns exec "${name}" ip link set dev "${veth1}" name "eth0"
|
||||
ip netns exec "${name}" ip link set dev "eth0" up
|
||||
ip netns exec "${name}" ip address add "${ip}/24" dev "eth0"
|
||||
}
|
||||
|
||||
create_account() {
|
||||
local dir=$1
|
||||
local password=$2
|
||||
|
||||
geth --datadir "${dir}" --password /dev/stdin account new <<< "${password}"
|
||||
}
|
||||
|
||||
main "$@"
|
||||
96
swarm/dev/scripts/random-uploads.sh
Executable file
96
swarm/dev/scripts/random-uploads.sh
Executable file
@@ -0,0 +1,96 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# A script to upload random data to a swarm cluster.
|
||||
#
|
||||
# Example:
|
||||
#
|
||||
# random-uploads.sh --addr 192.168.33.101:8500 --size 40k --count 1000
|
||||
|
||||
set -e
|
||||
|
||||
ROOT="$(cd "$(dirname "$0")/../../.." && pwd)"
|
||||
source "${ROOT}/swarm/dev/scripts/util.sh"
|
||||
|
||||
# default values for the command line flags (overridable via parse_args,
# see usage below)
DEFAULT_ADDR="localhost:8500"
DEFAULT_UPLOAD_SIZE="40k"
DEFAULT_UPLOAD_COUNT="1000"
|
||||
|
||||
# usage prints the help text to STDERR.
usage() {
  cat >&2 <<USAGE
usage: $0 [options]

Upload random data to a Swarm cluster.

OPTIONS:
  -a, --addr ADDR     Swarm API address [default: ${DEFAULT_ADDR}]
  -s, --size SIZE     Individual upload size [default: ${DEFAULT_UPLOAD_SIZE}]
  -c, --count COUNT   Number of uploads [default: ${DEFAULT_UPLOAD_COUNT}]
  -h, --help          Show this message
USAGE
}
|
||||
|
||||
# main drives the script: it seeds the settings with their defaults, lets
# parse_args override them from the command line, and then performs the
# requested number of random uploads, logging progress before each one.
main() {
  local addr="${DEFAULT_ADDR}"
  local upload_size="${DEFAULT_UPLOAD_SIZE}"
  local upload_count="${DEFAULT_UPLOAD_COUNT}"

  parse_args "$@"

  info "uploading ${upload_count} ${upload_size} random files to ${addr}"

  # the locals above are visible to do_random_upload / random_data through
  # bash's dynamic scoping
  local i
  for ((i = 1; i <= upload_count; i++)); do
    info "upload ${i} / ${upload_count}:"
    do_random_upload
    echo
  done
}
|
||||
|
||||
# do_random_upload POSTs a single blob of random data to the Swarm raw
# content endpoint (bzzr:) and prints the HTTP response body to STDOUT.
# curl -f makes a non-2xx response fail the command (aborting the script
# under set -e).
#
# NOTE(review): reads ${addr} (and, via random_data, ${upload_size}) from
# the caller's scope — only call after main has run parse_args.
do_random_upload() {
  curl -fsSL -X POST --data-binary "$(random_data)" "http://${addr}/bzzr:/"
}
|
||||
|
||||
# random_data writes ${upload_size} bytes of random data to STDOUT.
#
# ${upload_size} comes from the caller's scope and accepts dd-style size
# suffixes (e.g. "40k").
#
# iflag=fullblock makes dd keep reading until a full block is collected;
# without it a short read from /dev/urandom (possible for large --size
# values) would silently produce a truncated upload.
random_data() {
  dd if=/dev/urandom of=/dev/stdout bs="${upload_size}" count=1 iflag=fullblock 2>/dev/null
}
|
||||
|
||||
# parse_args processes the command line flags, overriding the upload
# settings declared in the caller's scope (addr, upload_size, upload_count).
# It exits via fail() when a flag is missing its argument or when any
# unrecognised arguments remain, and exits 0 after printing help for -h.
parse_args() {
  while [[ $# -gt 0 ]]; do
    case "$1" in
      -a | --addr)
        [[ -n "$2" ]] || fail "--addr flag requires an argument"
        addr="$2"
        shift 2
        ;;
      -s | --size)
        [[ -n "$2" ]] || fail "--size flag requires an argument"
        upload_size="$2"
        shift 2
        ;;
      -c | --count)
        [[ -n "$2" ]] || fail "--count flag requires an argument"
        upload_count="$2"
        shift 2
        ;;
      -h | --help)
        usage
        exit 0
        ;;
      *)
        break
        ;;
    esac
  done

  if [[ $# -ne 0 ]]; then
    usage
    fail "ERROR: invalid arguments: $@"
  fi
}
|
||||
|
||||
# script entry point: forward all command line arguments to main
main "$@"
|
||||
98
swarm/dev/scripts/stop-cluster.sh
Executable file
98
swarm/dev/scripts/stop-cluster.sh
Executable file
@@ -0,0 +1,98 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# A script to shutdown a dev swarm cluster.
|
||||
|
||||
set -e
|
||||
|
||||
ROOT="$(cd "$(dirname "$0")/../../.." && pwd)"
|
||||
source "${ROOT}/swarm/dev/scripts/util.sh"
|
||||
|
||||
DEFAULT_BASE_DIR="${ROOT}/swarm/dev/cluster"
|
||||
|
||||
# usage prints the help text to STDERR.
usage() {
  cat >&2 <<USAGE
usage: $0 [options]

Shutdown a dev swarm cluster.

OPTIONS:
  -d, --dir DIR   Base directory [default: ${DEFAULT_BASE_DIR}]
  -h, --help      Show this message
USAGE
}
|
||||
|
||||
# main tears the cluster down: all swarm nodes first, then the geth and
# bootnode processes, and finally the shared bridge network.
main() {
  local base_dir="${DEFAULT_BASE_DIR}"

  parse_args "$@"

  # pid_dir is read by stop_swarm_nodes and stop_node through bash's
  # dynamic scoping of locals
  local pid_dir="${base_dir}/pids"

  stop_swarm_nodes
  stop_node "geth"
  stop_node "bootnode"
  delete_network
}
|
||||
|
||||
# parse_args processes the command line flags, overriding base_dir in the
# caller's scope. It exits via fail() when --dir is missing its argument or
# when any unrecognised arguments remain, and exits 0 after printing help.
parse_args() {
  while [[ $# -gt 0 ]]; do
    case "$1" in
      -d | --dir)
        [[ -n "$2" ]] || fail "--dir flag requires an argument"
        base_dir="$2"
        shift 2
        ;;
      -h | --help)
        usage
        exit 0
        ;;
      *)
        break
        ;;
    esac
  done

  if [[ $# -ne 0 ]]; then
    usage
    fail "ERROR: invalid arguments: $@"
  fi
}
|
||||
|
||||
# stop_swarm_nodes stops every swarm node that has a pid file in ${pid_dir}
# (set by main).
#
# Iterating the glob directly — instead of parsing ls output through
# grep -oP — is whitespace-safe, avoids the GNU-only PCRE grep dependency,
# and cannot invoke stop_node twice for the same node when stray
# swarmN.* files sit next to the pid files.
stop_swarm_nodes() {
  local pid_file name
  for pid_file in "${pid_dir}"/swarm*.pid; do
    # with no matches the glob expands to itself; skip the literal pattern
    [[ -e "${pid_file}" ]] || continue
    name="$(basename "${pid_file}" .pid)"
    stop_node "${name}"
  done
}
|
||||
|
||||
# stop_node stops a single node and cleans up its network resources.
#
# Arguments:
#   $1 - the node name (e.g. "geth", "bootnode" or "swarm1")
#
# Each step is guarded by an existence check, so the function is safe to
# call for a node that is only partially running (or not running at all).
# Reads ${pid_dir} from the caller's scope.
stop_node() {
  local name=$1
  local pid_file="${pid_dir}/${name}.pid"

  # stop the daemonised process if it left a pid file behind;
  # --oknodo treats an already-stopped process as success, and per the
  # start-stop-daemon docs --retry 15 sends TERM, waits up to 15s, then
  # escalates to KILL
  if [[ -e "${pid_file}" ]]; then
    info "stopping ${name}"
    start-stop-daemon \
      --stop \
      --pidfile "${pid_file}" \
      --remove-pidfile \
      --oknodo \
      --retry 15
  fi

  # delete the node's network namespace, if present
  if ip netns list | grep -qF "${name}"; then
    ip netns delete "${name}"
  fi

  # delete the host-side end of the node's veth pair, if present
  if ip link show "veth${name}0" &>/dev/null; then
    ip link delete dev "veth${name}0"
  fi
}
|
||||
|
||||
# delete_network removes the swarmbr0 Linux bridge that connects the node
# namespaces; a no-op when the bridge does not exist.
# (The if-guard also keeps the function's exit status zero under set -e
# when there is nothing to delete.)
delete_network() {
  if ip link show "swarmbr0" &>/dev/null; then
    ip link delete dev "swarmbr0"
  fi
}
|
||||
|
||||
# script entry point: forward all command line arguments to main
main "$@"
|
||||
53
swarm/dev/scripts/util.sh
Normal file
53
swarm/dev/scripts/util.sh
Normal file
@@ -0,0 +1,53 @@
|
||||
# shared shell functions
|
||||
|
||||
# info logs an informational message to STDOUT in green (colour is applied
# by say only when attached to a terminal), prefixed with "===> " and a
# HH:MM:SS timestamp.
info() {
  say "===> $(date +%H:%M:%S) $*" "green"
}
|
||||
|
||||
# warn logs a warning to STDERR in yellow (colour is applied by say only
# when attached to a terminal), prefixed with "===> ", a HH:MM:SS timestamp
# and "WARN:".
warn() {
  say "===> $(date +%H:%M:%S) WARN: $*" "yellow" >&2
}
|
||||
|
||||
# fail prints an error message to STDERR in red and terminates the script
# with exit status 1.
fail() {
  say "ERROR: $*" "red" >&2
  exit 1
}
|
||||
|
||||
# say prints the given message to STDOUT, colouring it when a known colour
# is requested and STDOUT is a terminal.
#
# usage:
#
#   say "foo"             - prints "foo"
#   say "bar" "red"       - prints "bar" in red
#   say "baz" "green"     - prints "baz" in green
#   say "qux" "red" | tee - prints "qux" with no colour
#
say() {
  local msg=$1
  local color=$2

  # resolve the colour name to an ANSI code; it stays empty (plain output)
  # for unknown colours or when STDOUT is not a terminal
  local code=""
  if [[ -t 1 ]]; then
    case "${color}" in
      red)    code="31" ;;
      green)  code="32" ;;
      yellow) code="33" ;;
    esac
  fi

  if [[ -n "${code}" ]]; then
    echo -e "\033[1;${code}m${msg}\033[0m"
  else
    echo "${msg}"
  fi
}
|
||||
Reference in New Issue
Block a user