Skip to content
Snippets Groups Projects
Unverified Commit da29a7c2 authored by Wolfgang Welz's avatar Wolfgang Welz Committed by GitHub
Browse files

chore: remove unused packages (#99)

parent 2cc68e1b
No related branches found
No related tags found
No related merge requests found
Showing
with 10 additions and 1044 deletions
package batchworkerpool
import (
"sync"
"time"
)
// BatchWorkerPool collects submitted calls into batches and hands each batch
// to workerFnc on one of a fixed number of worker goroutines.
type BatchWorkerPool struct {
	workerFnc func([]Task) // processes one collected batch

	options *Options

	calls        chan Task   // submitted calls awaiting batching
	batchedCalls chan []Task // batches ready for the workers
	terminate    chan int    // closed to signal shutdown

	running bool
	mutex   sync.RWMutex   // guards running and the channel fields
	wait    sync.WaitGroup // tracks the dispatcher and worker goroutines
}
// New creates a BatchWorkerPool that processes batches with workerFnc, using
// the default options overridden by any supplied Option values.
func New(workerFnc func([]Task), optionalOptions ...Option) (result *BatchWorkerPool) {
	result = &BatchWorkerPool{
		workerFnc: workerFnc,
		options:   DEFAULT_OPTIONS.Override(optionalOptions...),
	}
	result.resetChannels()

	return
}
// Submit queues a call with the given parameters and returns a buffered
// channel on which the worker's result will be delivered. If the pool is not
// running, the returned channel is closed immediately.
func (wp *BatchWorkerPool) Submit(params ...interface{}) (result chan interface{}) {
	result = make(chan interface{}, 1)

	wp.mutex.RLock()
	defer wp.mutex.RUnlock()

	if !wp.running {
		// Pool stopped: close so the receiver unblocks with the zero value.
		close(result)

		return
	}

	wp.calls <- Task{
		params:     params,
		resultChan: result,
	}

	return
}
// Start launches the dispatcher and worker goroutines; calling it on a
// running pool is a no-op.
func (wp *BatchWorkerPool) Start() {
	wp.mutex.Lock()
	defer wp.mutex.Unlock()

	if wp.running {
		return
	}

	wp.running = true
	wp.startBatchDispatcher()
	wp.startBatchWorkers()
}
// Run starts the pool and blocks until it has been stopped and all of its
// goroutines have returned.
func (wp *BatchWorkerPool) Run() {
	wp.Start()
	wp.wait.Wait()
}
// Stop shuts the pool down asynchronously; use StopAndWait to block until
// shutdown completes.
func (wp *BatchWorkerPool) Stop() {
	go wp.StopAndWait()
}
// StopAndWait signals shutdown, replaces the channels so the pool could be
// restarted, and blocks until every goroutine has finished.
func (wp *BatchWorkerPool) StopAndWait() {
	wp.mutex.Lock()
	defer wp.mutex.Unlock()

	if wp.running {
		wp.running = false
		close(wp.terminate)
		wp.resetChannels()
	}

	wp.wait.Wait()
}
// resetChannels allocates fresh call, batch and terminate channels sized from
// the pool options.
func (wp *BatchWorkerPool) resetChannels() {
	opts := wp.options

	wp.calls = make(chan Task, opts.QueueSize)
	wp.batchedCalls = make(chan []Task, 2*opts.WorkerCount)
	wp.terminate = make(chan int, 1)
}
// startBatchDispatcher launches the goroutine that drains the call queue and
// groups calls into batches of at most BatchSize, dispatching a partial batch
// after BatchCollectionTimeout.
func (wp *BatchWorkerPool) startBatchDispatcher() {
	// Capture the current channel generation: StopAndWait replaces the fields
	// via resetChannels, so the goroutine must keep using the channels it was
	// started with. The original code read wp.calls directly in the inner
	// select, which races with resetChannels and could pull calls from the
	// next generation's queue.
	calls := wp.calls
	batchedCalls := wp.batchedCalls
	terminate := wp.terminate

	wp.wait.Add(1)
	go func() {
		defer wp.wait.Done()

		for {
			select {
			case <-terminate:
				return
			case firstCall := <-calls:
				batchTask := append(make([]Task, 0), firstCall)
				collectionTimeout := time.After(wp.options.BatchCollectionTimeout)

				// collect additional requests that arrive within the timeout
			CollectAdditionalCalls:
				for {
					select {
					case <-terminate:
						return
					case <-collectionTimeout:
						break CollectAdditionalCalls
					case call := <-calls: // fixed: was wp.calls (racy after Stop)
						batchTask = append(batchTask, call)

						if len(batchTask) == wp.options.BatchSize {
							break CollectAdditionalCalls
						}
					}
				}

				batchedCalls <- batchTask
			}
		}
	}()
}
// startBatchWorkers launches WorkerCount goroutines that each process batches
// with workerFnc until the terminate channel is closed.
func (wp *BatchWorkerPool) startBatchWorkers() {
	batchedCalls := wp.batchedCalls
	terminate := wp.terminate

	for i := 0; i < wp.options.WorkerCount; i++ {
		wp.wait.Add(1)

		go func() {
			defer wp.wait.Done()

			for {
				select {
				case <-terminate:
					return
				case batchTask := <-batchedCalls:
					wp.workerFnc(batchTask)
				}
			}
		}()
	}
}
package batchworkerpool
import (
"runtime"
"time"
)
// DEFAULT_OPTIONS holds the settings used when New is called without explicit
// overrides: worker and queue sizes scale with the CPU count.
var DEFAULT_OPTIONS = &Options{
	WorkerCount:            2 * runtime.NumCPU(),
	QueueSize:              2 * runtime.NumCPU() * 64,
	BatchSize:              64,
	BatchCollectionTimeout: 15 * time.Millisecond,
}
// WorkerCount returns an Option that sets the number of worker goroutines.
func WorkerCount(workerCount int) Option {
	return func(opts *Options) {
		opts.WorkerCount = workerCount
	}
}
// BatchSize returns an Option that sets the maximum number of calls per batch.
func BatchSize(batchSize int) Option {
	return func(opts *Options) {
		opts.BatchSize = batchSize
	}
}
// BatchCollectionTimeout returns an Option that sets how long the dispatcher
// waits for additional calls before sending a partial batch.
func BatchCollectionTimeout(batchCollectionTimeout time.Duration) Option {
	return func(opts *Options) {
		opts.BatchCollectionTimeout = batchCollectionTimeout
	}
}
// QueueSize returns an Option that sets the capacity of the submission queue.
func QueueSize(queueSize int) Option {
	return func(opts *Options) {
		opts.QueueSize = queueSize
	}
}
// Options holds the configurable parameters of a BatchWorkerPool.
type Options struct {
	WorkerCount            int           // number of worker goroutines
	QueueSize              int           // capacity of the call submission queue
	BatchSize              int           // maximum calls per dispatched batch
	BatchCollectionTimeout time.Duration // wait time before dispatching a partial batch
}
// Override applies the given options to a copy of the receiver and returns a
// pointer to that copy (the receiver is passed by value and stays untouched).
func (options Options) Override(optionalOptions ...Option) *Options {
	result := &options
	for _, apply := range optionalOptions {
		apply(result)
	}

	return result
}
type Option func(*Options)
package batchworkerpool
// Task is a single submitted call: its parameters plus the channel on which
// the worker delivers its result.
type Task struct {
	params     []interface{}
	resultChan chan interface{}
}
// Return delivers the worker's result to the submitter and closes the result
// channel, so at most one result can ever be sent per task.
func (task *Task) Return(result interface{}) {
	task.resultChan <- result
	close(task.resultChan)
}
// Param returns the index-th parameter this task was submitted with.
func (task *Task) Param(index int) interface{} {
	return task.params[index]
}
package client
import (
"github.com/iotaledger/goshimmer/packages/curl"
"github.com/iotaledger/goshimmer/packages/model/value_transaction"
"github.com/iotaledger/iota.go/curl"
"github.com/iotaledger/iota.go/trinary"
)
......@@ -31,11 +31,9 @@ func CalculateBundleHash(transactions []*value_transaction.ValueTransaction) tri
copy(concatenatedBundleEssences[value_transaction.BUNDLE_ESSENCE_SIZE*i:value_transaction.BUNDLE_ESSENCE_SIZE*(i+1)], bundleTransaction.GetBundleEssence(lastInputAddress != bundleTransaction.GetAddress()))
}
var bundleHash = make(trinary.Trits, 243)
hasher := curl.NewCurl(243, 81)
hasher.Absorb(concatenatedBundleEssences, 0, len(concatenatedBundleEssences))
hasher.Squeeze(bundleHash, 0, 243)
bundleHash, err := curl.HashTrits(concatenatedBundleEssences)
if err != nil {
panic(err)
}
return trinary.MustTritsToTrytes(bundleHash)
}
package client
package client
import (
"github.com/iotaledger/iota.go/consts"
"github.com/iotaledger/iota.go/trinary"
)
// ValueBundleFactory accumulates inputs and outputs (keyed by seed address
// index) for assembling a value bundle.
type ValueBundleFactory struct {
	seed          trinary.Trytes
	securityLevel consts.SecurityLevel
	seedInputs    map[uint64]uint64 // address index -> value to spend
	seedOutputs   map[uint64]uint64 // address index -> value to receive (not populated in this file)
}
// New creates a ValueBundleFactory for the given seed and security level with
// empty input/output maps.
func New(seed trinary.Trytes, securityLevel consts.SecurityLevel) *ValueBundleFactory {
	factory := &ValueBundleFactory{
		seed:          seed,
		securityLevel: securityLevel,
	}
	factory.seedInputs = make(map[uint64]uint64)
	factory.seedOutputs = make(map[uint64]uint64)

	return factory
}
// AddInput registers value to be spent from the address at addressIndex,
// overwriting any previously registered value for that index.
func (factory *ValueBundleFactory) AddInput(addressIndex uint64, value uint64) {
	factory.seedInputs[addressIndex] = value
}
package curl
import (
"strconv"
"github.com/iotaledger/goshimmer/packages/batchworkerpool"
"github.com/iotaledger/goshimmer/packages/ternary"
"github.com/iotaledger/iota.go/trinary"
)
// BatchHasher computes Curl hashes, transparently batching concurrent
// requests through a worker pool so that multiple inputs share one
// binary-coded-ternary Curl run (one bit-lane per request).
type BatchHasher struct {
	hashLength int // number of trits in one produced hash
	rounds     int // Curl transformation rounds
	workerPool *batchworkerpool.BatchWorkerPool
}
// NewBatchHasher creates a BatchHasher for the given hash length and round
// count and starts its internal worker pool.
func NewBatchHasher(hashLength int, rounds int) (result *BatchHasher) {
	result = &BatchHasher{
		hashLength: hashLength,
		rounds:     rounds,
	}

	// strconv.IntSize lanes per batch: one bit-lane per request in BCT Curl.
	result.workerPool = batchworkerpool.New(
		result.processHashes,
		batchworkerpool.BatchSize(strconv.IntSize),
		batchworkerpool.WorkerCount(100),
		batchworkerpool.QueueSize(500000),
	)
	result.workerPool.Start()

	return
}
// Hash submits trits to the batching worker pool and blocks until the digest
// is available.
// NOTE(review): if the pool has been stopped, Submit's channel is closed and
// the type assertion on the zero value will panic — confirm this cannot
// happen in practice.
func (this *BatchHasher) Hash(trits trinary.Trits) trinary.Trits {
	return (<-this.workerPool.Submit(trits)).(trinary.Trits)
}
// processHashes hashes one batch of tasks. Multiple tasks are multiplexed
// into a single binary-coded-ternary Curl computation (one bit-lane each); a
// single task falls back to the plain Curl implementation.
func (this *BatchHasher) processHashes(tasks []batchworkerpool.Task) {
	if len(tasks) > 1 {
		// multiplex the requests
		multiplexer := ternary.NewBCTernaryMultiplexer()
		for _, hashRequest := range tasks {
			multiplexer.Add(hashRequest.Param(0).(trinary.Trits))
		}
		bcTrits, err := multiplexer.Extract()
		if err != nil {
			panic(err)
		}

		// calculate the hash for all lanes at once
		bctCurl := NewBCTCurl(this.hashLength, this.rounds, strconv.IntSize)
		bctCurl.Reset()
		bctCurl.Absorb(bcTrits)

		// extract the per-task results from the demultiplexer; task order
		// matches lane order because Add assigns lanes sequentially
		demux := ternary.NewBCTernaryDemultiplexer(bctCurl.Squeeze(243))
		for i, task := range tasks {
			task.Return(demux.Get(i))
		}
	} else {
		// single request: plain (non-batched) Curl is cheaper
		var resp = make(trinary.Trits, this.hashLength)

		trits := tasks[0].Param(0).(trinary.Trits)

		curl := NewCurl(this.hashLength, this.rounds)
		curl.Absorb(trits, 0, len(trits))
		curl.Squeeze(resp, 0, this.hashLength)

		tasks[0].Return(resp)
	}
}
package curl
import (
"crypto/ed25519"
"sync"
"testing"
"github.com/iotaledger/iota.go/trinary"
"golang.org/x/crypto/blake2b"
)
// zeroReader is an io.Reader that fills every buffer with zero bytes.
type zeroReader struct{}

// Read zeroes buf completely and reports a full, error-free read.
func (zeroReader) Read(buf []byte) (int, error) {
	for i := 0; i < len(buf); i++ {
		buf[i] = 0
	}
	return len(buf), nil
}
// BenchmarkEd25519 measures concurrent Ed25519 signature verification.
func BenchmarkEd25519(b *testing.B) {
	var zero zeroReader
	public, private, _ := ed25519.GenerateKey(zero)

	message := make([]byte, 75)
	sig := ed25519.Sign(private, message)

	b.ResetTimer()

	var pending sync.WaitGroup
	for i := 0; i < b.N; i++ {
		pending.Add(1)
		go func() {
			defer pending.Done()
			if !ed25519.Verify(public, message, sig) {
				panic("valid signature rejected")
			}
		}()
	}
	pending.Wait()
}
var sampleTransactionData = make([]byte, 750)
// BenchmarkBytesToTrits measures concurrent byte-to-trit conversion of a
// Blake2b-512 digest.
func BenchmarkBytesToTrits(b *testing.B) {
	digest := blake2b.Sum512(sampleTransactionData)

	b.ResetTimer()

	var pending sync.WaitGroup
	for i := 0; i < b.N; i++ {
		pending.Add(1)
		go func() {
			defer pending.Done()
			_, _ = trinary.BytesToTrits(digest[:])
		}()
	}
	pending.Wait()
}
// BenchmarkBlake2b measures concurrent Blake2b-256 hashing of the sample payload.
func BenchmarkBlake2b(b *testing.B) {
	var pending sync.WaitGroup
	for i := 0; i < b.N; i++ {
		pending.Add(1)
		go func() {
			defer pending.Done()
			blake2b.Sum256(sampleTransactionData)
		}()
	}
	pending.Wait()
}
// BenchmarkBatchHasher_Hash measures concurrent hashing through the batching
// Curl hasher.
func BenchmarkBatchHasher_Hash(b *testing.B) {
	batchHasher := NewBatchHasher(243, 81)
	input := make(trinary.Trits, 7500)

	b.ResetTimer()

	var pending sync.WaitGroup
	for i := 0; i < b.N; i++ {
		pending.Add(1)
		go func() {
			defer pending.Done()
			batchHasher.Hash(input)
		}()
	}
	pending.Wait()
}
package curl
import "github.com/iotaledger/goshimmer/packages/ternary"
const (
	// NUMBER_OF_TRITS_IN_A_TRYTE is the number of trits that make up one tryte.
	NUMBER_OF_TRITS_IN_A_TRYTE = 3
)
// BCTCurl is a binary-coded-ternary Curl sponge: each uint in the state holds
// one bit per lane, so up to word-size many inputs are hashed in parallel.
type BCTCurl struct {
	hashLength     int  // trits per hash
	numberOfRounds int  // Curl transformation rounds
	highLongBits   uint // word with the low batchSize bits set; the "all ones" lane mask
	stateLength    int  // 3 * hashLength
	state          ternary.BCTrits
	cTransform     func() // unused in this file — TODO confirm it can be removed
}
// NewBCTCurl creates a binary-coded-ternary Curl sponge for batchSize
// parallel lanes and resets its state.
func NewBCTCurl(hashLength int, numberOfRounds int, batchSize int) *BCTCurl {
	// Build a mask with the low batchSize bits set; each set bit is one lane.
	var highLongBits uint
	for i := 0; i < batchSize; i++ {
		highLongBits |= 1 << uint(i)
	}

	stateLength := NUMBER_OF_TRITS_IN_A_TRYTE * hashLength
	result := &BCTCurl{
		hashLength:     hashLength,
		numberOfRounds: numberOfRounds,
		highLongBits:   highLongBits,
		stateLength:    stateLength,
		state: ternary.BCTrits{
			Lo: make([]uint, stateLength),
			Hi: make([]uint, stateLength),
		},
		cTransform: nil,
	}
	result.Reset()

	return result
}
// Reset sets every state trit of every lane back to zero (Lo and Hi both set).
func (this *BCTCurl) Reset() {
	for i := range this.state.Lo {
		this.state.Lo[i] = this.highLongBits
		this.state.Hi[i] = this.highLongBits
	}
}
// Transform applies the Curl round function numberOfRounds times to the
// binary-coded state, processing all lanes in parallel via bitwise ops.
func (this *BCTCurl) Transform() {
	scratchPadLo := make([]uint, this.stateLength)
	scratchPadHi := make([]uint, this.stateLength)
	scratchPadIndex := 0
	for round := this.numberOfRounds; round > 0; round-- {
		// snapshot the state; the loop below rewrites it in place
		copy(scratchPadLo, this.state.Lo)
		copy(scratchPadHi, this.state.Hi)
		for stateIndex := 0; stateIndex < this.stateLength; stateIndex++ {
			alpha := scratchPadLo[scratchPadIndex]
			beta := scratchPadHi[scratchPadIndex]
			// advance through the state in Curl's fixed permutation order (+364 / -365)
			if scratchPadIndex < 365 {
				scratchPadIndex += 364
			} else {
				scratchPadIndex -= 365
			}
			// branch-free S-box applied to all lanes at once
			delta := beta ^ scratchPadLo[scratchPadIndex]
			this.state.Lo[stateIndex] = ^(delta & alpha)
			this.state.Hi[stateIndex] = delta | (alpha ^ scratchPadHi[scratchPadIndex])
		}
	}
}
// Absorb feeds the binary-coded trits into the sponge in hashLength-sized
// chunks, transforming after each chunk.
func (this *BCTCurl) Absorb(bcTrits ternary.BCTrits) {
	remaining := len(bcTrits.Lo)
	offset := 0

	for {
		chunk := this.hashLength
		if remaining < chunk {
			chunk = remaining
		}

		copy(this.state.Lo[:chunk], bcTrits.Lo[offset:offset+chunk])
		copy(this.state.Hi[:chunk], bcTrits.Hi[offset:offset+chunk])
		this.Transform()

		offset += chunk
		remaining -= chunk
		if remaining <= 0 {
			break
		}
	}
}
// Squeeze extracts tritCount binary-coded trits from the sponge, transforming
// after every full hashLength chunk and once more after a trailing partial
// chunk.
func (this *BCTCurl) Squeeze(tritCount int) ternary.BCTrits {
	result := ternary.BCTrits{
		Lo: make([]uint, tritCount),
		Hi: make([]uint, tritCount),
	}
	// full hashLength-sized chunks
	hashCount := tritCount / this.hashLength
	for i := 0; i < hashCount; i++ {
		copy(result.Lo[i*this.hashLength:(i+1)*this.hashLength], this.state.Lo[0:this.hashLength])
		copy(result.Hi[i*this.hashLength:(i+1)*this.hashLength], this.state.Hi[0:this.hashLength])
		this.Transform()
	}
	// trailing partial chunk, if any
	last := tritCount - hashCount*this.hashLength
	copy(result.Lo[tritCount-last:], this.state.Lo[0:last])
	copy(result.Hi[tritCount-last:], this.state.Hi[0:last])
	if tritCount%this.hashLength != 0 {
		this.Transform()
	}
	return result
}
package curl
import (
"math"
"github.com/iotaledger/iota.go/trinary"
)
const (
	// HASH_LENGTH is the Curl hash size in trits.
	HASH_LENGTH = 243
	// STATE_LENGTH is the sponge state size: three hash lengths.
	STATE_LENGTH = 3 * HASH_LENGTH
)
var (
	// TRUTH_TABLE is the Curl S-box, indexed as t1 + (t2 << 2) + 5 in Transform.
	TRUTH_TABLE = trinary.Trits{1, 0, -1, 2, 1, -1, 0, 2, -1, 1, 0}
)
// Hash is a sponge-function interface.
// NOTE(review): these signatures use raw []int8/[]int pointers and do not
// match the trinary.Trits-based methods Curl actually implements below —
// confirm whether this interface is still referenced anywhere.
type Hash interface {
	Initialize()
	InitializeCurl(trits *[]int8, length int, rounds int)
	Reset()
	Absorb(trits *[]int8, offset int, length int)
	Squeeze(resp []int8, offset int, length int) []int
}
// Curl is a plain (single-input) implementation of the Curl sponge.
type Curl struct {
	Hash // embedded interface; never assigned in this file — TODO confirm it is needed
	state      trinary.Trits
	hashLength int
	rounds     int
}
// NewCurl creates a Curl sponge with the given hash length and round count
// and initializes its state.
func NewCurl(hashLength int, rounds int) *Curl {
	c := &Curl{
		hashLength: hashLength,
		rounds:     rounds,
	}
	c.Reset()

	return c
}
// Initialize resets the sponge state, keeping the configured round count.
func (curl *Curl) Initialize() {
	curl.InitializeCurl(nil, 0, curl.rounds)
}
// InitializeCurl sets the round count and either adopts the given trits as
// the state or, when trits is nil, allocates a fresh zero state.
// The length parameter is unused (kept for interface compatibility).
func (curl *Curl) InitializeCurl(trits trinary.Trits, length int, rounds int) {
	curl.rounds = rounds
	if trits == nil {
		curl.state = make(trinary.Trits, STATE_LENGTH)
		return
	}
	curl.state = trits
}
// Reset re-initializes the sponge to an all-zero state.
func (curl *Curl) Reset() {
	curl.InitializeCurl(nil, 0, curl.rounds)
}
// Absorb feeds length trits starting at offset into the sponge in
// HASH_LENGTH-sized chunks, transforming after each chunk.
func (curl *Curl) Absorb(trits trinary.Trits, offset int, length int) {
	for {
		// integer min instead of the original int->float64->int round-trip
		// through math.Min
		limit := length
		if limit > HASH_LENGTH {
			limit = HASH_LENGTH
		}
		copy(curl.state, trits[offset:offset+limit])
		curl.Transform()

		offset += HASH_LENGTH
		length -= HASH_LENGTH
		if length <= 0 {
			break
		}
	}
}
// Squeeze writes length trits of output starting at offset into resp,
// transforming after each HASH_LENGTH-sized chunk, and returns resp.
func (curl *Curl) Squeeze(resp trinary.Trits, offset int, length int) trinary.Trits {
	for {
		limit := int(math.Min(HASH_LENGTH, float64(length)))
		copy(resp[offset:offset+limit], curl.state)
		curl.Transform()

		offset += HASH_LENGTH
		length -= HASH_LENGTH
		if length <= 0 {
			break
		}
	}

	return resp
}
func (curl *Curl) Transform() {
var index = 0
for round := 0; round < curl.rounds; round++ {
stateCopy := make(trinary.Trits, STATE_LENGTH)
copy(stateCopy, curl.state)
for i := 0; i < STATE_LENGTH; i++ {
incr := 364
if index >= 365 {
incr = -365
}
index2 := index + incr
curl.state[i] = TRUTH_TABLE[stateCopy[index]+(stateCopy[index2]<<2)+5]
index = index2
}
}
}
package curl
const (
	// Curl-P-81 parameters: 243-trit hashes, 81 transformation rounds.
	CURLP81_HASH_LENGTH = 243
	CURLP81_ROUNDS      = 81
)
var (
	// CURLP81 is a shared batch hasher configured for Curl-P-81.
	CURLP81 = NewBatchHasher(CURLP81_HASH_LENGTH, CURLP81_ROUNDS)
)
package filter
import (
"sync"
"github.com/iotaledger/hive.go/typeutils"
)
// ByteArrayFilter is a fixed-capacity FIFO set of byte slices: once size
// entries are stored, adding a new one evicts the oldest.
type ByteArrayFilter struct {
	byteArrays      [][]byte        // insertion-ordered entries, oldest first
	byteArraysByKey map[string]bool // membership index keyed by the bytes' string form
	size            int             // maximum number of retained entries
	mutex           sync.RWMutex
}
// NewByteArrayFilter creates an empty filter that retains at most size entries.
func NewByteArrayFilter(size int) *ByteArrayFilter {
	filter := &ByteArrayFilter{size: size}
	filter.byteArrays = make([][]byte, 0, size)
	filter.byteArraysByKey = make(map[string]bool, size)

	return filter
}
// Contains reports whether byteArray is currently stored in the filter.
func (filter *ByteArrayFilter) Contains(byteArray []byte) bool {
	filter.mutex.RLock()
	defer filter.mutex.RUnlock()

	key := typeutils.BytesToString(byteArray)
	_, exists := filter.byteArraysByKey[key]

	return exists
}
// Add inserts byteArray into the filter, evicting the oldest entry when full.
// It returns true if the entry was new and false if it was already present.
func (filter *ByteArrayFilter) Add(byteArray []byte) bool {
	key := typeutils.BytesToString(byteArray)

	filter.mutex.Lock()
	defer filter.mutex.Unlock()

	// already present: nothing to do
	if _, exists := filter.byteArraysByKey[key]; exists {
		return false
	}

	if len(filter.byteArrays) == filter.size {
		// at capacity: drop the oldest entry (FIFO) before appending
		delete(filter.byteArraysByKey, typeutils.BytesToString(filter.byteArrays[0]))
		filter.byteArrays = append(filter.byteArrays[1:], byteArray)
	} else {
		filter.byteArrays = append(filter.byteArrays, byteArray)
	}
	filter.byteArraysByKey[key] = true

	return true
}
package filter
import "testing"
// BenchmarkAdd measures repeated insertion of the same entry into a full filter.
func BenchmarkAdd(b *testing.B) {
	filter, probe := setupFilter(15000, 1604)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		filter.Add(probe)
	}
}
// BenchmarkContains measures membership lookups against a full filter.
func BenchmarkContains(b *testing.B) {
	filter, probe := setupFilter(15000, 1604)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		filter.Contains(probe)
	}
}
// setupFilter builds a filter filled to capacity with synthetic entries and
// returns it together with one additional probe entry.
func setupFilter(filterSize int, byteArraySize int) (*ByteArrayFilter, []byte) {
	filter := NewByteArrayFilter(filterSize)
	for j := 0; j < filterSize; j++ {
		entry := make([]byte, byteArraySize)
		for i := range entry {
			entry[(i+j)%byteArraySize] = byte((i + j) % 128)
		}
		filter.Add(entry)
	}

	probe := make([]byte, byteArraySize)
	for i := range probe {
		probe[i] = byte(i % 128)
	}

	return filter, probe
}
package settings
import (
"sync"
"github.com/iotaledger/goshimmer/packages/database"
)
// settingsDatabase is the lazily opened "settings" key/value store.
var settingsDatabase database.Database

// lazyInit guards the one-time initialization of settingsDatabase.
var lazyInit sync.Once
// Get reads the value stored under key, opening the settings database on
// first use.
func Get(key []byte) ([]byte, error) {
	lazyInit.Do(initDb)
	return settingsDatabase.Get(key)
}
// Set stores value under key, opening the settings database on first use.
func Set(key []byte, value []byte) error {
	lazyInit.Do(initDb)
	return settingsDatabase.Set(key, value)
}
// initDb opens the "settings" database; it panics if the database cannot be
// opened, since settings access cannot proceed without it.
func initDb() {
	db, err := database.Get("settings")
	if err != nil {
		panic(err)
	}
	settingsDatabase = db
}
package ternary
// BCTrit is a binary-coded trit: one bit position per lane in each of Lo/Hi.
// Per the mux/demux code in this package, the encoding is
// -1 => (Lo=1, Hi=0), 0 => (Lo=1, Hi=1), 1 => (Lo=0, Hi=1).
type BCTrit struct {
	Lo uint
	Hi uint
}
// BCTrits is a sequence of binary-coded trits; Lo[i]/Hi[i] together encode
// trit i across all lanes.
type BCTrits struct {
	Lo []uint
	Hi []uint
}
package ternary
import (
. "github.com/iotaledger/iota.go/trinary"
)
// BCTernaryDemultiplexer extracts individual trit slices (one per bit-lane)
// from a binary-coded-ternary result.
type BCTernaryDemultiplexer struct {
	bcTrits BCTrits
}
// NewBCTernaryDemultiplexer creates a demultiplexer over the given
// binary-coded trits.
func NewBCTernaryDemultiplexer(bcTrits BCTrits) *BCTernaryDemultiplexer {
	return &BCTernaryDemultiplexer{bcTrits: bcTrits}
}
// Get decodes the trits carried on bit-lane `index` of the binary-coded data.
func (this *BCTernaryDemultiplexer) Get(index int) Trits {
	length := len(this.bcTrits.Lo)
	result := make(Trits, length)

	for i := 0; i < length; i++ {
		low := (this.bcTrits.Lo[i] >> uint(index)) & 1
		hi := (this.bcTrits.Hi[i] >> uint(index)) & 1

		switch {
		case low == 1 && hi == 0:
			result[i] = -1
		case low == 0 && hi == 1:
			result[i] = 1
		default:
			// (1,1) encodes zero; (0,0) is unused and also maps to zero
			result[i] = 0
		}
	}

	return result
}
package ternary
import (
"errors"
"strconv"
. "github.com/iotaledger/iota.go/trinary"
)
// BCTernaryMultiplexer packs multiple equal-length trit slices into one
// binary-coded representation, one bit-lane per added slice.
type BCTernaryMultiplexer struct {
	trinaries []Trits
}
// NewBCTernaryMultiplexer creates an empty multiplexer.
func NewBCTernaryMultiplexer() *BCTernaryMultiplexer {
	return &BCTernaryMultiplexer{make([]Trits, 0)}
}
// Add appends trits as the next bit-lane and returns that lane's index.
func (this *BCTernaryMultiplexer) Add(trits Trits) int {
	index := len(this.trinaries)
	this.trinaries = append(this.trinaries, trits)

	return index
}
// Get returns the trit slice that was added at the given lane index.
func (this *BCTernaryMultiplexer) Get(index int) Trits {
	return this.trinaries[index]
}
// Extract packs all added trit slices into one BCTrits value: bit j of
// Lo[i]/Hi[i] encodes trit i of the j-th added slice (-1 => Lo, 1 => Hi,
// 0 => both). It returns an error on any value outside {-1, 0, 1}.
func (this *BCTernaryMultiplexer) Extract() (BCTrits, error) {
	trinariesCount := len(this.trinaries)
	// Guard: the original indexed this.trinaries[0] unconditionally and
	// panicked on an empty multiplexer.
	if trinariesCount == 0 {
		return BCTrits{Lo: []uint{}, Hi: []uint{}}, nil
	}
	tritsCount := len(this.trinaries[0])

	result := BCTrits{
		Lo: make([]uint, tritsCount),
		Hi: make([]uint, tritsCount),
	}

	for i := 0; i < tritsCount; i++ {
		bcTrit := &BCTrit{0, 0}
		for j := 0; j < trinariesCount; j++ {
			switch this.trinaries[j][i] {
			case -1:
				bcTrit.Lo |= 1 << uint(j)
			case 1:
				bcTrit.Hi |= 1 << uint(j)
			case 0:
				bcTrit.Lo |= 1 << uint(j)
				bcTrit.Hi |= 1 << uint(j)
			default:
				return result, errors.New("Invalid trit #" + strconv.Itoa(i) + " in trits #" + strconv.Itoa(j))
			}
		}
		result.Lo[i] = bcTrit.Lo
		result.Hi[i] = bcTrit.Hi
	}

	return result, nil
}
package bundleprocessor
import (
"github.com/iotaledger/goshimmer/packages/curl"
"github.com/iotaledger/goshimmer/packages/errors"
"github.com/iotaledger/goshimmer/packages/model/bundle"
"github.com/iotaledger/goshimmer/packages/model/value_transaction"
"github.com/iotaledger/goshimmer/packages/workerpool"
"github.com/iotaledger/iota.go/curl"
"github.com/iotaledger/iota.go/signing"
"github.com/iotaledger/iota.go/trinary"
)
......@@ -38,12 +38,10 @@ func CalculateBundleHash(transactions []*value_transaction.ValueTransaction) tri
copy(concatenatedBundleEssences[value_transaction.BUNDLE_ESSENCE_SIZE*i:value_transaction.BUNDLE_ESSENCE_SIZE*(i+1)], bundleTransaction.GetBundleEssence(lastInputAddress != bundleTransaction.GetAddress()))
}
var bundleHash = make(trinary.Trits, 243)
hasher := curl.NewCurl(243, 81)
hasher.Absorb(concatenatedBundleEssences, 0, len(concatenatedBundleEssences))
hasher.Squeeze(bundleHash, 0, 243)
bundleHash, err := curl.HashTrits(concatenatedBundleEssences)
if err != nil {
panic(err)
}
return trinary.MustTritsToTrytes(bundleHash)
}
......
package validator
import (
"github.com/iotaledger/goshimmer/packages/model/bundle"
"github.com/iotaledger/goshimmer/packages/model/value_transaction"
"github.com/iotaledger/goshimmer/plugins/bundleprocessor"
"github.com/iotaledger/hive.go/events"
"github.com/iotaledger/hive.go/logger"
"github.com/iotaledger/hive.go/node"
"github.com/iotaledger/iota.go/kerl"
"github.com/iotaledger/iota.go/signing"
. "github.com/iotaledger/iota.go/trinary"
)
// PLUGIN registers the signature validator as an enabled node plugin.
var PLUGIN = node.NewPlugin("Validator", node.Enabled, configure, run)

// log is the plugin's logger; assigned in configure.
var log *logger.Logger
// validateSignatures verifies the signature of every input (negative-value)
// transaction in txs against bundleHash. It returns false on the first
// invalid signature; err is non-nil only when validation itself fails.
func validateSignatures(bundleHash Hash, txs []*value_transaction.ValueTransaction) (bool, error) {
	for i, tx := range txs {
		// ignore all non-input transactions
		if tx.GetValue() >= 0 {
			continue
		}
		address := tx.GetAddress()
		// it is unknown how many fragments there will be
		fragments := []Trytes{tx.GetSignatureMessageFragment()}
		// each consecutive meta transaction (value 0) with the same address
		// contributes another signature fragment
		for j := i; j < len(txs)-1; j++ {
			otherTx := txs[j+1]
			if otherTx.GetValue() != 0 || otherTx.GetAddress() != address {
				break
			}
			fragments = append(fragments, otherTx.GetSignatureMessageFragment())
		}
		// validate all the fragments against the address using Kerl
		valid, err := signing.ValidateSignatures(address, fragments, bundleHash, kerl.NewKerl())
		if err != nil {
			return false, err
		}
		if !valid {
			return false, nil
		}
	}
	return true, nil
}
// configure sets up the plugin logger and attaches the signature check to the
// BundleSolid event.
func configure(plugin *node.Plugin) {
	log = logger.NewLogger("Validator")

	bundleprocessor.Events.BundleSolid.Attach(events.NewClosure(func(b *bundle.Bundle, txs []*value_transaction.ValueTransaction) {
		// signature are verified against the bundle hash
		valid, err := validateSignatures(b.GetBundleEssenceHash(), txs)
		if valid {
			return
		}
		if err != nil {
			log.Errorf("Invalid signature: %s", err.Error())
		} else {
			log.Error("Invalid signature")
		}
	}))
}
// run is the plugin's run stage; the validator is purely event-driven, so
// there is nothing to start here.
func run(*node.Plugin) {
}
package validator
import (
"fmt"
"github.com/iotaledger/iota.go/address"
. "github.com/iotaledger/iota.go/consts"
"github.com/iotaledger/iota.go/signing"
. "github.com/iotaledger/iota.go/trinary"
)
const (
	// exampleHash is the all-9s (zero) hash, reused as a dummy seed below.
	exampleHash = "999999999999999999999999999999999999999999999999999999999999999999999999999999999"
	exampleSeed = exampleHash
	// NOTE(review): "exmaple" is a typo for "example"; kept as-is because the
	// name is referenced elsewhere in this file.
	exmapleIndex = 0
	exampleSec   = SecurityLevelLow
)
// signature creates bundle signature fragments for the given seed, address
// index and bundle hash. Each fragment after the first must go into its own
// meta transaction with value = 0. One fragment is produced per security level.
func signature(seed Trytes, index uint64, sec SecurityLevel, bundleHash Hash) []Trytes {
	// compute seed based on address index
	subseed, _ := signing.Subseed(seed, index)
	// generate the private key
	prvKey, _ := signing.Key(subseed, sec)
	normalizedBundleHash := signing.NormalizedBundleHash(bundleHash)
	signatureFragments := make([]Trytes, sec)
	for i := 0; i < int(sec); i++ {
		// each security level signs one third of the (normalized) bundle hash
		signedFragTrits, _ := signing.SignatureFragment(
			normalizedBundleHash[i*HashTrytesSize/3:(i+1)*HashTrytesSize/3],
			prvKey[i*KeyFragmentLength:(i+1)*KeyFragmentLength],
		)
		signatureFragments[i] = MustTritsToTrytes(signedFragTrits)
	}
	return signatureFragments
}
// ExamplePLUGIN demonstrates generating an address and the matching bundle
// signature fragment for the dummy seed.
func ExamplePLUGIN() {
	// corresponding address to validate against.
	addr, _ := address.GenerateAddress(exampleSeed, exmapleIndex, exampleSec)
	fmt.Println(addr)
	// compute the signature fragments which would be added to the (meta) transactions
	signatureFragments := signature(exampleSeed, exmapleIndex, exampleSec, exampleHash)
	fmt.Println(signatureFragments[0])
	// Output:
	// BSIXFJENGVJSOWPVHVALMPOPO9PUKHXDQI9VDELCBJXN9TCNQPTFEDMPQCVBOJSZUHEOABYYYAT9IAHHY
	// GHHKPBXOOBOEHGGEEKYPH9MANWEKSQTQJFJ9KUTMJQAVITYRZMNLUESQARNHAWUJAPPZSQ9A9RUKABCE9KZPJDUEHVZEOSCQMTCC9AWBGWZLZEXMJ9YOQUVIBGMXSINCOLUATYDDUBAALHCBIONNRQIVIPUFPOIFHYRBFBGXXNVYXFZUSTTA9LYGGITTAJCVDE9GCFRGIOTXLQ9ZJDLONDLZ9OPS9TNYVKLTCGFBH9QPJWLIGADWMTJVCLAUCOZFDSRRCAMVWYFXRPGPMIOPIW9GBWANVSMPONQOTNLLYYHXAMZMMNRHMRXHEIXPVNORNGZZ9ZAU9RAWASOZNIBKDWYZWKCMLEUE9UVDHZ9XXGPXZABB9FGTNDTDFTYCKLKRRC9GZFKHKDGAWPBWEUPPWISYBBNZCIBERPXTMZPZHPKKUQUPBIJBIKZAGFHDDNAGCRQMWOMLUMAYKRBMHPMDWZK9JRBDWCJCBJQYMDUBNKOIRSJSVTCNKROZ9KLFBZLOXQOASLCFETCNZRPZULOABOFCUO9WKNQILLLTQ9GWVDBASBGSKUHFHRXOKQIBRCLUYZBZMTXTIG9BJNYHTJQQOECXOWLIDOYKMFJWKRCYW99VZILSPU9I9ZSTTBZVGISUHPCWLGKCFNLIHJNCL9OWQDNAKJAGRKTGCTDRHXVAYXOHNFVJYBMZLMXV9VINNIAWONYDYOKHHMOFFEOOVBMVMYABWRWLZTWJECKKAGPCIMUDZZIEJCFBXFIYKDRMWZIOEUZNLOXZJRDHVVKOTJWMLTIXVIRJSXUBLFGOCCLEIZVCDYD9FEMCRUOERPRDFGUJSALRSOBN9J9XDTUAJZFLHUGQI9MCXZCYWTTIHNQUPUYPDRJLRZG9HAXHYQDSSCQNPTBYKNQUWZDE9QUESZJASRXHNW9OKAVUKLLMVGOJJRZCPRXSRYUECLNQEFIHI9S9NNEN9KACVIKCZYDEKCDNUASUJWMTVLSPBOBQMQEMZJXJVQAMUGBTMNWEWVJSXNZKIAADSQCCLISYSUZICSIVXZUG9MTICGWXKXKJDW9TOUBS9BTOUFUKWEBVIIJTGD9IBLRHBCPICWSZQNJQERTBOZGLJFCXKGQTAHIWKOSGHRMMWXABQYHVHOPG9XDIXMIRBXHOSYBCHSFWORNLUD9JAB9ICBIPXYVLIXYNRHJVEDMIRSAGXKZKSFZADJ9GA9DGJZAJTXZGIKRXVBCCBGJPJWJJZXZRQNWLEUZEFTWOXUBTAGDPPKKPKRYPGXVSRWLRNEDAXHZYT9DRN9L9ZWXPTTOSKMGTPQQXHACAKESRQXVXXNOLIATRKDGGJNIDWWYKQSLTC9ERTPMNXQHZNVNSBGIRRQHMOCOGDWPQAU9WPRSGZMPXZWQADUFUAWVGESLIWZNV9WNANDMZAOLXIHAOSFBADWVVAHMJVFNX9BGMMYGMJCUOYCSKJWIUMYHQFQXCFQXQNB9VTBLAYGKUZLFH9UVWIQJVLMLOZDLLIPJZSNXBPWAKKZWKCVSWUSBSQLBIAX9SQGMNPCJWTQDQEASSWWCSTVJRFDBPBLNYU9CNFUYINVMQPJZGKKUH9QBMUVWFSLPXWKBBWKNLMHGCEMJWCTNXZYWCFXYU9XLTWDSROJDTCRARMBNYDDD99HCFMXMUCO9NJSRA9G9HGWRTWNDBDQLBTCNYIVRMWRWPDJDDYCDODGEBNFTNINPNMZYMJJHVZSNEIJOAPGHAIVCZHQIULTRIZ9ML9LCWTQVGLBKKBGJYZTOZZIYUBCBKHKYUHCFGZKDERTWYHNYWSWLGPUGRB9WNQTHOMBFPKUQZREUQCNXL9MFSZCNBN9PTAVCERMWTTFDZL9BJQMC9OUBWGDTURAEYTYRDNFUBATOWFSVNXJC9JUPARMU9MINY9RWRHIXBPNIUADFAEP9F9FWNJNRPNGLWHRYYCV9ZIWBOUZPFZTWDLOCNOYZQLWFJHZ99ZBLUDSIQBJOJXMQJB
	// UCYYMROBCJJJNCETVUYRXKHAWGUBIWOKQXOIOYBQKNDXZCKXQZLWEMXYLJPODRMOQUYOAATZZQ9JZDR9KPIHRQKIEAQNO9OVXNHDFCUUIZRQDWYGKUAYIGHGIIJIOIERLVNDUEBZUAQGDZMWNGXQPYSNWUEGF9BQDFJEQRPEGFGJTQFWO9PWECFGNDH9LW
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment