diff --git a/packages/binary/valuetransfer/balance/color.go b/packages/binary/valuetransfer/balance/color.go
index b50ccacfd5458773a0167a3adbdf26a279485a6c..5f7ef740c003b927bad08fac65f5b63ad3026d2b 100644
--- a/packages/binary/valuetransfer/balance/color.go
+++ b/packages/binary/valuetransfer/balance/color.go
@@ -34,3 +34,5 @@ func (color Color) String() string {
}
var COLOR_IOTA Color = [32]byte{}
+
+var COLOR_NEW = [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
diff --git a/packages/binary/valuetransfer/ledgerstate/ledgerstate.go b/packages/binary/valuetransfer/ledgerstate/ledgerstate.go
new file mode 100644
index 0000000000000000000000000000000000000000..0bd374e50d1f28e322b5ab3f0e02df5c03264a64
--- /dev/null
+++ b/packages/binary/valuetransfer/ledgerstate/ledgerstate.go
@@ -0,0 +1,4 @@
+package ledgerstate
+
+type LedgerState struct {
+}
diff --git a/packages/binary/valuetransfer/tangle/branch.go b/packages/binary/valuetransfer/tangle/branch.go
new file mode 100644
index 0000000000000000000000000000000000000000..ad0af4da4332553891c6ee8f8d2fc9c26d980c33
--- /dev/null
+++ b/packages/binary/valuetransfer/tangle/branch.go
@@ -0,0 +1,84 @@
+package tangle
+
+import (
+ "github.com/iotaledger/hive.go/objectstorage"
+)
+
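+// Branch represents a part of the ledger state that transaction outputs get booked into. A Branch with more than one
+// parent is an aggregated Branch that combines the effects of its parents.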
+type Branch struct {
+ objectstorage.StorableObjectFlags
+
+ id BranchId
+ parentBranches []BranchId
+}
+
+func (branch *Branch) Update(other objectstorage.StorableObject) {
+ panic("implement me")
+}
+
+func (branch *Branch) ObjectStorageKey() []byte {
+ panic("implement me")
+}
+
+func (branch *Branch) ObjectStorageValue() []byte {
+ panic("implement me")
+}
+
+func (branch *Branch) UnmarshalObjectStorageValue(valueBytes []byte) (err error, consumedBytes int) {
+ panic("implement me")
+}
+
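+// NewBranch creates a new Branch with the given id and parent branches.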
+func NewBranch(id BranchId, parentBranches []BranchId) *Branch {
+	return &Branch{id: id, parentBranches: parentBranches}
+}
+
+func (branch *Branch) Id() BranchId {
+ return branch.id
+}
+
+func (branch *Branch) ParentBranches() []BranchId {
+ return branch.parentBranches
+}
+
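+// IsAggregated returns true if the Branch has more than one parent and therefore represents the aggregation of
+// multiple Branches.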
+func (branch *Branch) IsAggregated() bool {
+ return len(branch.parentBranches) > 1
+}
+
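+// CachedBranch is a type-safe wrapper around the generic CachedObject returned by the object storage.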
+type CachedBranch struct {
+ objectstorage.CachedObject
+}
+
+func (cachedBranches *CachedBranch) Unwrap() *Branch {
+ if untypedObject := cachedBranches.Get(); untypedObject == nil {
+ return nil
+ } else {
+ if typedObject := untypedObject.(*Branch); typedObject == nil || typedObject.IsDeleted() {
+ return nil
+ } else {
+ return typedObject
+ }
+ }
+}
+
+func (cachedBranches *CachedBranch) Consume(consumer func(branch *Branch), forceRelease ...bool) (consumed bool) {
+ return cachedBranches.CachedObject.Consume(func(object objectstorage.StorableObject) {
+ consumer(object.(*Branch))
+ }, forceRelease...)
+}
+
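+// CachedBranches represents a collection of CachedBranch objects, indexed by their BranchId.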
+type CachedBranches map[BranchId]*CachedBranch
+
+func (cachedBranches CachedBranches) Consume(consumer func(branch *Branch)) (consumed bool) {
+ for _, cachedBranch := range cachedBranches {
+ consumed = cachedBranch.Consume(func(output *Branch) {
+ consumer(output)
+ }) || consumed
+ }
+
+ return
+}
+
+func (cachedBranches CachedBranches) Release(force ...bool) {
+ for _, cachedBranch := range cachedBranches {
+ cachedBranch.Release(force...)
+ }
+}
diff --git a/packages/binary/valuetransfer/tangle/branchid.go b/packages/binary/valuetransfer/tangle/branchid.go
new file mode 100644
index 0000000000000000000000000000000000000000..ba7f6f93039742e87906f7418fe8e3f23b0e2733
--- /dev/null
+++ b/packages/binary/valuetransfer/tangle/branchid.go
@@ -0,0 +1,84 @@
+package tangle
+
+import (
+ "fmt"
+
+ "github.com/iotaledger/hive.go/marshalutil"
+ "github.com/mr-tron/base58"
+
+ "github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
+)
+
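+// BranchId is the data type that represents the identifier of a Branch.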
+type BranchId [BranchIdLength]byte
+
+var (
+ UndefinedBranchId = BranchId{}
+ MasterBranchId = BranchId{1}
+)
+
+// NewBranchId creates a new BranchId from a transaction Id.
+func NewBranchId(transactionId transaction.Id) (branchId BranchId) {
+ copy(branchId[:], transactionId.Bytes())
+
+ return
+}
+
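+// BranchIdFromBytes unmarshals a BranchId from a sequence of bytes.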
+func BranchIdFromBytes(bytes []byte) (result BranchId, err error, consumedBytes int) {
+ // parse the bytes
+ marshalUtil := marshalutil.New(bytes)
+ branchIdBytes, idErr := marshalUtil.ReadBytes(BranchIdLength)
+ if idErr != nil {
+ err = idErr
+
+ return
+ }
+ copy(result[:], branchIdBytes)
+ consumedBytes = marshalUtil.ReadOffset()
+
+ return
+}
+
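+// BranchIdFromBase58 creates a BranchId from a base58 encoded string.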
+func BranchIdFromBase58(base58String string) (branchId BranchId, err error) {
+ // decode string
+ bytes, err := base58.Decode(base58String)
+ if err != nil {
+ return
+ }
+
+ // sanitize input
+ if len(bytes) != BranchIdLength {
+ err = fmt.Errorf("base58 encoded string does not match the length of a BranchId")
+
+ return
+ }
+
+ // copy bytes to result
+ copy(branchId[:], bytes)
+
+ return
+}
+
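+// ParseBranchId unmarshals a BranchId using the given marshalUtil (for easier unmarshaling of compound structures).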
+func ParseBranchId(marshalUtil *marshalutil.MarshalUtil) (result BranchId, err error) {
+ var branchIdBytes []byte
+ if branchIdBytes, err = marshalUtil.ReadBytes(BranchIdLength); err != nil {
+ return
+ }
+
+ copy(result[:], branchIdBytes)
+
+ return
+}
+
+// Bytes marshals the BranchId into a sequence of bytes.
+func (branchId BranchId) Bytes() []byte {
+ return branchId[:]
+}
+
+// String creates a base58 encoded version of the BranchId.
+func (branchId BranchId) String() string {
+ return base58.Encode(branchId[:])
+}
+
+// BranchIdLength encodes the length of a branch identifier - since branches get created by transactions, it has the
+// same length as a transaction Id.
+const BranchIdLength = transaction.IdLength
diff --git a/packages/binary/valuetransfer/tangle/objectstorage.go b/packages/binary/valuetransfer/tangle/objectstorage.go
index e02ee3b34b926503949f082b654e8dc4ae5ba5b5..421fdb41c1fb1c421e4aa326dc2bc913a4e75f9b 100644
--- a/packages/binary/valuetransfer/tangle/objectstorage.go
+++ b/packages/binary/valuetransfer/tangle/objectstorage.go
@@ -4,7 +4,6 @@ import (
"github.com/iotaledger/hive.go/objectstorage"
"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/payload"
- "github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
)
const (
@@ -43,7 +42,7 @@ func osAttachmentFactory(key []byte) (objectstorage.StorableObject, error, int)
}
func osOutputFactory(key []byte) (objectstorage.StorableObject, error, int) {
- return transaction.OutputFromStorageKey(key)
+ return OutputFromStorageKey(key)
}
func osMissingOutputFactory(key []byte) (objectstorage.StorableObject, error, int) {
diff --git a/packages/binary/valuetransfer/transaction/output.go b/packages/binary/valuetransfer/tangle/output.go
similarity index 76%
rename from packages/binary/valuetransfer/transaction/output.go
rename to packages/binary/valuetransfer/tangle/output.go
index 78535f4f246b161c54bd13dad520ca3f9e0bad36..1696031bda99b75fb6184677f00c12f600a06fed 100644
--- a/packages/binary/valuetransfer/transaction/output.go
+++ b/packages/binary/valuetransfer/tangle/output.go
@@ -1,4 +1,4 @@
-package transaction
+package tangle
import (
"sync"
@@ -6,33 +6,40 @@ import (
"github.com/iotaledger/hive.go/marshalutil"
"github.com/iotaledger/hive.go/objectstorage"
+ "github.com/iotaledger/hive.go/stringify"
"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/balance"
+ "github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
)
-var OutputKeyPartitions = objectstorage.PartitionKey([]int{address.Length, IdLength}...)
+var OutputKeyPartitions = objectstorage.PartitionKey([]int{address.Length, transaction.IdLength}...)
// Output represents the output of a Transaction and contains the balances and the identifiers for this output.
type Output struct {
address address.Address
- transactionId Id
+ transactionId transaction.Id
+ branchId BranchId
solid bool
solidificationTime time.Time
+ firstConsumer transaction.Id
+ consumerCount int
balances []*balance.Balance
solidMutex sync.RWMutex
solidificationTimeMutex sync.RWMutex
+ consumerMutex sync.RWMutex
objectstorage.StorableObjectFlags
storageKey []byte
}
// NewOutput creates an Output that contains the balances and identifiers of a Transaction.
-func NewOutput(address address.Address, transactionId Id, balances []*balance.Balance) *Output {
+func NewOutput(address address.Address, transactionId transaction.Id, branchId BranchId, balances []*balance.Balance) *Output {
return &Output{
address: address,
transactionId: transactionId,
+ branchId: branchId,
solid: false,
solidificationTime: time.Time{},
balances: balances,
@@ -93,26 +100,35 @@ func OutputFromStorageKey(keyBytes []byte, optionalTargetObject ...*Output) (res
if err != nil {
return
}
- result.transactionId, err = ParseId(marshalUtil)
+ result.transactionId, err = transaction.ParseId(marshalUtil)
if err != nil {
return
}
- result.storageKey = marshalutil.New(keyBytes[:OutputIdLength]).Bytes(true)
+ result.storageKey = marshalutil.New(keyBytes[:transaction.OutputIdLength]).Bytes(true)
consumedBytes = marshalUtil.ReadOffset()
return
}
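+// Id returns the identifier of this Output, which is derived from its address and the id of the transaction that
+// created it.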
+func (output *Output) Id() transaction.OutputId {
+ return transaction.NewOutputId(output.Address(), output.TransactionId())
+}
+
// Address returns the address that this output belongs to.
func (output *Output) Address() address.Address {
return output.address
}
// TransactionId returns the id of the Transaction, that created this output.
-func (output *Output) TransactionId() Id {
+func (output *Output) TransactionId() transaction.Id {
return output.transactionId
}
+// BranchId returns the id of the ledger state branch that this output was booked in.
+func (output *Output) BranchId() BranchId {
+ return output.branchId
+}
+
// Solid returns true if the output has been marked as solid.
func (output *Output) Solid() bool {
output.solidMutex.RLock()
@@ -155,6 +171,20 @@ func (output *Output) SolidificationTime() time.Time {
return output.solidificationTime
}
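+// RegisterConsumer registers the given transaction as a consumer of this Output and returns the number of consumers
+// that were registered before, together with the id of the first consumer.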
+func (output *Output) RegisterConsumer(consumer transaction.Id) (consumerCount int, firstConsumerId transaction.Id) {
+ output.consumerMutex.Lock()
+ defer output.consumerMutex.Unlock()
+
+ if consumerCount = output.consumerCount; consumerCount == 0 {
+ output.firstConsumer = consumer
+ }
+ output.consumerCount++
+
+ firstConsumerId = output.firstConsumer
+
+ return
+}
+
// Balances returns the colored balances (color + balance) that this output contains.
func (output *Output) Balances() []*balance.Balance {
return output.balances
@@ -171,7 +201,7 @@ func (output *Output) Bytes() []byte {
// ObjectStorageKey returns the key that is used to store the object in the database.
// It is required to match StorableObject interface.
func (output *Output) ObjectStorageKey() []byte {
- return marshalutil.New(OutputIdLength).
+ return marshalutil.New(transaction.OutputIdLength).
WriteBytes(output.address.Bytes()).
WriteBytes(output.transactionId.Bytes()).
Bytes()
@@ -184,7 +214,8 @@ func (output *Output) ObjectStorageValue() []byte {
balanceCount := len(output.balances)
// initialize helper
- marshalUtil := marshalutil.New(marshalutil.BOOL_SIZE + marshalutil.TIME_SIZE + marshalutil.UINT32_SIZE + balanceCount*balance.Length)
+ marshalUtil := marshalutil.New(BranchIdLength + marshalutil.BOOL_SIZE + marshalutil.TIME_SIZE + marshalutil.UINT32_SIZE + balanceCount*balance.Length)
+ marshalUtil.WriteBytes(output.branchId.Bytes())
marshalUtil.WriteBool(output.solid)
marshalUtil.WriteTime(output.solidificationTime)
marshalUtil.WriteUint32(uint32(balanceCount))
@@ -199,6 +230,9 @@ func (output *Output) ObjectStorageValue() []byte {
// being stored in its key rather than the content of the database to reduce storage requirements.
func (output *Output) UnmarshalObjectStorageValue(data []byte) (err error, consumedBytes int) {
marshalUtil := marshalutil.New(data)
+ if output.branchId, err = ParseBranchId(marshalUtil); err != nil {
+ return
+ }
if output.solid, err = marshalUtil.ReadBool(); err != nil {
return
}
@@ -227,6 +261,17 @@ func (output *Output) Update(other objectstorage.StorableObject) {
panic("this object should never be updated")
}
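+// String returns a human readable representation of this Output (mostly used for debugging).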
+func (output *Output) String() string {
+ return stringify.Struct("Output",
+ stringify.StructField("address", output.Address()),
+ stringify.StructField("transactionId", output.TransactionId()),
+ stringify.StructField("branchId", output.BranchId()),
+ stringify.StructField("solid", output.Solid()),
+ stringify.StructField("solidificationTime", output.SolidificationTime()),
+ stringify.StructField("balances", output.Balances()),
+ )
+}
+
// define contract (ensure that the struct fulfills the given interface)
var _ objectstorage.StorableObject = &Output{}
@@ -254,7 +299,7 @@ func (cachedOutput *CachedOutput) Consume(consumer func(output *Output)) (consum
})
}
-type CachedOutputs []*CachedOutput
+type CachedOutputs map[transaction.OutputId]*CachedOutput
func (cachedOutputs CachedOutputs) Consume(consumer func(output *Output)) (consumed bool) {
for _, cachedOutput := range cachedOutputs {
@@ -266,4 +311,10 @@ func (cachedOutputs CachedOutputs) Consume(consumer func(output *Output)) (consu
return
}
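+// Release releases all of the CachedOutputs in the collection.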
+func (cachedOutputs CachedOutputs) Release(force ...bool) {
+ for _, cachedOutput := range cachedOutputs {
+ cachedOutput.Release(force...)
+ }
+}
+
// endregion ///////////////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/packages/binary/valuetransfer/transaction/output_test.go b/packages/binary/valuetransfer/tangle/output_test.go
similarity index 84%
rename from packages/binary/valuetransfer/transaction/output_test.go
rename to packages/binary/valuetransfer/tangle/output_test.go
index d926bc72f4ac9aa42a5e498b25d0cecca6222de9..690668f158acc632f90ea56af2273f6e4dfa5191 100644
--- a/packages/binary/valuetransfer/transaction/output_test.go
+++ b/packages/binary/valuetransfer/tangle/output_test.go
@@ -1,4 +1,4 @@
-package transaction
+package tangle
import (
"testing"
@@ -8,13 +8,14 @@ import (
"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/balance"
+ "github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
)
func TestNew(t *testing.T) {
randomAddress := address.Random()
- randomTransactionId := RandomId()
+ randomTransactionId := transaction.RandomId()
- output := NewOutput(randomAddress, randomTransactionId, []*balance.Balance{
+ output := NewOutput(randomAddress, randomTransactionId, MasterBranchId, []*balance.Balance{
balance.New(balance.COLOR_IOTA, 1337),
})
diff --git a/packages/binary/valuetransfer/tangle/tangle.go b/packages/binary/valuetransfer/tangle/tangle.go
index f19490fdbc289cf388f1aba4f58b35ad9919cfd3..a1aef9b378268a9383397b412106ddbdd3e02ada 100644
--- a/packages/binary/valuetransfer/tangle/tangle.go
+++ b/packages/binary/valuetransfer/tangle/tangle.go
@@ -2,12 +2,17 @@ package tangle
import (
"container/list"
+ "fmt"
+ "math"
+ "sort"
"time"
"github.com/dgraph-io/badger/v2"
"github.com/iotaledger/hive.go/async"
+ "github.com/iotaledger/hive.go/marshalutil"
"github.com/iotaledger/hive.go/objectstorage"
"github.com/iotaledger/hive.go/types"
+ "golang.org/x/crypto/blake2b"
"github.com/iotaledger/goshimmer/packages/binary/storageprefix"
"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
@@ -28,11 +33,13 @@ type Tangle struct {
outputStorage *objectstorage.ObjectStorage
consumerStorage *objectstorage.ObjectStorage
missingOutputStorage *objectstorage.ObjectStorage
+ branchStorage *objectstorage.ObjectStorage
Events Events
storePayloadWorkerPool async.WorkerPool
solidifierWorkerPool async.WorkerPool
+ bookerWorkerPool async.WorkerPool
cleanupWorkerPool async.WorkerPool
}
@@ -48,7 +55,7 @@ func New(badgerInstance *badger.DB) (result *Tangle) {
// transaction related storage
attachmentStorage: osFactory.New(osAttachment, osAttachmentFactory, objectstorage.CacheTime(time.Second)),
- outputStorage: osFactory.New(osOutput, osOutputFactory, transaction.OutputKeyPartitions, objectstorage.CacheTime(time.Second)),
+ outputStorage: osFactory.New(osOutput, osOutputFactory, OutputKeyPartitions, objectstorage.CacheTime(time.Second)),
missingOutputStorage: osFactory.New(osMissingOutput, osMissingOutputFactory, MissingOutputKeyPartitions, objectstorage.CacheTime(time.Second)),
consumerStorage: osFactory.New(osConsumer, osConsumerFactory, ConsumerPartitionKeys, objectstorage.CacheTime(time.Second)),
@@ -78,8 +85,8 @@ func (tangle *Tangle) GetTransactionMetadata(transactionId transaction.Id) *Cach
return &CachedTransactionMetadata{CachedObject: tangle.missingOutputStorage.Load(transactionId.Bytes())}
}
-func (tangle *Tangle) GetTransactionOutput(outputId transaction.OutputId) *transaction.CachedOutput {
- return &transaction.CachedOutput{CachedObject: tangle.outputStorage.Load(outputId.Bytes())}
+func (tangle *Tangle) GetTransactionOutput(outputId transaction.OutputId) *CachedOutput {
+ return &CachedOutput{CachedObject: tangle.outputStorage.Load(outputId.Bytes())}
}
// GetApprovers retrieves the approvers of a payload from the object storage.
@@ -94,7 +101,7 @@ func (tangle *Tangle) GetApprovers(payloadId payload.Id) CachedApprovers {
return approvers
}
-// GetApprovers retrieves the approvers of a payload from the object storage.
+// GetConsumers retrieves the consumers of a transaction output from the object storage.
func (tangle *Tangle) GetConsumers(outputId transaction.OutputId) CachedConsumers {
consumers := make(CachedConsumers, 0)
tangle.consumerStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool {
@@ -200,12 +207,6 @@ func (tangle *Tangle) storeTransaction(tx *transaction.Transaction) (cachedTrans
return result
})}
- if transactionStored {
- tx.Outputs().ForEach(func(address address.Address, balances []*balance.Balance) {
- tangle.outputStorage.Store(transaction.NewOutput(address, tx.Id(), balances))
- })
- }
-
return
}
@@ -232,18 +233,18 @@ func (tangle *Tangle) storeTransactionReferences(tx *transaction.Transaction) {
})
}
-// solidifyTransactionWorker is the worker function that solidifies the payloads (recursively from past to present).
-func (tangle *Tangle) solidifyTransactionWorker(cachedPayload *payload.CachedPayload, cachedMetadata *CachedPayloadMetadata, cachedTransactionMetadata *CachedTransactionMetadata) {
- popElementsFromStack := func(stack *list.List) (*payload.CachedPayload, *CachedPayloadMetadata, *CachedTransactionMetadata) {
- currentSolidificationEntry := stack.Front()
- currentCachedPayload := currentSolidificationEntry.Value.([3]interface{})[0]
- currentCachedMetadata := currentSolidificationEntry.Value.([3]interface{})[1]
- currentCachedTransactionMetadata := currentSolidificationEntry.Value.([3]interface{})[2]
- stack.Remove(currentSolidificationEntry)
+func (tangle *Tangle) popElementsFromSolidificationStack(stack *list.List) (*payload.CachedPayload, *CachedPayloadMetadata, *CachedTransactionMetadata) {
+ currentSolidificationEntry := stack.Front()
+ currentCachedPayload := currentSolidificationEntry.Value.([3]interface{})[0]
+ currentCachedMetadata := currentSolidificationEntry.Value.([3]interface{})[1]
+ currentCachedTransactionMetadata := currentSolidificationEntry.Value.([3]interface{})[2]
+ stack.Remove(currentSolidificationEntry)
- return currentCachedPayload.(*payload.CachedPayload), currentCachedMetadata.(*CachedPayloadMetadata), currentCachedTransactionMetadata.(*CachedTransactionMetadata)
- }
+ return currentCachedPayload.(*payload.CachedPayload), currentCachedMetadata.(*CachedPayloadMetadata), currentCachedTransactionMetadata.(*CachedTransactionMetadata)
+}
+// solidifyTransactionWorker is the worker function that solidifies the payloads (recursively from past to present).
+func (tangle *Tangle) solidifyTransactionWorker(cachedPayload *payload.CachedPayload, cachedMetadata *CachedPayloadMetadata, cachedTransactionMetadata *CachedTransactionMetadata) {
// initialize the stack
solidificationStack := list.New()
solidificationStack.PushBack([3]interface{}{cachedPayload, cachedMetadata, cachedTransactionMetadata})
@@ -253,7 +254,7 @@ func (tangle *Tangle) solidifyTransactionWorker(cachedPayload *payload.CachedPay
// execute logic inside a func, so we can use defer to release the objects
func() {
// retrieve cached objects
- currentCachedPayload, currentCachedMetadata, currentCachedTransactionMetadata := popElementsFromStack(solidificationStack)
+ currentCachedPayload, currentCachedMetadata, currentCachedTransactionMetadata := tangle.popElementsFromSolidificationStack(solidificationStack)
defer currentCachedPayload.Release()
defer currentCachedMetadata.Release()
defer currentCachedTransactionMetadata.Release()
@@ -262,118 +263,522 @@ func (tangle *Tangle) solidifyTransactionWorker(cachedPayload *payload.CachedPay
currentPayload := currentCachedPayload.Unwrap()
currentPayloadMetadata := currentCachedMetadata.Unwrap()
currentTransactionMetadata := currentCachedTransactionMetadata.Unwrap()
- currentTransaction := currentPayload.Transaction()
- // abort if any of the retrieved models is nil
- if currentPayload == nil || currentPayloadMetadata == nil || currentTransactionMetadata == nil {
+ // abort if any of the retrieved models is nil or payload is not solid
+ if currentPayload == nil || currentPayloadMetadata == nil || currentTransactionMetadata == nil || !tangle.isPayloadSolid(currentPayload, currentPayloadMetadata) {
return
}
- // abort if the entities are not solid
- if !tangle.isPayloadSolid(currentPayload, currentPayloadMetadata) || !tangle.isTransactionSolid(currentTransaction, currentTransactionMetadata) {
+			// abort if the transaction is not solid or is invalid
+ if transactionSolid, err := tangle.isTransactionSolid(currentPayload.Transaction(), currentTransactionMetadata); !transactionSolid || err != nil {
+ if err != nil {
+ // TODO: TRIGGER INVALID TX + REMOVE TXS THAT APPROVE IT
+ fmt.Println(err)
+ }
+
return
}
// abort if the payload was marked as solid already (if a payload is solid already then the tx is also solid)
- payloadBecameSolid := currentPayloadMetadata.SetSolid(true)
- if !payloadBecameSolid {
+ if !currentPayloadMetadata.SetSolid(true) {
return
}
- // set the transaction related entities to be solid
- transactionBecameSolid := currentTransactionMetadata.SetSolid(true)
- if transactionBecameSolid {
- currentTransaction.Outputs().ForEach(func(address address.Address, balances []*balance.Balance) {
- tangle.GetTransactionOutput(transaction.NewOutputId(address, currentTransaction.Id())).Consume(func(output *transaction.Output) {
- output.SetSolid(true)
- })
- })
- }
-
// ... trigger solid event ...
tangle.Events.PayloadSolid.Trigger(currentCachedPayload, currentCachedMetadata)
// ... and schedule check of approvers
- tangle.GetApprovers(currentPayload.Id()).Consume(func(approver *PayloadApprover) {
- approvingPayloadId := approver.GetApprovingPayloadId()
- approvingCachedPayload := tangle.GetPayload(approvingPayloadId)
-
- approvingCachedPayload.Consume(func(payload *payload.Payload) {
- solidificationStack.PushBack([3]interface{}{
- approvingCachedPayload,
- tangle.GetPayloadMetadata(approvingPayloadId),
- tangle.GetTransactionMetadata(payload.Transaction().Id()),
- })
- })
+			tangle.ForEachApprovers(currentPayload.Id(), func(payload *payload.CachedPayload, payloadMetadata *CachedPayloadMetadata, cachedTransactionMetadata *CachedTransactionMetadata) {
+ solidificationStack.PushBack([3]interface{}{payload, payloadMetadata, cachedTransactionMetadata})
})
- if !transactionBecameSolid {
+			// mark the transaction as solid (and abort if it was marked as solid already)
+ if !currentTransactionMetadata.SetSolid(true) {
return
}
- tangle.Events.TransactionSolid.Trigger(currentTransaction, currentTransactionMetadata)
+ tangle.Events.TransactionSolid.Trigger(currentPayload.Transaction(), currentTransactionMetadata)
- seenTransactions := make(map[transaction.Id]types.Empty)
- currentTransaction.Outputs().ForEach(func(address address.Address, balances []*balance.Balance) {
- tangle.GetTransactionOutput(transaction.NewOutputId(address, currentTransaction.Id())).Consume(func(output *transaction.Output) {
- // trigger events
- })
+ tangle.ForEachConsumers(currentPayload.Transaction(), func(payload *payload.CachedPayload, payloadMetadata *CachedPayloadMetadata, cachedTransactionMetadata *CachedTransactionMetadata) {
+ solidificationStack.PushBack([3]interface{}{payload, payloadMetadata, cachedTransactionMetadata})
+ })
- tangle.GetConsumers(transaction.NewOutputId(address, currentTransaction.Id())).Consume(func(consumer *Consumer) {
- // keep track of the processed transactions (the same transaction can consume multiple outputs)
- if _, transactionSeen := seenTransactions[consumer.TransactionId()]; transactionSeen {
- seenTransactions[consumer.TransactionId()] = types.Void
-
- transactionMetadata := tangle.GetTransactionMetadata(consumer.TransactionId())
-
- // retrieve all the payloads that attached the transaction
- tangle.GetAttachments(consumer.TransactionId()).Consume(func(attachment *Attachment) {
- solidificationStack.PushBack([3]interface{}{
- tangle.GetPayload(attachment.PayloadId()),
- tangle.GetPayloadMetadata(attachment.PayloadId()),
- transactionMetadata,
- })
- })
- }
- })
+ payloadToBook := cachedPayload.Retain()
+ tangle.bookerWorkerPool.Submit(func() {
+ tangle.bookPayloadTransaction(payloadToBook)
})
}()
}
}
-func (tangle *Tangle) isTransactionSolid(transaction *transaction.Transaction, metadata *TransactionMetadata) bool {
- if transaction == nil || transaction.IsDeleted() {
- return false
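+// bookPayloadTransaction books the transaction of the given payload into the ledger state: it registers the
+// transaction as a consumer of its inputs, detects double spends and creates the corresponding outputs. The actual
+// forking into new branches is not implemented yet (the collected information is only printed for now).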
+func (tangle *Tangle) bookPayloadTransaction(cachedPayload *payload.CachedPayload) {
+ payloadToBook := cachedPayload.Unwrap()
+ defer cachedPayload.Release()
+
+ if payloadToBook == nil {
+ return
}
+ transactionToBook := payloadToBook.Transaction()
- if metadata == nil || metadata.IsDeleted() {
- return false
+ consumedBranches := make(map[BranchId]types.Empty)
+ conflictingConsumersToFork := make(map[transaction.Id]types.Empty)
+ createFork := false
+
+ inputsSuccessfullyProcessed := payloadToBook.Transaction().Inputs().ForEach(func(outputId transaction.OutputId) bool {
+ cachedOutput := tangle.GetTransactionOutput(outputId)
+ defer cachedOutput.Release()
+
+ // abort if the output could not be found
+ output := cachedOutput.Unwrap()
+ if output == nil {
+ return false
+ }
+
+ consumedBranches[output.BranchId()] = types.Void
+
+ // continue if we are the first consumer and there is no double spend
+ consumerCount, firstConsumerId := output.RegisterConsumer(transactionToBook.Id())
+ if consumerCount == 0 {
+ return true
+ }
+
+ // fork into a new branch
+ createFork = true
+
+ // also fork the previous consumer
+ if consumerCount == 1 {
+ conflictingConsumersToFork[firstConsumerId] = types.Void
+ }
+
+ return true
+ })
+
+ if !inputsSuccessfullyProcessed {
+ return
}
- if metadata.Solid() {
+ transactionToBook.Outputs().ForEach(func(address address.Address, balances []*balance.Balance) bool {
+ newOutput := NewOutput(address, transactionToBook.Id(), MasterBranchId, balances)
+ newOutput.SetSolid(true)
+ tangle.outputStorage.Store(newOutput)
+
return true
+ })
+
+ fmt.Println(consumedBranches)
+ fmt.Println(MasterBranchId)
+ fmt.Println(createFork)
+}
+
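+// InheritBranches returns the Branch that is inherited when referencing the given branches: the MasterBranch if no
+// branches are given, the Branch itself if exactly one is given, and an aggregated Branch of the deepest common
+// ancestors otherwise.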
+func (tangle *Tangle) InheritBranches(branches ...BranchId) (cachedAggregatedBranch *CachedBranch, err error) {
+ // return the MasterBranch if we have no branches in the parameters
+ if len(branches) == 0 {
+ cachedAggregatedBranch = tangle.GetBranch(MasterBranchId)
+
+ return
}
- // iterate through all transfers and check if they are solid
- return transaction.Inputs().ForEach(tangle.isOutputMarkedAsSolid)
+ if len(branches) == 1 {
+ cachedAggregatedBranch = tangle.GetBranch(branches[0])
+
+ return
+ }
+
+ // filter out duplicates and shared ancestor Branches (abort if we faced an error)
+ deepestCommonAncestors, err := tangle.findDeepestCommonAncestorBranches(branches...)
+ if err != nil {
+ return
+ }
+
+ // if there is only one branch that we found, then we are done
+ if len(deepestCommonAncestors) == 1 {
+ for _, firstBranchInList := range deepestCommonAncestors {
+ cachedAggregatedBranch = firstBranchInList
+ }
+
+ return
+ }
+
+	// if there is more than one parent: aggregate
+ aggregatedBranchId, aggregatedBranchParents, err := tangle.determineAggregatedBranchDetails(deepestCommonAncestors)
+ if err != nil {
+ return
+ }
+
+ newAggregatedBranchCreated := false
+ cachedAggregatedBranch = &CachedBranch{CachedObject: tangle.branchStorage.ComputeIfAbsent(aggregatedBranchId.Bytes(), func(key []byte) (object objectstorage.StorableObject) {
+ aggregatedReality := NewBranch(aggregatedBranchId, aggregatedBranchParents)
+
+ // TODO: FIX
+ /*
+ for _, parentRealityId := range aggregatedBranchParents {
+ tangle.GetBranch(parentRealityId).Consume(func(branch *Branch) {
+ branch.RegisterSubReality(aggregatedRealityId)
+ })
+ }
+ */
+
+ aggregatedReality.SetModified()
+
+ newAggregatedBranchCreated = true
+
+ return aggregatedReality
+ })}
+
+ if !newAggregatedBranchCreated {
+ fmt.Println("1")
+ // TODO: FIX
+ /*
+ aggregatedBranch := cachedAggregatedBranch.Unwrap()
+
+ for _, realityId := range aggregatedBranchParents {
+ if aggregatedBranch.AddParentReality(realityId) {
+ tangle.GetBranch(realityId).Consume(func(branch *Branch) {
+ branch.RegisterSubReality(aggregatedRealityId)
+ })
+ }
+ }
+ */
+ }
+
+ return
}
-func (tangle *Tangle) isOutputMarkedAsSolid(transactionOutputId transaction.OutputId) (result bool) {
- outputExists := tangle.GetTransactionOutput(transactionOutputId).Consume(func(output *transaction.Output) {
- result = output.Solid()
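+// determineAggregatedBranchDetails derives the id and the list of parents of the aggregated Branch that combines the
+// given (deepest common ancestor) Branches.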
+func (tangle *Tangle) determineAggregatedBranchDetails(deepestCommonAncestors CachedBranches) (aggregatedBranchId BranchId, aggregatedBranchParents []BranchId, err error) {
+ aggregatedBranchParents = make([]BranchId, len(deepestCommonAncestors))
+
+ i := 0
+ aggregatedBranchConflictParents := make(CachedBranches)
+ for branchId, cachedBranch := range deepestCommonAncestors {
+ // release all following entries if we have encountered an error
+ if err != nil {
+ cachedBranch.Release()
+
+ continue
+ }
+
+ // store BranchId as parent
+ aggregatedBranchParents[i] = branchId
+ i++
+
+ // abort if we could not unwrap the Branch (should never happen)
+ branch := cachedBranch.Unwrap()
+ if branch == nil {
+ cachedBranch.Release()
+
+			err = fmt.Errorf("failed to unwrap branch '%s'", branchId)
+
+ continue
+ }
+
+ if branch.IsAggregated() {
+ aggregatedBranchConflictParents[branchId] = cachedBranch
+
+ continue
+ }
+
+ err = tangle.collectClosestConflictAncestors(branch, aggregatedBranchConflictParents)
+
+ cachedBranch.Release()
+ }
+
+ if err != nil {
+ aggregatedBranchConflictParents.Release()
+ aggregatedBranchConflictParents = nil
+
+ return
+ }
+
+ aggregatedBranchId = tangle.generateAggregatedBranchId(aggregatedBranchConflictParents)
+
+ return
+}
+
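+// generateAggregatedBranchId deterministically derives the id of an aggregated Branch by sorting the ids of its
+// conflict Branch parents and hashing their concatenated bytes with blake2b.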
+func (tangle *Tangle) generateAggregatedBranchId(aggregatedBranches CachedBranches) BranchId {
+ counter := 0
+ branchIds := make([]BranchId, len(aggregatedBranches))
+ for branchId, cachedBranch := range aggregatedBranches {
+ branchIds[counter] = branchId
+
+ counter++
+
+ cachedBranch.Release()
+ }
+
+ sort.Slice(branchIds, func(i, j int) bool {
+		for k := 0; k < BranchIdLength; k++ {
+ if branchIds[i][k] < branchIds[j][k] {
+ return true
+ } else if branchIds[i][k] > branchIds[j][k] {
+ return false
+ }
+ }
+
+ return false
})
- if !outputExists {
- if cachedMissingOutput, missingOutputStored := tangle.missingOutputStorage.StoreIfAbsent(NewMissingOutput(transactionOutputId)); missingOutputStored {
- cachedMissingOutput.Consume(func(object objectstorage.StorableObject) {
- tangle.Events.OutputMissing.Trigger(object.(*MissingOutput).Id())
- })
+ marshalUtil := marshalutil.New(BranchIdLength * len(branchIds))
+ for _, branchId := range branchIds {
+ marshalUtil.WriteBytes(branchId.Bytes())
+ }
+
+ return blake2b.Sum256(marshalUtil.Bytes())
+}
+
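+// collectClosestConflictAncestors walks up the parents of the given Branch and collects the closest ancestors that
+// are not aggregated Branches themselves.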
+func (tangle *Tangle) collectClosestConflictAncestors(branch *Branch, closestConflictAncestors CachedBranches) (err error) {
+ // initialize stack
+ stack := list.New()
+ for _, parentRealityId := range branch.ParentBranches() {
+ stack.PushBack(parentRealityId)
+ }
+
+ // work through stack
+ processedBranches := make(map[BranchId]types.Empty)
+ for stack.Len() != 0 {
+		// iterate through the parents (in a func so we can use defer)
+ err = func() error {
+ // pop parent branch id from stack
+ firstStackElement := stack.Front()
+ defer stack.Remove(firstStackElement)
+ parentBranchId := stack.Front().Value.(BranchId)
+
+ // abort if the parent has been processed already
+ if _, branchProcessed := processedBranches[parentBranchId]; branchProcessed {
+ return nil
+ }
+ processedBranches[parentBranchId] = types.Void
+
+ // load parent branch from database
+ cachedParentBranch := tangle.GetBranch(parentBranchId)
+
+ // abort if the parent branch could not be found (should never happen)
+ parentBranch := cachedParentBranch.Unwrap()
+ if parentBranch == nil {
+ cachedParentBranch.Release()
+
+ return fmt.Errorf("failed to load branch '%s'", parentBranchId)
+ }
+
+ // if the parent Branch is not aggregated, then we have found the closest conflict ancestor
+ if !parentBranch.IsAggregated() {
+ closestConflictAncestors[parentBranchId] = cachedParentBranch
+
+ return nil
+ }
+
+ // queue parents for additional check (recursion)
+ for _, parentRealityId := range parentBranch.ParentBranches() {
+ stack.PushBack(parentRealityId)
+ }
+
+ // release the branch (we don't need it anymore)
+ cachedParentBranch.Release()
+
+ return nil
+ }()
+
+ if err != nil {
+ return
}
}
return
}
+// findDeepestCommonAncestorBranches takes a number of BranchIds and determines the most specialized Branches (the
+// ones furthest away from the MasterBranch) in that list, so that together they still cover all of the named
+// BranchIds.
+//
+// Example: If we hand in "A, B" and B has A as its parent, then the result will contain the Branch B, because B is a
+// child of A.
+func (tangle *Tangle) findDeepestCommonAncestorBranches(branches ...BranchId) (result CachedBranches, err error) {
+ result = make(CachedBranches)
+
+ processedBranches := make(map[BranchId]types.Empty)
+ for _, branchId := range branches {
+ err = func() error {
+ // continue, if we have processed this branch already
+ if _, exists := processedBranches[branchId]; exists {
+ return nil
+ }
+ processedBranches[branchId] = types.Void
+
+ // load branch from objectstorage
+ cachedBranch := tangle.GetBranch(branchId)
+
+ // abort if we could not load the CachedBranch
+ branch := cachedBranch.Unwrap()
+ if branch == nil {
+ cachedBranch.Release()
+
+ return fmt.Errorf("could not load branch '%s'", branchId)
+ }
+
+ // check branches position relative to already aggregated branches
+ for aggregatedBranchId, cachedAggregatedBranch := range result {
+ // abort if we can not load the branch
+ aggregatedBranch := cachedAggregatedBranch.Unwrap()
+ if aggregatedBranch == nil {
+ return fmt.Errorf("could not load branch '%s'", aggregatedBranchId)
+ }
+
+ // if the current branch is an ancestor of an already aggregated branch, then we have found the more
+ // "specialized" branch already and keep it
+ if isAncestor, ancestorErr := tangle.branchIsAncestorOfBranch(branch, aggregatedBranch); isAncestor || ancestorErr != nil {
+ return ancestorErr
+ }
+
+ // check if the aggregated Branch is an ancestor of the current Branch and abort if we face an error
+ isAncestor, ancestorErr := tangle.branchIsAncestorOfBranch(aggregatedBranch, branch)
+ if ancestorErr != nil {
+ return ancestorErr
+ }
+
+ // if the aggregated branch is an ancestor of the current branch, then we have found a more specialized
+ // Branch and replace the old one with this one.
+ if isAncestor {
+				// replace the aggregated branch if we have found a more specialized one
+ delete(result, aggregatedBranchId)
+ cachedAggregatedBranch.Release()
+
+ result[branchId] = cachedBranch
+
+ return nil
+ }
+ }
+
+ // store the branch as a new aggregate candidate if it was not found to be in any relation with the already
+ // aggregated ones.
+ result[branchId] = cachedBranch
+
+ return nil
+ }()
+
+ // abort if an error occurred while processing the current branch
+ if err != nil {
+ result.Release()
+ result = nil
+
+ return
+ }
+ }
+
+ return
+}
+
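+// branchIsAncestorOfBranch returns true if the first Branch is an ancestor of (or equal to) the second Branch.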
+func (tangle *Tangle) branchIsAncestorOfBranch(ancestor *Branch, descendant *Branch) (isAncestor bool, err error) {
+ if ancestor.Id() == descendant.Id() {
+ return true, nil
+ }
+
+ ancestorBranches, err := tangle.getAncestorBranches(descendant)
+ if err != nil {
+ return
+ }
+
+ ancestorBranches.Consume(func(ancestorOfDescendant *Branch) {
+ if ancestorOfDescendant.Id() == ancestor.Id() {
+ isAncestor = true
+ }
+ })
+
+ return
+}
+
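+// getAncestorBranches returns all of the ancestor Branches of the given Branch.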
+func (tangle *Tangle) getAncestorBranches(branch *Branch) (ancestorBranches CachedBranches, err error) {
+ // initialize result
+ ancestorBranches = make(CachedBranches)
+
+ // initialize stack
+ stack := list.New()
+ for _, parentRealityId := range branch.ParentBranches() {
+ stack.PushBack(parentRealityId)
+ }
+
+ // work through stack
+ for stack.Len() != 0 {
+		// iterate through the parents (in a func so we can use defer)
+ err = func() error {
+ // pop parent branch id from stack
+ firstStackElement := stack.Front()
+ defer stack.Remove(firstStackElement)
+ parentBranchId := stack.Front().Value.(BranchId)
+
+ // abort if the parent has been processed already
+ if _, branchProcessed := ancestorBranches[parentBranchId]; branchProcessed {
+ return nil
+ }
+
+ // load parent branch from database
+ cachedParentBranch := tangle.GetBranch(parentBranchId)
+
+			// abort if the parent branch could not be found (should never happen)
+ parentBranch := cachedParentBranch.Unwrap()
+ if parentBranch == nil {
+ cachedParentBranch.Release()
+
+ return fmt.Errorf("failed to unwrap branch '%s'", parentBranchId)
+ }
+
+ // store parent branch in result
+ ancestorBranches[parentBranchId] = cachedParentBranch
+
+ // queue parents for additional check (recursion)
+ for _, parentRealityId := range parentBranch.ParentBranches() {
+ stack.PushBack(parentRealityId)
+ }
+
+ return nil
+ }()
+
+ // abort if an error occurs while trying to process the parents
+ if err != nil {
+ ancestorBranches.Release()
+ ancestorBranches = nil
+
+ return
+ }
+ }
+
+ return
+}
+
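+// GetBranch loads the Branch with the given id from the object storage (not implemented yet, see TODO below).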
+func (tangle *Tangle) GetBranch(branchId BranchId) *CachedBranch {
+ // TODO: IMPLEMENT
+ return nil
+}
+
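+// ForEachApprovers iterates through the payloads that approve the given payload and calls the consumer with the
+// corresponding cached payload, payload metadata and transaction metadata.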
+func (tangle *Tangle) ForEachApprovers(payloadId payload.Id, consume func(payload *payload.CachedPayload, payloadMetadata *CachedPayloadMetadata, cachedTransactionMetadata *CachedTransactionMetadata)) {
+ tangle.GetApprovers(payloadId).Consume(func(approver *PayloadApprover) {
+ approvingPayloadId := approver.GetApprovingPayloadId()
+ approvingCachedPayload := tangle.GetPayload(approvingPayloadId)
+
+ approvingCachedPayload.Consume(func(payload *payload.Payload) {
+ consume(approvingCachedPayload, tangle.GetPayloadMetadata(approvingPayloadId), tangle.GetTransactionMetadata(payload.Transaction().Id()))
+ })
+ })
+}
+
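+// ForEachConsumers iterates through the payloads that attach a transaction spending one of the outputs of the given
+// transaction and calls the consumer with the corresponding cached payload, payload metadata and transaction metadata.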
+func (tangle *Tangle) ForEachConsumers(currentTransaction *transaction.Transaction, consume func(payload *payload.CachedPayload, payloadMetadata *CachedPayloadMetadata, cachedTransactionMetadata *CachedTransactionMetadata)) {
+ seenTransactions := make(map[transaction.Id]types.Empty)
+ currentTransaction.Outputs().ForEach(func(address address.Address, balances []*balance.Balance) bool {
+ tangle.GetConsumers(transaction.NewOutputId(address, currentTransaction.Id())).Consume(func(consumer *Consumer) {
+ // keep track of the processed transactions (the same transaction can consume multiple outputs)
+			if _, transactionSeen := seenTransactions[consumer.TransactionId()]; !transactionSeen {
+ seenTransactions[consumer.TransactionId()] = types.Void
+
+ transactionMetadata := tangle.GetTransactionMetadata(consumer.TransactionId())
+
+ // retrieve all the payloads that attached the transaction
+ tangle.GetAttachments(consumer.TransactionId()).Consume(func(attachment *Attachment) {
+ consume(tangle.GetPayload(attachment.PayloadId()), tangle.GetPayloadMetadata(attachment.PayloadId()), transactionMetadata)
+ })
+ }
+ })
+
+ return true
+ })
+}
+
// isPayloadSolid returns true if the given payload is solid. A payload is considered to be solid, if it is either
// already marked as solid or if its referenced payloads are marked as solid.
func (tangle *Tangle) isPayloadSolid(payload *payload.Payload, metadata *PayloadMetadata) bool {
@@ -420,3 +825,160 @@ func (tangle *Tangle) isPayloadMarkedAsSolid(payloadId payload.Id) bool {
return true
}
+
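+// isTransactionSolid returns true if the given transaction is solid, i.e. if all of its referenced inputs are known
+// and solid. It also validates that the outputs consume exactly the funds that are provided by the inputs.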
+func (tangle *Tangle) isTransactionSolid(tx *transaction.Transaction, metadata *TransactionMetadata) (bool, error) {
+ // abort if any of the models are nil or has been deleted
+ if tx == nil || tx.IsDeleted() || metadata == nil || metadata.IsDeleted() {
+ return false, nil
+ }
+
+ // abort if we have previously determined the solidity status of the transaction already
+ if metadata.Solid() {
+ return true, nil
+ }
+
+ // get outputs that were referenced in the transaction inputs
+ cachedInputs := tangle.getCachedOutputsFromTransactionInputs(tx)
+ defer cachedInputs.Release()
+
+ // check the solidity of the inputs and retrieve the consumed balances
+ inputsSolid, consumedBalances, err := tangle.checkTransactionInputs(cachedInputs)
+
+ // abort if an error occurred or the inputs are not solid, yet
+ if !inputsSolid || err != nil {
+ return false, err
+ }
+
+ if !tangle.checkTransactionOutputs(consumedBalances, tx.Outputs()) {
+ return false, fmt.Errorf("the outputs do not match the inputs in transaction with id '%s'", tx.Id())
+ }
+
+ return true, nil
+}
+
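+// getCachedOutputsFromTransactionInputs loads the outputs that are referenced by the inputs of the given transaction
+// from the object storage.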
+func (tangle *Tangle) getCachedOutputsFromTransactionInputs(tx *transaction.Transaction) (result CachedOutputs) {
+ result = make(CachedOutputs)
+ tx.Inputs().ForEach(func(inputId transaction.OutputId) bool {
+ result[inputId] = tangle.GetTransactionOutput(inputId)
+
+ return true
+ })
+
+ return
+}
+
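+// checkTransactionInputs checks whether the referenced inputs exist and are solid, sums up the consumed balances per
+// color and registers missing outputs along the way.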
+func (tangle *Tangle) checkTransactionInputs(cachedInputs CachedOutputs) (inputsSolid bool, consumedBalances map[balance.Color]int64, err error) {
+ inputsSolid = true
+ consumedBalances = make(map[balance.Color]int64)
+
+ for inputId, cachedInput := range cachedInputs {
+ if !cachedInput.Exists() {
+ inputsSolid = false
+
+ if cachedMissingOutput, missingOutputStored := tangle.missingOutputStorage.StoreIfAbsent(NewMissingOutput(inputId)); missingOutputStored {
+ cachedMissingOutput.Consume(func(object objectstorage.StorableObject) {
+ tangle.Events.OutputMissing.Trigger(object.(*MissingOutput).Id())
+ })
+ }
+
+ continue
+ }
+
+ // should never be nil as we check Exists() before
+ input := cachedInput.Unwrap()
+
+ // update solid status
+ inputsSolid = inputsSolid && input.Solid()
+
+ // calculate the input balances
+ for _, inputBalance := range input.Balances() {
+ var newBalance int64
+ if currentBalance, balanceExists := consumedBalances[inputBalance.Color()]; balanceExists {
+ // check overflows in the numbers
+ if inputBalance.Value() > math.MaxInt64-currentBalance {
+					err = fmt.Errorf("integer overflow in the balances of the inputs")
+
+ return
+ }
+
+ newBalance = currentBalance + inputBalance.Value()
+ } else {
+ newBalance = inputBalance.Value()
+ }
+ consumedBalances[inputBalance.Color()] = newBalance
+ }
+ }
+
+ return
+}
+
+// checkTransactionOutputs is a utility function that returns true if the outputs consume exactly the given inputs
+// (the sum of all balance changes is 0). It also accounts for the ability to "recolor" coins when creating outputs.
+// If this function returns false, then the outputs that are defined in the transaction are invalid and the
+// transaction should be removed from the ledger state.
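+//
+// Example: inputs with a total balance of {IOTA: 1337} can be spent into outputs with the balance {COLOR_NEW: 1337},
+// since the newly colored coins are backed by an equal amount of otherwise unspent input coins (this mirrors the
+// scenario in tangle_test.go).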
+func (tangle *Tangle) checkTransactionOutputs(inputBalances map[balance.Color]int64, outputs *transaction.Outputs) bool {
+ // create a variable to keep track of outputs that create a new color
+ var newlyColoredCoins int64
+
+ // iterate through outputs and check them one by one
+ aborted := !outputs.ForEach(func(address address.Address, balances []*balance.Balance) bool {
+ for _, outputBalance := range balances {
+ // abort if the output creates a negative or empty output
+			// abort if the output has a zero or negative balance
+ return false
+ }
+
+ // sidestep logic if we have a newly colored output (we check the supply later)
+ if outputBalance.Color() == balance.COLOR_NEW {
+ // catch overflows
+ if newlyColoredCoins > math.MaxInt64-outputBalance.Value() {
+ return false
+ }
+
+ newlyColoredCoins += outputBalance.Value()
+
+ continue
+ }
+
+ // check if the used color does not exist in our supply
+ availableBalance, spentColorExists := inputBalances[outputBalance.Color()]
+ if !spentColorExists {
+ return false
+ }
+
+ // abort if we spend more coins of the given color than we have
+ if availableBalance < outputBalance.Value() {
+ return false
+ }
+
+ // subtract the spent coins from the supply of this transaction
+ inputBalances[outputBalance.Color()] -= outputBalance.Value()
+
+ // cleanup the entry in the supply map if we have exhausted all funds
+ if inputBalances[outputBalance.Color()] == 0 {
+ delete(inputBalances, outputBalance.Color())
+ }
+ }
+
+ return true
+ })
+
+ // abort if the previous checks failed
+ if aborted {
+ return false
+ }
+
+ // determine the unspent inputs
+ var unspentCoins int64
+ for _, unspentBalance := range inputBalances {
+ // catch overflows
+ if unspentCoins > math.MaxInt64-unspentBalance {
+ return false
+ }
+
+ unspentCoins += unspentBalance
+ }
+
+	// the outputs are valid if the newly colored coins are backed by an equal amount of otherwise unspent inputs
+ return unspentCoins == newlyColoredCoins
+}
diff --git a/packages/binary/valuetransfer/tangle/tangle_test.go b/packages/binary/valuetransfer/tangle/tangle_test.go
index 2b21059b6437736a6dca02037aef47004ebf9aa7..9980c5bedbefceef1bb6f2111c1fcd152c6f1b66 100644
--- a/packages/binary/valuetransfer/tangle/tangle_test.go
+++ b/packages/binary/valuetransfer/tangle/tangle_test.go
@@ -1,13 +1,12 @@
package tangle
import (
- "fmt"
"io/ioutil"
"os"
"testing"
+ "time"
"github.com/iotaledger/hive.go/crypto/ed25519"
- "github.com/iotaledger/hive.go/events"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -52,31 +51,47 @@ func TestTangle_AttachPayload(t *testing.T) {
return
}
- tangle.Events.PayloadSolid.Attach(events.NewClosure(func(payload *payload.CachedPayload, metadata *CachedPayloadMetadata) {
- fmt.Println(payload.Unwrap())
-
- payload.Release()
- metadata.Release()
- }))
-
addressKeyPair1 := ed25519.GenerateKeyPair()
addressKeyPair2 := ed25519.GenerateKeyPair()
transferId1, _ := transaction.IdFromBase58("8opHzTAnfzRpPEx21XtnrVTX28YQuCpAjcn1PczScKh")
transferId2, _ := transaction.IdFromBase58("4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM")
- tangle.AttachPayload(payload.New(payload.GenesisId, payload.GenesisId, transaction.New(
+ input1 := NewOutput(address.FromED25519PubKey(addressKeyPair1.PublicKey), transferId1, MasterBranchId, []*balance.Balance{
+ balance.New(balance.COLOR_IOTA, 337),
+ })
+ input1.SetSolid(true)
+ input2 := NewOutput(address.FromED25519PubKey(addressKeyPair2.PublicKey), transferId2, MasterBranchId, []*balance.Balance{
+ balance.New(balance.COLOR_IOTA, 1000),
+ })
+ input2.SetSolid(true)
+
+ tangle.outputStorage.Store(input1)
+ tangle.outputStorage.Store(input2)
+
+ outputAddress := address.Random()
+
+ tx := transaction.New(
transaction.NewInputs(
- transaction.NewOutputId(address.FromED25519PubKey(addressKeyPair1.PublicKey), transferId1),
- transaction.NewOutputId(address.FromED25519PubKey(addressKeyPair2.PublicKey), transferId2),
+ input1.Id(),
+ input2.Id(),
),
transaction.NewOutputs(map[address.Address][]*balance.Balance{
- address.Random(): {
- balance.New(balance.COLOR_IOTA, 1337),
+ outputAddress: {
+ balance.New(balance.COLOR_NEW, 1337),
},
}),
- )))
+ )
+
+ tangle.AttachPayload(payload.New(payload.GenesisId, payload.GenesisId, tx))
+
+ time.Sleep(1 * time.Second)
+
+ outputFound := tangle.GetTransactionOutput(transaction.NewOutputId(outputAddress, tx.Id())).Consume(func(output *Output) {
+ assert.Equal(t, true, output.Solid())
+ })
+ assert.Equal(t, true, outputFound)
tangle.Shutdown()
}
diff --git a/packages/binary/valuetransfer/transaction/outputs.go b/packages/binary/valuetransfer/transaction/outputs.go
index daf995bbea3799e65868699dad447f0f79ec9fd0..8e1cea9830f29922031b296a454600eb7b3a676e 100644
--- a/packages/binary/valuetransfer/transaction/outputs.go
+++ b/packages/binary/valuetransfer/transaction/outputs.go
@@ -90,11 +90,9 @@ func (outputs *Outputs) Add(address address.Address, balances []*balance.Balance
return outputs
}
-func (outputs *Outputs) ForEach(consumer func(address address.Address, balances []*balance.Balance)) {
- outputs.OrderedMap.ForEach(func(key, value interface{}) bool {
- consumer(key.(address.Address), value.([]*balance.Balance))
-
- return true
+func (outputs *Outputs) ForEach(consumer func(address address.Address, balances []*balance.Balance) bool) bool {
+ return outputs.OrderedMap.ForEach(func(key, value interface{}) bool {
+ return consumer(key.(address.Address), value.([]*balance.Balance))
})
}
@@ -108,13 +106,15 @@ func (outputs *Outputs) Bytes() []byte {
}
marshalUtil.WriteUint32(uint32(outputs.Size()))
- outputs.ForEach(func(address address.Address, balances []*balance.Balance) {
+ outputs.ForEach(func(address address.Address, balances []*balance.Balance) bool {
marshalUtil.WriteBytes(address.Bytes())
marshalUtil.WriteUint32(uint32(len(balances)))
for _, balance := range balances {
marshalUtil.WriteBytes(balance.Bytes())
}
+
+ return true
})
return marshalUtil.Bytes()
@@ -127,7 +127,7 @@ func (outputs *Outputs) String() string {
result := "[\n"
empty := true
- outputs.ForEach(func(address address.Address, balances []*balance.Balance) {
+ outputs.ForEach(func(address address.Address, balances []*balance.Balance) bool {
empty = false
result += " " + address.String() + ": [\n"
@@ -144,6 +144,8 @@ func (outputs *Outputs) String() string {
}
result += " ]\n"
+
+ return true
})
if empty {