diff --git a/dapps/valuetransfers/packages/branchmanager/branchmanager.go b/dapps/valuetransfers/packages/branchmanager/branchmanager.go
index e2984d6851b6d18acea98adca37f6256a2351837..cb9b4a3f94471e50bdb0f1e6af34842157d33366 100644
--- a/dapps/valuetransfers/packages/branchmanager/branchmanager.go
+++ b/dapps/valuetransfers/packages/branchmanager/branchmanager.go
@@ -354,6 +354,28 @@ func (branchManager *BranchManager) SetBranchFinalized(branchID BranchID) (modif
 	return branchManager.setBranchFinalized(branchManager.Branch(branchID))
 }
 
+// GenerateAggregatedBranchID generates an aggregated BranchID from the handed in BranchIDs.
+func (branchManager *BranchManager) GenerateAggregatedBranchID(branchIDs ...BranchID) BranchID {
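+	// sort the BranchIDs lexicographically so that the resulting aggregated BranchID is deterministic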
+	sort.Slice(branchIDs, func(i, j int) bool {
+		for k := 0; k < len(branchIDs[k]); k++ {
+			if branchIDs[i][k] < branchIDs[j][k] {
+				return true
+			} else if branchIDs[i][k] > branchIDs[j][k] {
+				return false
+			}
+		}
+
+		return false
+	})
+
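+	// concatenate the sorted BranchIDs and hash the result to derive the aggregated BranchID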
+	marshalUtil := marshalutil.New(BranchIDLength * len(branchIDs))
+	for _, branchID := range branchIDs {
+		marshalUtil.WriteBytes(branchID.Bytes())
+	}
+
+	return blake2b.Sum256(marshalUtil.Bytes())
+}
+
 func (branchManager *BranchManager) setBranchFinalized(cachedBranch *CachedBranch) (modified bool, err error) {
 	defer cachedBranch.Release()
 	branch := cachedBranch.Unwrap()
@@ -369,6 +391,11 @@ func (branchManager *BranchManager) setBranchFinalized(cachedBranch *CachedBranc
 
 	branchManager.Events.BranchFinalized.Trigger(cachedBranch)
 
+	// propagate finalized to aggregated child branches
+	if err = branchManager.propagateFinalizedToAggregatedChildBranches(cachedBranch.Retain()); err != nil {
+		return
+	}
+
 	if !branch.Preferred() {
 		branchManager.propagateRejectedToChildBranches(cachedBranch.Retain())
 
@@ -394,11 +421,62 @@ func (branchManager *BranchManager) setBranchFinalized(cachedBranch *CachedBranc
 		})
 	}
 
+	// schedule confirmed checks of children
 	err = branchManager.propagateConfirmedToChildBranches(cachedBranch.Retain())
 
 	return
 }
 
+// propagateFinalizedToAggregatedChildBranches propagates the finalized flag to the aggregated child branches of the
+// given branch. An aggregated branch is finalized if all of its parents are finalized.
+func (branchManager *BranchManager) propagateFinalizedToAggregatedChildBranches(cachedBranch *CachedBranch) (err error) {
+	// initialize stack with the child branches of the given branch
+	propagationStack := list.New()
+	cachedBranch.Consume(func(branch *Branch) {
+		branchManager.ChildBranches(branch.ID()).Consume(func(childBranch *ChildBranch) {
+			propagationStack.PushBack(branchManager.Branch(childBranch.ChildID()))
+		})
+	})
+
+	// iterate through stack to propagate the changes to child branches
+	for propagationStack.Len() >= 1 {
+		stackElement := propagationStack.Front()
+		stackElement.Value.(*CachedBranch).Consume(func(branch *Branch) {
+			// abort if the branch is not aggregated
+			if !branch.IsAggregated() {
+				return
+			}
+
+			// abort if not all parents are finalized
+			for _, parentBranchID := range branch.ParentBranches() {
+				cachedParentBranch := branchManager.Branch(parentBranchID)
+				if parentBranch := cachedParentBranch.Unwrap(); parentBranch == nil || !parentBranch.Finalized() {
+					cachedParentBranch.Release()
+
+					return
+				}
+				cachedParentBranch.Release()
+			}
+
+			// abort if the branch was finalized already
+			if !branch.setFinalized(true) {
+				return
+			}
+
+			// trigger events
+			branchManager.Events.BranchFinalized.Trigger(cachedBranch)
+
+			// schedule finalized checks of children
+			branchManager.ChildBranches(branch.ID()).Consume(func(childBranch *ChildBranch) {
+				propagationStack.PushBack(branchManager.Branch(childBranch.childID))
+			})
+		})
+		propagationStack.Remove(stackElement)
+	}
+
+	return
+}
+
 func (branchManager *BranchManager) propagateRejectedToChildBranches(cachedBranch *CachedBranch) {
 	branchStack := list.New()
 	branchStack.PushBack(cachedBranch)
@@ -459,9 +537,9 @@ func (branchManager *BranchManager) propagateConfirmedToChildBranches(cachedBran
 			branchManager.Events.BranchConfirmed.Trigger(cachedBranch)
 
 			// schedule confirmed checks of children
-			for _, cachedChildBranch := range branchManager.ChildBranches(branch.ID()) {
-				propagationStack.PushBack(cachedChildBranch)
-			}
+			branchManager.ChildBranches(branch.ID()).Consume(func(childBranch *ChildBranch) {
+				propagationStack.PushBack(branchManager.Branch(childBranch.childID))
+			})
 		})
 		propagationStack.Remove(stackElement)
 	}
@@ -839,24 +917,7 @@ func (branchManager *BranchManager) generateAggregatedBranchID(aggregatedBranche
 		cachedBranch.Release()
 	}
 
-	sort.Slice(branchIDs, func(i, j int) bool {
-		for k := 0; k < len(branchIDs[k]); k++ {
-			if branchIDs[i][k] < branchIDs[j][k] {
-				return true
-			} else if branchIDs[i][k] > branchIDs[j][k] {
-				return false
-			}
-		}
-
-		return false
-	})
-
-	marshalUtil := marshalutil.New(BranchIDLength * len(branchIDs))
-	for _, branchID := range branchIDs {
-		marshalUtil.WriteBytes(branchID.Bytes())
-	}
-
-	return blake2b.Sum256(marshalUtil.Bytes())
+	return branchManager.GenerateAggregatedBranchID(branchIDs...)
 }
 
 func (branchManager *BranchManager) collectClosestConflictAncestors(branch *Branch, closestConflictAncestors CachedBranches) (err error) {
diff --git a/dapps/valuetransfers/packages/branchmanager/objectstorage.go b/dapps/valuetransfers/packages/branchmanager/objectstorage.go
index b3a95c655131c13951f7e7ccd3dd814863bfe131..fede032fbef8ed4df734b530299f7752fe865974 100644
--- a/dapps/valuetransfers/packages/branchmanager/objectstorage.go
+++ b/dapps/valuetransfers/packages/branchmanager/objectstorage.go
@@ -18,7 +18,7 @@ const (
 )
 
 var (
-	osLeakDetectionOption = objectstorage.LeakDetectionEnabled(true, objectstorage.LeakDetectionOptions{
+	osLeakDetectionOption = objectstorage.LeakDetectionEnabled(false, objectstorage.LeakDetectionOptions{
 		MaxConsumersPerObject: 10,
 		MaxConsumerHoldTime:   10 * time.Second,
 	})
diff --git a/dapps/valuetransfers/packages/tangle/attachment.go b/dapps/valuetransfers/packages/tangle/attachment.go
index 051deee1dae0f6472cda506dce40797d70696e02..e7b41335b57948f0f9eb57bd1585d4831e1a1f69 100644
--- a/dapps/valuetransfers/packages/tangle/attachment.go
+++ b/dapps/valuetransfers/packages/tangle/attachment.go
@@ -1,12 +1,11 @@
 package tangle
 
 import (
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 	"github.com/iotaledger/hive.go/marshalutil"
 	"github.com/iotaledger/hive.go/objectstorage"
 	"github.com/iotaledger/hive.go/stringify"
-
-	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
-	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 )
 
 // Attachment stores the information which transaction was attached by which payload. We need this to be able to perform
diff --git a/dapps/valuetransfers/packages/tangle/attachment_test.go b/dapps/valuetransfers/packages/tangle/attachment_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..186214ed29b835cc6d2467bde5d469324748e3ad
--- /dev/null
+++ b/dapps/valuetransfers/packages/tangle/attachment_test.go
@@ -0,0 +1,28 @@
+package tangle
+
+import (
+	"testing"
+
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+	"github.com/stretchr/testify/assert"
+)
+
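+// TestAttachment checks that an Attachment can be created, marshaled and unmarshaled without losing any information.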
+func TestAttachment(t *testing.T) {
+	transactionID := transaction.RandomID()
+	payloadID := payload.RandomID()
+
+	attachment := NewAttachment(transactionID, payloadID)
+
+	assert.Equal(t, transactionID, attachment.TransactionID())
+	assert.Equal(t, payloadID, attachment.PayloadID())
+
+	clonedAttachment, consumedBytes, err := AttachmentFromBytes(attachment.Bytes())
+	if err != nil {
+		panic(err)
+	}
+
+	assert.Equal(t, AttachmentLength, consumedBytes)
+	assert.Equal(t, transactionID, clonedAttachment.TransactionID())
+	assert.Equal(t, payloadID, clonedAttachment.PayloadID())
+}
diff --git a/dapps/valuetransfers/packages/tangle/consumer.go b/dapps/valuetransfers/packages/tangle/consumer.go
index 201a89a784f438db96ce1ecb74c893614de694b2..1fdcacb63ae9f3bc7cef6438973bb7e3adf83f79 100644
--- a/dapps/valuetransfers/packages/tangle/consumer.go
+++ b/dapps/valuetransfers/packages/tangle/consumer.go
@@ -1,12 +1,11 @@
 package tangle
 
 import (
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 	"github.com/iotaledger/hive.go/marshalutil"
 	"github.com/iotaledger/hive.go/objectstorage"
 	"github.com/iotaledger/hive.go/stringify"
-
-	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
-	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 )
 
 // ConsumerPartitionKeys defines the "layout" of the key. This enables prefix iterations in the objectstorage.
diff --git a/dapps/valuetransfers/packages/tangle/debugger.go b/dapps/valuetransfers/packages/tangle/debugger.go
new file mode 100644
index 0000000000000000000000000000000000000000..976c5270f839abafc15fdb8bb36518ea4ee813de
--- /dev/null
+++ b/dapps/valuetransfers/packages/tangle/debugger.go
@@ -0,0 +1,115 @@
+package tangle
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Debugger represents a utility that allows us to print debug messages and function calls.
+type Debugger struct {
+	aliases map[interface{}]string
+	enabled bool
+	indent  int
+}
+
+// NewDebugger is the constructor of a debugger instance.
+func NewDebugger() *Debugger {
+	return (&Debugger{}).ResetAliases()
+}
+
+// Enable sets the debugger to print the debug information.
+func (debugger *Debugger) Enable() {
+	debugger.enabled = true
+
+	fmt.Println("[DEBUGGER::ENABLED]")
+}
+
+// Disable sets the debugger to not print any debug information.
+func (debugger *Debugger) Disable() {
+	fmt.Println("[DEBUGGER::DISABLED]")
+	debugger.enabled = false
+}
+
+// ResetAliases removes any previously registered aliases. This can be useful if the same debugger instance is, for
+// example, reused in different tests or test cases.
+func (debugger *Debugger) ResetAliases() *Debugger {
+	debugger.aliases = make(map[interface{}]string)
+
+	return debugger
+}
+
+// RegisterAlias registers a string representation for the given element. This can be used to create a string
+// representation for things like ids in the form of byte slices.
+func (debugger *Debugger) RegisterAlias(element interface{}, alias string) {
+	debugger.aliases[element] = alias
+}
+
+// FunctionCall prints debug information about a function call. It automatically indents all following debug outputs
+// until Return() is called. The best way to use this is by starting a function call with a construct like:
+//
+// defer debugger.FunctionCall("myFunction", param1, param2).Return()
+func (debugger *Debugger) FunctionCall(identifier string, params ...interface{}) *Debugger {
+	if !debugger.enabled {
+		return debugger
+	}
+
+	debugger.Print(identifier + "(" + debugger.paramsAsCommaSeparatedList(params...) + ") {")
+	debugger.indent++
+
+	return debugger
+}
+
+// Return prints debug information about a FunctionCall() that was finished. It reduces the indentation for consecutive
+// debug outputs.
+func (debugger *Debugger) Return() *Debugger {
+	if !debugger.enabled {
+		return debugger
+	}
+
+	debugger.indent--
+	debugger.Print("}")
+
+	return debugger
+}
+
+// Print prints an arbitrary debug message that can, for example, be used to print information when a certain part of
+// the code is executed.
+func (debugger *Debugger) Print(identifier string, params ...interface{}) {
+	if !debugger.enabled {
+		return
+	}
+
+	if len(params) >= 1 {
+		debugger.print(identifier + " = " + debugger.paramsAsCommaSeparatedList(params...))
+	} else {
+		debugger.print(identifier)
+	}
+}
+
+// print is an internal utility function that actually prints the given string to stdout.
+func (debugger *Debugger) print(stringToPrint string) {
+	fmt.Println("[DEBUGGER] " + strings.Repeat("    ", debugger.indent) + stringToPrint)
+}
+
+// paramsAsCommaSeparatedList creates a comma separated list of the given parameters.
+func (debugger *Debugger) paramsAsCommaSeparatedList(params ...interface{}) string {
+	paramsAsStrings := make([]string, len(params))
+	for i, param := range params {
+		paramsAsStrings[i] = debugger.paramAsString(param)
+	}
+
+	return strings.Join(paramsAsStrings, ", ")
+}
+
+// paramAsString returns a string representation of an arbitrary parameter.
+func (debugger *Debugger) paramAsString(param interface{}) string {
+	defer func() { recover() }()
+	if alias, aliasExists := debugger.aliases[param]; aliasExists {
+		return alias
+	}
+
+	return fmt.Sprint(param)
+}
+
+// debugger contains the default global debugger instance.
+var debugger = NewDebugger()
diff --git a/dapps/valuetransfers/packages/tangle/events.go b/dapps/valuetransfers/packages/tangle/events.go
index c19fa3530f86aa5d0d7dd60887d579fbe572d824..8194f08f1e9d8e2fe71bc17b0627e41cd276cf84 100644
--- a/dapps/valuetransfers/packages/tangle/events.go
+++ b/dapps/valuetransfers/packages/tangle/events.go
@@ -1,11 +1,10 @@
 package tangle
 
 import (
-	"github.com/iotaledger/hive.go/events"
-
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+	"github.com/iotaledger/hive.go/events"
 )
 
 // Events is a container for the different kind of events of the Tangle.
@@ -19,11 +18,18 @@ type Events struct {
 	PayloadDisliked        *events.Event
 	MissingPayloadReceived *events.Event
 	PayloadMissing         *events.Event
+	PayloadInvalid         *events.Event
 	PayloadUnsolidifiable  *events.Event
 
 	// TransactionReceived gets triggered whenever a transaction was received for the first time (not solid yet).
 	TransactionReceived *events.Event
 
+	// TransactionInvalid gets triggered whenever we receive an invalid transaction.
+	TransactionInvalid *events.Event
+
+	// TransactionSolid gets triggered whenever a transaction becomes solid for the first time.
+	TransactionSolid *events.Event
+
 	// TransactionBooked gets triggered whenever a transaction becomes solid and gets booked into a particular branch.
 	TransactionBooked *events.Event
 
@@ -74,8 +80,11 @@ func newEvents() *Events {
 		PayloadDisliked:        events.NewEvent(cachedPayloadEvent),
 		MissingPayloadReceived: events.NewEvent(cachedPayloadEvent),
 		PayloadMissing:         events.NewEvent(payloadIDEvent),
+		PayloadInvalid:         events.NewEvent(cachedPayloadErrorEvent),
 		PayloadUnsolidifiable:  events.NewEvent(payloadIDEvent),
 		TransactionReceived:    events.NewEvent(cachedTransactionAttachmentEvent),
+		TransactionInvalid:     events.NewEvent(cachedTransactionErrorEvent),
+		TransactionSolid:       events.NewEvent(cachedTransactionEvent),
 		TransactionBooked:      events.NewEvent(transactionBookedEvent),
 		TransactionPreferred:   events.NewEvent(cachedTransactionEvent),
 		TransactionUnpreferred: events.NewEvent(cachedTransactionEvent),
@@ -100,6 +109,14 @@ func cachedPayloadEvent(handler interface{}, params ...interface{}) {
 	)
 }
 
+func cachedPayloadErrorEvent(handler interface{}, params ...interface{}) {
+	handler.(func(*payload.CachedPayload, *CachedPayloadMetadata, error))(
+		params[0].(*payload.CachedPayload).Retain(),
+		params[1].(*CachedPayloadMetadata).Retain(),
+		params[2].(error),
+	)
+}
+
 func transactionBookedEvent(handler interface{}, params ...interface{}) {
 	handler.(func(*transaction.CachedTransaction, *CachedTransactionMetadata, bool))(
 		params[0].(*transaction.CachedTransaction).Retain(),
@@ -124,6 +141,14 @@ func cachedTransactionEvent(handler interface{}, params ...interface{}) {
 	)
 }
 
+func cachedTransactionErrorEvent(handler interface{}, params ...interface{}) {
+	handler.(func(*transaction.CachedTransaction, *CachedTransactionMetadata, error))(
+		params[0].(*transaction.CachedTransaction).Retain(),
+		params[1].(*CachedTransactionMetadata).Retain(),
+		params[2].(error),
+	)
+}
+
 func cachedTransactionAttachmentEvent(handler interface{}, params ...interface{}) {
 	handler.(func(*transaction.CachedTransaction, *CachedTransactionMetadata, *CachedAttachment))(
 		params[0].(*transaction.CachedTransaction).Retain(),
diff --git a/dapps/valuetransfers/packages/tangle/imgs/concurrency.png b/dapps/valuetransfers/packages/tangle/imgs/concurrency.png
new file mode 100644
index 0000000000000000000000000000000000000000..5238ce8b1d2b3b3b7f9c4e5ab095eac0651bcd0b
Binary files /dev/null and b/dapps/valuetransfers/packages/tangle/imgs/concurrency.png differ
diff --git a/dapps/valuetransfers/packages/tangle/imgs/reverse-transaction-solidification.png b/dapps/valuetransfers/packages/tangle/imgs/reverse-transaction-solidification.png
new file mode 100644
index 0000000000000000000000000000000000000000..0d63b99fe60c208e457b1a3d3d249fd27ca32a95
Binary files /dev/null and b/dapps/valuetransfers/packages/tangle/imgs/reverse-transaction-solidification.png differ
diff --git a/dapps/valuetransfers/packages/tangle/imgs/reverse-valueobject-solidification.png b/dapps/valuetransfers/packages/tangle/imgs/reverse-valueobject-solidification.png
new file mode 100644
index 0000000000000000000000000000000000000000..3c1d3ecf6da6f78bdcdd06fb3bb104e785af3331
Binary files /dev/null and b/dapps/valuetransfers/packages/tangle/imgs/reverse-valueobject-solidification.png differ
diff --git a/dapps/valuetransfers/packages/tangle/imgs/scenario1.png b/dapps/valuetransfers/packages/tangle/imgs/scenario1.png
new file mode 100644
index 0000000000000000000000000000000000000000..73b9b7cd0be6ce152a07d59bc5433179eb942b3f
Binary files /dev/null and b/dapps/valuetransfers/packages/tangle/imgs/scenario1.png differ
diff --git a/dapps/valuetransfers/packages/tangle/imgs/scenario2.png b/dapps/valuetransfers/packages/tangle/imgs/scenario2.png
new file mode 100644
index 0000000000000000000000000000000000000000..653997ff5e2e5d8ce3fc7ef8df378bac6ce85bd8
Binary files /dev/null and b/dapps/valuetransfers/packages/tangle/imgs/scenario2.png differ
diff --git a/dapps/valuetransfers/packages/tangle/missingoutput.go b/dapps/valuetransfers/packages/tangle/missingoutput.go
index d04a3e8f600fc75f84f4a1868b563c1e91c5fe04..1fc6687bf4b800c1001a780fb9a84c2b7d0d0bbe 100644
--- a/dapps/valuetransfers/packages/tangle/missingoutput.go
+++ b/dapps/valuetransfers/packages/tangle/missingoutput.go
@@ -3,11 +3,10 @@ package tangle
 import (
 	"time"
 
-	"github.com/iotaledger/hive.go/marshalutil"
-	"github.com/iotaledger/hive.go/objectstorage"
-
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+	"github.com/iotaledger/hive.go/marshalutil"
+	"github.com/iotaledger/hive.go/objectstorage"
 )
 
 // MissingOutputKeyPartitions defines the "layout" of the key. This enables prefix iterations in the objectstorage.
diff --git a/dapps/valuetransfers/packages/tangle/missingpayload.go b/dapps/valuetransfers/packages/tangle/missingpayload.go
index ef439b0ccc9b7e99ecb58e787d5edacc9123b040..52933dddb8242475c39cdb253822ef9d36b5d04f 100644
--- a/dapps/valuetransfers/packages/tangle/missingpayload.go
+++ b/dapps/valuetransfers/packages/tangle/missingpayload.go
@@ -3,10 +3,9 @@ package tangle
 import (
 	"time"
 
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
 	"github.com/iotaledger/hive.go/marshalutil"
 	"github.com/iotaledger/hive.go/objectstorage"
-
-	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
 )
 
 // MissingPayload represents a payload that was referenced through branch or trunk but that is missing in our object
diff --git a/dapps/valuetransfers/packages/tangle/objectstorage.go b/dapps/valuetransfers/packages/tangle/objectstorage.go
index 7c1eb49f30893200661a3ee6e198c456d2e44eb2..29fee08f3ce8419119ce562dbe67707adc1cd3c3 100644
--- a/dapps/valuetransfers/packages/tangle/objectstorage.go
+++ b/dapps/valuetransfers/packages/tangle/objectstorage.go
@@ -3,10 +3,9 @@ package tangle
 import (
 	"time"
 
-	"github.com/iotaledger/hive.go/objectstorage"
-
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+	"github.com/iotaledger/hive.go/objectstorage"
 )
 
 const (
@@ -26,7 +25,7 @@ const (
 )
 
 var (
-	osLeakDetectionOption = objectstorage.LeakDetectionEnabled(true, objectstorage.LeakDetectionOptions{
+	osLeakDetectionOption = objectstorage.LeakDetectionEnabled(false, objectstorage.LeakDetectionOptions{
 		MaxConsumersPerObject: 20,
 		MaxConsumerHoldTime:   10 * time.Second,
 	})
diff --git a/dapps/valuetransfers/packages/tangle/output.go b/dapps/valuetransfers/packages/tangle/output.go
index d7430ee6145dd497815f3c66f76f732cb3da4550..faad3a5ca3db3f1d8ea9858477307a2413a8eb4b 100644
--- a/dapps/valuetransfers/packages/tangle/output.go
+++ b/dapps/valuetransfers/packages/tangle/output.go
@@ -4,14 +4,13 @@ import (
 	"sync"
 	"time"
 
-	"github.com/iotaledger/hive.go/marshalutil"
-	"github.com/iotaledger/hive.go/objectstorage"
-	"github.com/iotaledger/hive.go/stringify"
-
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+	"github.com/iotaledger/hive.go/marshalutil"
+	"github.com/iotaledger/hive.go/objectstorage"
+	"github.com/iotaledger/hive.go/stringify"
 )
 
 // OutputKeyPartitions defines the "layout" of the key. This enables prefix iterations in the objectstorage.
diff --git a/dapps/valuetransfers/packages/tangle/output_test.go b/dapps/valuetransfers/packages/tangle/output_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..47918395b92a13adbf8164933e7cb3c2ebc546e4
--- /dev/null
+++ b/dapps/valuetransfers/packages/tangle/output_test.go
@@ -0,0 +1,45 @@
+package tangle
+
+import (
+	"testing"
+	"time"
+
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+	"github.com/stretchr/testify/assert"
+)
+
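+// TestNewOutput checks the basic getters and setters of an Output and verifies that marshaling and unmarshaling
+// preserves its information.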
+func TestNewOutput(t *testing.T) {
+	randomAddress := address.Random()
+	randomTransactionID := transaction.RandomID()
+
+	output := NewOutput(randomAddress, randomTransactionID, branchmanager.MasterBranchID, []*balance.Balance{
+		balance.New(balance.ColorIOTA, 1337),
+	})
+
+	assert.Equal(t, randomAddress, output.Address())
+	assert.Equal(t, randomTransactionID, output.TransactionID())
+	assert.Equal(t, false, output.Solid())
+	assert.Equal(t, time.Time{}, output.SolidificationTime())
+	assert.Equal(t, []*balance.Balance{
+		balance.New(balance.ColorIOTA, 1337),
+	}, output.Balances())
+
+	assert.Equal(t, true, output.setSolid(true))
+	assert.Equal(t, false, output.setSolid(true))
+	assert.Equal(t, true, output.Solid())
+	assert.NotEqual(t, time.Time{}, output.SolidificationTime())
+
+	clonedOutput, _, err := OutputFromBytes(output.Bytes())
+	if err != nil {
+		panic(err)
+	}
+
+	assert.Equal(t, output.Address(), clonedOutput.Address())
+	assert.Equal(t, output.TransactionID(), clonedOutput.TransactionID())
+	assert.Equal(t, output.Solid(), clonedOutput.Solid())
+	assert.Equal(t, output.SolidificationTime().Round(time.Second), clonedOutput.SolidificationTime().Round(time.Second))
+	assert.Equal(t, output.Balances(), clonedOutput.Balances())
+}
diff --git a/dapps/valuetransfers/packages/tangle/payloadapprover.go b/dapps/valuetransfers/packages/tangle/payloadapprover.go
index 8495853c5a1a6e161c0e0a7ae67533cec86e83d5..572249db087a5d3ea065c823b7803e387261a852 100644
--- a/dapps/valuetransfers/packages/tangle/payloadapprover.go
+++ b/dapps/valuetransfers/packages/tangle/payloadapprover.go
@@ -1,10 +1,9 @@
 package tangle
 
 import (
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
 	"github.com/iotaledger/hive.go/marshalutil"
 	"github.com/iotaledger/hive.go/objectstorage"
-
-	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
 )
 
 // PayloadApprover is a database entity, that allows us to keep track of the "tangle structure" by encoding which
diff --git a/dapps/valuetransfers/packages/tangle/payloadmetadata_test.go b/dapps/valuetransfers/packages/tangle/payloadmetadata_test.go
index 90bf32bd973b45c265997c51c324cca6d7079dc1..501bcf792c5c193fbe319e10366500f7a8dd6388 100644
--- a/dapps/valuetransfers/packages/tangle/payloadmetadata_test.go
+++ b/dapps/valuetransfers/packages/tangle/payloadmetadata_test.go
@@ -4,9 +4,8 @@ import (
 	"testing"
 	"time"
 
-	"github.com/stretchr/testify/assert"
-
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestMarshalUnmarshal(t *testing.T) {
diff --git a/dapps/valuetransfers/packages/tangle/tangle.go b/dapps/valuetransfers/packages/tangle/tangle.go
index aed986e4d14741128d9c65d8a5a934621c3d259d..fd1d4e377d036985f9c2c396c15eae176ce3120a 100644
--- a/dapps/valuetransfers/packages/tangle/tangle.go
+++ b/dapps/valuetransfers/packages/tangle/tangle.go
@@ -10,6 +10,7 @@ import (
 	"github.com/iotaledger/hive.go/async"
 	"github.com/iotaledger/hive.go/events"
 	"github.com/iotaledger/hive.go/kvstore"
+	"github.com/iotaledger/hive.go/marshalutil"
 	"github.com/iotaledger/hive.go/objectstorage"
 	"github.com/iotaledger/hive.go/types"
 
@@ -49,15 +50,15 @@ func New(store kvstore.KVStore) (tangle *Tangle) {
 	tangle = &Tangle{
 		branchManager: branchmanager.New(store),
 
-		payloadStorage:             osFactory.New(osPayload, osPayloadFactory, objectstorage.CacheTime(time.Second)),
-		payloadMetadataStorage:     osFactory.New(osPayloadMetadata, osPayloadMetadataFactory, objectstorage.CacheTime(time.Second)),
-		missingPayloadStorage:      osFactory.New(osMissingPayload, osMissingPayloadFactory, objectstorage.CacheTime(time.Second)),
-		approverStorage:            osFactory.New(osApprover, osPayloadApproverFactory, objectstorage.CacheTime(time.Second), objectstorage.PartitionKey(payload.IDLength, payload.IDLength), objectstorage.KeysOnly(true)),
-		transactionStorage:         osFactory.New(osTransaction, osTransactionFactory, objectstorage.CacheTime(time.Second), osLeakDetectionOption),
-		transactionMetadataStorage: osFactory.New(osTransactionMetadata, osTransactionMetadataFactory, objectstorage.CacheTime(time.Second), osLeakDetectionOption),
-		attachmentStorage:          osFactory.New(osAttachment, osAttachmentFactory, objectstorage.CacheTime(time.Second), objectstorage.PartitionKey(transaction.IDLength, payload.IDLength), osLeakDetectionOption),
-		outputStorage:              osFactory.New(osOutput, osOutputFactory, OutputKeyPartitions, objectstorage.CacheTime(time.Second), osLeakDetectionOption),
-		consumerStorage:            osFactory.New(osConsumer, osConsumerFactory, ConsumerPartitionKeys, objectstorage.CacheTime(time.Second), osLeakDetectionOption),
+		payloadStorage:             osFactory.New(osPayload, osPayloadFactory, objectstorage.CacheTime(1*time.Second)),
+		payloadMetadataStorage:     osFactory.New(osPayloadMetadata, osPayloadMetadataFactory, objectstorage.CacheTime(1*time.Second)),
+		missingPayloadStorage:      osFactory.New(osMissingPayload, osMissingPayloadFactory, objectstorage.CacheTime(1*time.Second)),
+		approverStorage:            osFactory.New(osApprover, osPayloadApproverFactory, objectstorage.CacheTime(1*time.Second), objectstorage.PartitionKey(payload.IDLength, payload.IDLength), objectstorage.KeysOnly(true)),
+		transactionStorage:         osFactory.New(osTransaction, osTransactionFactory, objectstorage.CacheTime(1*time.Second), osLeakDetectionOption),
+		transactionMetadataStorage: osFactory.New(osTransactionMetadata, osTransactionMetadataFactory, objectstorage.CacheTime(1*time.Second), osLeakDetectionOption),
+		attachmentStorage:          osFactory.New(osAttachment, osAttachmentFactory, objectstorage.CacheTime(1*time.Second), objectstorage.PartitionKey(transaction.IDLength, payload.IDLength), osLeakDetectionOption),
+		outputStorage:              osFactory.New(osOutput, osOutputFactory, OutputKeyPartitions, objectstorage.CacheTime(1*time.Second), osLeakDetectionOption),
+		consumerStorage:            osFactory.New(osConsumer, osConsumerFactory, ConsumerPartitionKeys, objectstorage.CacheTime(1*time.Second), osLeakDetectionOption),
 
 		Events: *newEvents(),
 	}
@@ -452,12 +453,12 @@ func (tangle *Tangle) onBranchFinalized(cachedBranch *branchmanager.CachedBranch
 
 // onBranchConfirmed gets triggered when a branch in the branch DAG is marked as confirmed.
 func (tangle *Tangle) onBranchConfirmed(cachedBranch *branchmanager.CachedBranch) {
-	tangle.propagateBranchConfirmedRejectedChangesToTangle(cachedBranch)
+	tangle.propagateBranchConfirmedRejectedChangesToTangle(cachedBranch, true)
 }
 
 // onBranchRejected gets triggered when a branch in the branch DAG is marked as rejected.
 func (tangle *Tangle) onBranchRejected(cachedBranch *branchmanager.CachedBranch) {
-	tangle.propagateBranchConfirmedRejectedChangesToTangle(cachedBranch)
+	tangle.propagateBranchConfirmedRejectedChangesToTangle(cachedBranch, false)
 }
 
 // propagateBranchPreferredChangesToTangle triggers the propagation of preferred status changes of a branch to the value
@@ -518,7 +519,7 @@ func (tangle *Tangle) propagateBranchedLikedChangesToTangle(cachedBranch *branch
 
 // propagateBranchConfirmedRejectedChangesToTangle triggers the propagation of confirmed and rejected status changes of
 // a branch to the value tangle and its UTXO DAG.
-func (tangle *Tangle) propagateBranchConfirmedRejectedChangesToTangle(cachedBranch *branchmanager.CachedBranch) {
+func (tangle *Tangle) propagateBranchConfirmedRejectedChangesToTangle(cachedBranch *branchmanager.CachedBranch, confirmed bool) {
 	cachedBranch.Consume(func(branch *branchmanager.Branch) {
 		if !branch.IsAggregated() {
 			transactionID, _, err := transaction.IDFromBytes(branch.ID().Bytes())
@@ -527,7 +528,7 @@ func (tangle *Tangle) propagateBranchConfirmedRejectedChangesToTangle(cachedBran
 			}
 
 			// propagate changes to future cone of transaction (value tangle)
-			tangle.propagateValuePayloadConfirmedRejectedUpdates(transactionID)
+			tangle.propagateValuePayloadConfirmedRejectedUpdates(transactionID, confirmed)
 		}
 	})
 }
@@ -537,6 +538,8 @@ func (tangle *Tangle) propagateBranchConfirmedRejectedChangesToTangle(cachedBran
 // region PRIVATE UTILITY METHODS //////////////////////////////////////////////////////////////////////////////////////
 
 func (tangle *Tangle) setTransactionFinalized(transactionID transaction.ID, eventSource EventSource) (modified bool, err error) {
+	defer debugger.FunctionCall("setTransactionFinalized", transactionID, eventSource).Return()
+
 	// retrieve metadata and consume
 	cachedTransactionMetadata := tangle.TransactionMetadata(transactionID)
 	cachedTransactionMetadata.Consume(func(metadata *TransactionMetadata) {
@@ -556,7 +559,7 @@ func (tangle *Tangle) setTransactionFinalized(transactionID transaction.ID, even
 			tangle.Events.TransactionFinalized.Trigger(cachedTransaction, cachedTransactionMetadata)
 
 			// propagate the rejected flag
-			if !metadata.Preferred() {
+			if !metadata.Preferred() && !metadata.Rejected() {
 				tangle.propagateRejectedToTransactions(metadata.ID())
 			}
 
@@ -576,7 +579,7 @@ func (tangle *Tangle) setTransactionFinalized(transactionID transaction.ID, even
 				}
 
 				// propagate changes to future cone of transaction (value tangle)
-				tangle.propagateValuePayloadConfirmedRejectedUpdates(transactionID)
+				tangle.propagateValuePayloadConfirmedRejectedUpdates(transactionID, metadata.Preferred())
 			}
 		}
 	})
@@ -586,6 +589,8 @@ func (tangle *Tangle) setTransactionFinalized(transactionID transaction.ID, even
 
 // propagateRejectedToTransactions propagates the rejected flag to a transaction, its outputs and to its consumers.
 func (tangle *Tangle) propagateRejectedToTransactions(transactionID transaction.ID) {
+	defer debugger.FunctionCall("propagateRejectedToTransactions", transactionID).Return()
+
 	// initialize stack with first transaction
 	rejectedPropagationStack := list.New()
 	rejectedPropagationStack.PushBack(transactionID)
@@ -600,11 +605,23 @@ func (tangle *Tangle) propagateRejectedToTransactions(transactionID transaction.
 		rejectedPropagationStack.Remove(firstElement)
 		currentTransactionID := firstElement.Value.(transaction.ID)
 
+		debugger.Print("rejectedPropagationStack.Front()", currentTransactionID)
+
 		cachedTransactionMetadata := tangle.TransactionMetadata(currentTransactionID)
 		cachedTransactionMetadata.Consume(func(metadata *TransactionMetadata) {
 			if !metadata.setRejected(true) {
 				return
 			}
+			metadata.setPreferred(false)
+
+			// if the transaction is not finalized yet, then we set it to finalized now
+			if !metadata.Finalized() {
+				if _, err := tangle.setTransactionFinalized(metadata.ID(), EventSourceTangle); err != nil {
+					tangle.Events.Error.Trigger(err)
+
+					return
+				}
+			}
 
 			cachedTransaction := tangle.Transaction(currentTransactionID)
 			cachedTransaction.Consume(func(tx *transaction.Transaction) {
@@ -638,7 +655,9 @@ func (tangle *Tangle) propagateRejectedToTransactions(transactionID transaction.
 }
 
-// TODO: WRITE COMMENT
-func (tangle *Tangle) propagateValuePayloadConfirmedRejectedUpdates(transactionID transaction.ID) {
+// propagateValuePayloadConfirmedRejectedUpdates propagates the confirmed or rejected status of the given transaction
+// to the value payloads in its future cone, starting with its attachments.
+func (tangle *Tangle) propagateValuePayloadConfirmedRejectedUpdates(transactionID transaction.ID, confirmed bool) {
+	defer debugger.FunctionCall("propagateValuePayloadConfirmedRejectedUpdates", transactionID, confirmed).Return()
+
 	// initiate stack with the attachments of the passed in transaction
 	propagationStack := list.New()
 	tangle.Attachments(transactionID).Consume(func(attachment *Attachment) {
@@ -650,18 +669,15 @@ func (tangle *Tangle) propagateValuePayloadConfirmedRejectedUpdates(transactionI
 		})
 	})
 
-	// keep track of the seen payloads so we do not process them twice
-	seenPayloads := make(map[payload.ID]types.Empty)
-
 	// iterate through stack (future cone of transactions)
 	for propagationStack.Len() >= 1 {
 		currentAttachmentEntry := propagationStack.Front()
-		tangle.propagateValuePayloadConfirmedRejectedUpdateStackEntry(propagationStack, seenPayloads, currentAttachmentEntry.Value.(*valuePayloadPropagationStackEntry))
+		tangle.propagateValuePayloadConfirmedRejectedUpdateStackEntry(propagationStack, currentAttachmentEntry.Value.(*valuePayloadPropagationStackEntry), confirmed)
 		propagationStack.Remove(currentAttachmentEntry)
 	}
 }
 
-func (tangle *Tangle) propagateValuePayloadConfirmedRejectedUpdateStackEntry(propagationStack *list.List, processedPayloads map[payload.ID]types.Empty, propagationStackEntry *valuePayloadPropagationStackEntry) {
+func (tangle *Tangle) propagateValuePayloadConfirmedRejectedUpdateStackEntry(propagationStack *list.List, propagationStackEntry *valuePayloadPropagationStackEntry, confirmed bool) {
 	// release the entry when we are done
 	defer propagationStackEntry.Release()
 
@@ -671,11 +687,13 @@ func (tangle *Tangle) propagateValuePayloadConfirmedRejectedUpdateStackEntry(pro
 		return
 	}
 
+	defer debugger.FunctionCall("propagateValuePayloadConfirmedRejectedUpdateStackEntry", currentPayload.ID(), currentTransaction.ID()).Return()
+
 	// perform different logic depending on the type of the change (liked vs dislike)
-	switch currentTransactionMetadata.Preferred() {
+	switch confirmed {
 	case true:
-		// abort if the transaction is not preferred, the branch of the payload is not liked, the referenced value payloads are not liked or the payload was marked as liked before
-		if !currentTransactionMetadata.Finalized() || !tangle.BranchManager().IsBranchConfirmed(currentPayloadMetadata.BranchID()) || !tangle.ValuePayloadsConfirmed(currentPayload.TrunkID(), currentPayload.BranchID()) || !currentPayloadMetadata.setConfirmed(true) {
+		// abort if the transaction is not preferred or not finalized, the branch of the payload is not confirmed, the
+		// referenced value payloads are not confirmed or the payload was marked as confirmed before
+		if !currentTransactionMetadata.Preferred() || !currentTransactionMetadata.Finalized() || !tangle.BranchManager().IsBranchConfirmed(currentPayloadMetadata.BranchID()) || !tangle.ValuePayloadsConfirmed(currentPayload.TrunkID(), currentPayload.BranchID()) || !currentPayloadMetadata.setConfirmed(true) {
 			return
 		}
 
@@ -695,6 +713,11 @@ func (tangle *Tangle) propagateValuePayloadConfirmedRejectedUpdateStackEntry(pro
 			tangle.Events.TransactionConfirmed.Trigger(propagationStackEntry.CachedTransaction, propagationStackEntry.CachedTransactionMetadata)
 		}
 	case false:
+		// abort if the transaction is not finalized and neither of the parent payloads is rejected
+		if !currentTransactionMetadata.Finalized() && !(tangle.payloadRejected(currentPayload.BranchID()) || tangle.payloadRejected(currentPayload.TrunkID())) {
+			return
+		}
+
 		// abort if the payload has been marked as disliked before
 		if !currentPayloadMetadata.setRejected(true) {
 			return
@@ -704,7 +727,7 @@ func (tangle *Tangle) propagateValuePayloadConfirmedRejectedUpdateStackEntry(pro
 	}
 
 	// schedule checks of approvers and consumers
-	tangle.ForEachConsumersAndApprovers(currentPayload, tangle.createValuePayloadFutureConeIterator(propagationStack, processedPayloads))
+	tangle.ForEachConsumersAndApprovers(currentPayload, tangle.createValuePayloadFutureConeIterator(propagationStack, make(map[payload.ID]types.Empty)))
 }
 
 // setTransactionPreferred is an internal utility method that updates the preferred flag and triggers changes to the
@@ -771,13 +794,10 @@ func (tangle *Tangle) propagateValuePayloadLikeUpdates(transactionID transaction
 		})
 	})
 
-	// keep track of the seen payloads so we do not process them twice
-	seenPayloads := make(map[payload.ID]types.Empty)
-
 	// iterate through stack (future cone of transactions)
 	for propagationStack.Len() >= 1 {
 		currentAttachmentEntry := propagationStack.Front()
-		tangle.processValuePayloadLikedUpdateStackEntry(propagationStack, seenPayloads, liked, currentAttachmentEntry.Value.(*valuePayloadPropagationStackEntry))
+		tangle.processValuePayloadLikedUpdateStackEntry(propagationStack, liked, currentAttachmentEntry.Value.(*valuePayloadPropagationStackEntry))
 		propagationStack.Remove(currentAttachmentEntry)
 	}
 }
@@ -786,7 +806,7 @@ func (tangle *Tangle) propagateValuePayloadLikeUpdates(transactionID transaction
 // propagation stack for the update of the liked flag when iterating through the future cone of a transaction's
 // attachments. It checks if a ValuePayload has become liked (or disliked), updates the flag and schedules its future
 // cone for additional checks.
-func (tangle *Tangle) processValuePayloadLikedUpdateStackEntry(propagationStack *list.List, processedPayloads map[payload.ID]types.Empty, liked bool, propagationStackEntry *valuePayloadPropagationStackEntry) {
+func (tangle *Tangle) processValuePayloadLikedUpdateStackEntry(propagationStack *list.List, liked bool, propagationStackEntry *valuePayloadPropagationStackEntry) {
 	// release the entry when we are done
 	defer propagationStackEntry.Release()
 
@@ -855,7 +875,7 @@ func (tangle *Tangle) processValuePayloadLikedUpdateStackEntry(propagationStack
 	}
 
 	// schedule checks of approvers and consumers
-	tangle.ForEachConsumersAndApprovers(currentPayload, tangle.createValuePayloadFutureConeIterator(propagationStack, processedPayloads))
+	tangle.ForEachConsumersAndApprovers(currentPayload, tangle.createValuePayloadFutureConeIterator(propagationStack, make(map[payload.ID]types.Empty)))
 }
 
 // createValuePayloadFutureConeIterator returns a function that can be handed into the ForEachConsumersAndApprovers
@@ -891,13 +911,20 @@ func (tangle *Tangle) createValuePayloadFutureConeIterator(propagationStack *lis
 	}
 }
 
+// payloadRejected is an internal utility function that returns true if the metadata of the given payload is marked as
+// rejected.
+func (tangle *Tangle) payloadRejected(payloadID payload.ID) (rejected bool) {
+	tangle.PayloadMetadata(payloadID).Consume(func(payloadMetadata *PayloadMetadata) {
+		rejected = payloadMetadata.Rejected()
+	})
+	return
+}
+
 func (tangle *Tangle) storePayload(payloadToStore *payload.Payload) (cachedPayload *payload.CachedPayload, cachedMetadata *CachedPayloadMetadata, payloadStored bool) {
-	storedTransaction, transactionIsNew := tangle.payloadStorage.StoreIfAbsent(payloadToStore)
-	if !transactionIsNew {
+	storedPayload, newPayload := tangle.payloadStorage.StoreIfAbsent(payloadToStore)
+	if !newPayload {
 		return
 	}
 
-	cachedPayload = &payload.CachedPayload{CachedObject: storedTransaction}
+	cachedPayload = &payload.CachedPayload{CachedObject: storedPayload}
 	cachedMetadata = &CachedPayloadMetadata{CachedObject: tangle.payloadMetadataStorage.Store(NewPayloadMetadata(payloadToStore.ID()))}
 	payloadStored = true
 
@@ -960,20 +987,147 @@ func (tangle *Tangle) solidifyPayload(cachedPayload *payload.CachedPayload, cach
 		CachedTransactionMetadata: cachedTransactionMetadata,
 	})
 
-	// keep track of the added payloads so we do not add them multiple times
-	processedPayloads := make(map[payload.ID]types.Empty)
-
 	// process payloads that are supposed to be checked for solidity recursively
 	for solidificationStack.Len() > 0 {
 		currentSolidificationEntry := solidificationStack.Front()
-		tangle.processSolidificationStackEntry(solidificationStack, processedPayloads, currentSolidificationEntry.Value.(*valuePayloadPropagationStackEntry))
+		tangle.processSolidificationStackEntry(solidificationStack, currentSolidificationEntry.Value.(*valuePayloadPropagationStackEntry))
 		solidificationStack.Remove(currentSolidificationEntry)
 	}
 }
 
+// deleteTransactionFutureCone removes a transaction and its whole future cone from the database (including all of the
+// reference models).
+func (tangle *Tangle) deleteTransactionFutureCone(transactionID transaction.ID) {
+	// initialize stack with current transaction
+	deleteStack := list.New()
+	deleteStack.PushBack(transactionID)
+
+	// iterate through stack
+	for deleteStack.Len() >= 1 {
+		// pop first element from stack
+		currentTransactionIDEntry := deleteStack.Front()
+		deleteStack.Remove(currentTransactionIDEntry)
+		currentTransactionID := currentTransactionIDEntry.Value.(transaction.ID)
+
+		// delete the transaction
+		consumers, attachments := tangle.deleteTransaction(currentTransactionID)
+
+		// queue consumers to also be deleted
+		for _, consumer := range consumers {
+			deleteStack.PushBack(consumer)
+		}
+
+		// remove payload future cone
+		for _, attachingPayloadID := range attachments {
+			tangle.deletePayloadFutureCone(attachingPayloadID)
+		}
+	}
+}
+
+// deleteTransaction deletes a single transaction and all of its related models from the database.
+// Note: We do not immediately remove the attachments, as they are tied to the Payloads; cleaning them up is therefore
+//       left to the caller.
+func (tangle *Tangle) deleteTransaction(transactionID transaction.ID) (consumers []transaction.ID, attachments []payload.ID) {
+	// create result
+	consumers = make([]transaction.ID, 0)
+	attachments = make([]payload.ID, 0)
+
+	// process transaction and its models
+	tangle.Transaction(transactionID).Consume(func(tx *transaction.Transaction) {
+		// mark transaction as deleted
+		tx.Delete()
+
+		// cleanup inputs
+		tx.Inputs().ForEach(func(outputId transaction.OutputID) bool {
+			// delete consumer pointers of the inputs of the current transaction
+			tangle.consumerStorage.Delete(marshalutil.New(transaction.OutputIDLength + transaction.IDLength).WriteBytes(outputId.Bytes()).WriteBytes(transactionID.Bytes()).Bytes())
+
+			return true
+		})
+
+		// introduce map to keep track of seen consumers (so we don't process them twice)
+		seenConsumers := make(map[transaction.ID]types.Empty)
+		seenConsumers[transactionID] = types.Void
+
+		// cleanup outputs
+		tx.Outputs().ForEach(func(addr address.Address, balances []*balance.Balance) bool {
+			// delete outputs
+			tangle.outputStorage.Delete(marshalutil.New(address.Length + transaction.IDLength).WriteBytes(addr.Bytes()).WriteBytes(transactionID.Bytes()).Bytes())
+
+			// process consumers
+			tangle.Consumers(transaction.NewOutputID(addr, transactionID)).Consume(func(consumer *Consumer) {
+				// check if the transaction has been queued already
+				if _, consumerSeenAlready := seenConsumers[consumer.TransactionID()]; consumerSeenAlready {
+					return
+				}
+				seenConsumers[consumer.TransactionID()] = types.Void
+
+				// queue consumers for deletion
+				consumers = append(consumers, consumer.TransactionID())
+			})
+
+			return true
+		})
+	})
+
+	// delete transaction metadata
+	tangle.transactionMetadataStorage.Delete(transactionID.Bytes())
+
+	// process attachments
+	tangle.Attachments(transactionID).Consume(func(attachment *Attachment) {
+		attachments = append(attachments, attachment.PayloadID())
+	})
+
+	return
+}
+
+// deletePayloadFutureCone removes a payload and its whole future cone from the database (including all of the reference
+// models).
+func (tangle *Tangle) deletePayloadFutureCone(payloadID payload.ID) {
+	// initialize stack with current transaction
+	deleteStack := list.New()
+	deleteStack.PushBack(payloadID)
+
+	// iterate through stack
+	for deleteStack.Len() >= 1 {
+		// pop first element from stack
+		currentTransactionIDEntry := deleteStack.Front()
+		deleteStack.Remove(currentTransactionIDEntry)
+		currentPayloadID := currentTransactionIDEntry.Value.(payload.ID)
+
+		// process payload
+		tangle.Payload(currentPayloadID).Consume(func(currentPayload *payload.Payload) {
+			// delete payload
+			currentPayload.Delete()
+
+			// delete approvers
+			tangle.approverStorage.Delete(marshalutil.New(2 * payload.IDLength).WriteBytes(currentPayload.BranchID().Bytes()).WriteBytes(currentPayloadID.Bytes()).Bytes())
+			if currentPayload.TrunkID() != currentPayload.BranchID() {
+				tangle.approverStorage.Delete(marshalutil.New(2 * payload.IDLength).WriteBytes(currentPayload.TrunkID().Bytes()).WriteBytes(currentPayloadID.Bytes()).Bytes())
+			}
+
+			// delete attachment
+			tangle.attachmentStorage.Delete(marshalutil.New(transaction.IDLength + payload.IDLength).WriteBytes(currentPayload.Transaction().ID().Bytes()).WriteBytes(currentPayloadID.Bytes()).Bytes())
+
+			// if this was the last attachment of the transaction then we also delete the transaction
+			if !tangle.Attachments(currentPayload.Transaction().ID()).Consume(func(attachment *Attachment) {}) {
+				tangle.deleteTransaction(currentPayload.Transaction().ID())
+			}
+		})
+
+		// delete payload metadata
+		tangle.payloadMetadataStorage.Delete(currentPayloadID.Bytes())
+
+		// queue approvers
+		tangle.Approvers(currentPayloadID).Consume(func(approver *PayloadApprover) {
+			deleteStack.PushBack(approver.ApprovingPayloadID())
+		})
+	}
+}
+
 // processSolidificationStackEntry processes a single entry of the solidification stack and schedules its approvers and
 // consumers if necessary.
-func (tangle *Tangle) processSolidificationStackEntry(solidificationStack *list.List, processedPayloads map[payload.ID]types.Empty, solidificationStackEntry *valuePayloadPropagationStackEntry) {
+func (tangle *Tangle) processSolidificationStackEntry(solidificationStack *list.List, solidificationStackEntry *valuePayloadPropagationStackEntry) {
 	// release stack entry when we are done
 	defer solidificationStackEntry.Release()
 
@@ -986,7 +1140,9 @@ func (tangle *Tangle) processSolidificationStackEntry(solidificationStack *list.
 	// abort if the transaction is not solid or invalid
 	transactionSolid, consumedBranches, transactionSolidityErr := tangle.checkTransactionSolidity(currentTransaction, currentTransactionMetadata)
 	if transactionSolidityErr != nil {
-		// TODO: TRIGGER INVALID TX + REMOVE TXS + PAYLOADS THAT APPROVE IT
+		tangle.Events.TransactionInvalid.Trigger(solidificationStackEntry.CachedTransaction, solidificationStackEntry.CachedTransactionMetadata, transactionSolidityErr)
+
+		tangle.deleteTransactionFutureCone(currentTransaction.ID())
 
 		return
 	}
@@ -995,9 +1151,11 @@ func (tangle *Tangle) processSolidificationStackEntry(solidificationStack *list.
 	}
 
 	// abort if the payload is not solid or invalid
-	payloadSolid, payloadSolidityErr := tangle.checkPayloadSolidity(currentPayload, currentPayloadMetadata, consumedBranches)
+	payloadSolid, payloadSolidityErr := tangle.payloadBecameNewlySolid(currentPayload, currentPayloadMetadata, consumedBranches)
 	if payloadSolidityErr != nil {
-		// TODO: TRIGGER INVALID TX + REMOVE TXS + PAYLOADS THAT APPROVE IT
+		tangle.Events.PayloadInvalid.Trigger(solidificationStackEntry.CachedPayload, solidificationStackEntry.CachedPayloadMetadata, payloadSolidityErr)
+
+		tangle.deletePayloadFutureCone(currentPayload.ID())
 
 		return
 	}
@@ -1013,6 +1171,9 @@ func (tangle *Tangle) processSolidificationStackEntry(solidificationStack *list.
 		return
 	}
 
+	// keep track of the added payloads so we do not add them multiple times
+	processedPayloads := make(map[payload.ID]types.Empty)
+
 	// trigger events and schedule check of approvers / consumers
 	if transactionBooked {
 		tangle.Events.TransactionBooked.Trigger(solidificationStackEntry.CachedTransaction, solidificationStackEntry.CachedTransactionMetadata, decisionPending)
@@ -1058,11 +1219,14 @@ func (tangle *Tangle) bookTransaction(cachedTransaction *transaction.CachedTrans
 		return
 	}
 
-	// abort if this transaction was booked by another process already
+	// abort if transaction was marked as solid before
 	if !transactionMetadata.SetSolid(true) {
 		return
 	}
 
+	// trigger event if transaction became solid
+	tangle.Events.TransactionSolid.Trigger(cachedTransaction, cachedTransactionMetadata)
+
 	consumedBranches := make(branchmanager.BranchIds)
 	conflictingInputs := make([]transaction.OutputID, 0)
 	conflictingInputsOfFirstConsumers := make(map[transaction.ID][]transaction.OutputID)
@@ -1197,6 +1361,14 @@ func (tangle *Tangle) bookPayload(cachedPayload *payload.CachedPayload, cachedPa
 		return
 	}
 
+	// abort if the payload has been marked as solid before
+	if !valueObjectMetadata.setSolid(true) {
+		return
+	}
+
+	// trigger event if payload became solid
+	tangle.Events.PayloadSolid.Trigger(cachedPayload, cachedPayloadMetadata)
+
 	cachedAggregatedBranch, err := tangle.BranchManager().AggregateBranches([]branchmanager.BranchID{branchBranchID, trunkBranchID, transactionBranchID}...)
 	if err != nil {
 		return
@@ -1224,9 +1396,8 @@ func (tangle *Tangle) payloadBranchID(payloadID payload.ID) branchmanager.Branch
 
 	payloadMetadata := cachedPayloadMetadata.Unwrap()
 	if payloadMetadata == nil {
-		cachedPayloadMetadata.Release()
 
-		// if transaction is missing and was not reported as missing, yet
+		// if payload is missing and was not reported as missing, yet
 		if cachedMissingPayload, missingPayloadStored := tangle.missingPayloadStorage.StoreIfAbsent(NewMissingPayload(payloadID)); missingPayloadStored {
 			cachedMissingPayload.Consume(func(object objectstorage.StorableObject) {
 				tangle.Events.PayloadMissing.Trigger(object.(*MissingPayload).ID())
@@ -1240,28 +1411,29 @@ func (tangle *Tangle) payloadBranchID(payloadID payload.ID) branchmanager.Branch
 	return payloadMetadata.BranchID()
 }
 
-// checkPayloadSolidity returns true if the given payload is solid. A payload is considered to be solid solid, if it is either
-// already marked as solid or if its referenced payloads are marked as solid.
-func (tangle *Tangle) checkPayloadSolidity(payload *payload.Payload, payloadMetadata *PayloadMetadata, transactionBranches []branchmanager.BranchID) (solid bool, err error) {
-	if payload == nil || payload.IsDeleted() || payloadMetadata == nil || payloadMetadata.IsDeleted() {
+// payloadBecameNewlySolid returns true if the given payload is solid but was not marked as solid, yet.
+func (tangle *Tangle) payloadBecameNewlySolid(p *payload.Payload, payloadMetadata *PayloadMetadata, transactionBranches []branchmanager.BranchID) (solid bool, err error) {
+	// abort if the payload was deleted
+	if p == nil || p.IsDeleted() || payloadMetadata == nil || payloadMetadata.IsDeleted() {
 		return
 	}
 
-	if solid = payloadMetadata.IsSolid(); solid {
+	// abort if the payload was marked as solid already
+	if payloadMetadata.IsSolid() {
 		return
 	}
 
 	combinedBranches := transactionBranches
 
-	trunkBranchID := tangle.payloadBranchID(payload.TrunkID())
+	trunkBranchID := tangle.payloadBranchID(p.TrunkID())
 	if trunkBranchID == branchmanager.UndefinedBranchID {
-		return
+		return false, nil
 	}
 	combinedBranches = append(combinedBranches, trunkBranchID)
 
-	branchBranchID := tangle.payloadBranchID(payload.BranchID())
+	branchBranchID := tangle.payloadBranchID(p.BranchID())
 	if branchBranchID == branchmanager.UndefinedBranchID {
-		return
+		return false, nil
 	}
 	combinedBranches = append(combinedBranches, branchBranchID)
 
@@ -1270,9 +1442,9 @@ func (tangle *Tangle) checkPayloadSolidity(payload *payload.Payload, payloadMeta
 		return
 	}
 	if branchesConflicting {
-		err = fmt.Errorf("the payload '%s' combines conflicting versions of the ledger state", payload.ID())
+		err = fmt.Errorf("the payload '%s' combines conflicting versions of the ledger state", p.ID())
 
-		return
+		return false, err
 	}
 
 	solid = true
@@ -1287,8 +1459,10 @@ func (tangle *Tangle) checkTransactionSolidity(tx *transaction.Transaction, meta
 	}
 
 	// abort if we have previously determined the solidity status of the transaction already
-	if solid = metadata.Solid(); solid {
-		consumedBranches = []branchmanager.BranchID{metadata.BranchID()}
+	if metadata.Solid() {
+		if solid = metadata.BranchID() != branchmanager.UndefinedBranchID; solid {
+			consumedBranches = []branchmanager.BranchID{metadata.BranchID()}
+		}
 
 		return
 	}
@@ -1405,7 +1579,7 @@ func (tangle *Tangle) checkTransactionOutputs(inputBalances map[balance.Color]in
 				continue
 			}
 
-			// sidestep logic if we have a newly colored output (we check the supply later)
+			// sidestep logic if we have ColorIOTA
 			if outputBalance.Color() == balance.ColorIOTA {
 				// catch overflows
 				if uncoloredCoins > math.MaxInt64-outputBalance.Value() {
@@ -1543,6 +1717,9 @@ func (tangle *Tangle) moveTransactionToBranch(cachedTransaction *transaction.Cac
 						return nil
 					}
 
+					// update the payloads
+					tangle.updateBranchOfValuePayloadsAttachingTransaction(currentTransactionMetadata.ID())
+
 					// iterate through the outputs of the moved transaction
 					currentTransaction.Outputs().ForEach(func(address address.Address, balances []*balance.Balance) bool {
 						// create reference to the output
@@ -1592,6 +1769,83 @@ func (tangle *Tangle) moveTransactionToBranch(cachedTransaction *transaction.Cac
 	return
 }
 
+// updateBranchOfValuePayloadsAttachingTransaction updates the BranchID of all payloads that attach a certain
+// transaction (and its approvers).
+func (tangle *Tangle) updateBranchOfValuePayloadsAttachingTransaction(transactionID transaction.ID) {
+	// initialize stack with the attachments of the given transaction
+	payloadStack := list.New()
+	tangle.Attachments(transactionID).Consume(func(attachment *Attachment) {
+		payloadStack.PushBack(tangle.Payload(attachment.PayloadID()))
+	})
+
+	// iterate through the stack to update all payloads we found
+	for payloadStack.Len() >= 1 {
+		// pop the first element from the stack
+		currentPayloadElement := payloadStack.Front()
+		payloadStack.Remove(currentPayloadElement)
+
+		// process the found element
+		currentPayloadElement.Value.(*payload.CachedPayload).Consume(func(currentPayload *payload.Payload) {
+			// determine branches of referenced payloads
+			branchIDofBranch := tangle.branchIDofPayload(currentPayload.BranchID())
+			branchIDofTrunk := tangle.branchIDofPayload(currentPayload.TrunkID())
+
+			// determine branch of contained transaction
+			var branchIDofTransaction branchmanager.BranchID
+			if !tangle.TransactionMetadata(currentPayload.Transaction().ID()).Consume(func(metadata *TransactionMetadata) {
+				branchIDofTransaction = metadata.BranchID()
+			}) {
+				return
+			}
+
+			// abort if any of the branches is undefined
+			if branchIDofBranch == branchmanager.UndefinedBranchID || branchIDofTrunk == branchmanager.UndefinedBranchID || branchIDofTransaction == branchmanager.UndefinedBranchID {
+				return
+			}
+
+			// aggregate the branches or abort if we face an error
+			cachedAggregatedBranch, err := tangle.branchManager.AggregateBranches(branchIDofBranch, branchIDofTrunk, branchIDofTransaction)
+			if err != nil {
+				tangle.Events.Error.Trigger(err)
+
+				return
+			}
+
+			// try to update the metadata of the payload and queue its approvers
+			cachedAggregatedBranch.Consume(func(branch *branchmanager.Branch) {
+				tangle.PayloadMetadata(currentPayload.ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+					if !payloadMetadata.SetBranchID(branch.ID()) {
+						return
+					}
+
+					// queue approvers for recursive updates
+					tangle.ForeachApprovers(currentPayload.ID(), func(payload *payload.CachedPayload, payloadMetadata *CachedPayloadMetadata, transaction *transaction.CachedTransaction, transactionMetadata *CachedTransactionMetadata) {
+						payloadMetadata.Release()
+						transaction.Release()
+						transactionMetadata.Release()
+
+						payloadStack.PushBack(payload)
+					})
+				})
+
+			})
+		})
+	}
+}
+
+// branchIDofPayload returns the BranchID that a payload is booked into.
+func (tangle *Tangle) branchIDofPayload(payloadID payload.ID) (branchID branchmanager.BranchID) {
+	if payloadID == payload.GenesisID {
+		return branchmanager.MasterBranchID
+	}
+
+	tangle.PayloadMetadata(payloadID).Consume(func(payloadMetadata *PayloadMetadata) {
+		branchID = payloadMetadata.BranchID()
+	})
+
+	return
+}
+
 func (tangle *Tangle) calculateBranchOfTransaction(currentTransaction *transaction.Transaction) (branch *branchmanager.CachedBranch, err error) {
 	consumedBranches := make(branchmanager.BranchIds)
 	if !currentTransaction.Inputs().ForEach(func(outputId transaction.OutputID) bool {
diff --git a/dapps/valuetransfers/packages/tangle/tangle_concurrency_test.go b/dapps/valuetransfers/packages/tangle/tangle_concurrency_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..bf97f7bb86f032fe835e6231612efdc3cd1aea7f
--- /dev/null
+++ b/dapps/valuetransfers/packages/tangle/tangle_concurrency_test.go
@@ -0,0 +1,345 @@
+package tangle
+
+import (
+	"sync"
+	"testing"
+
+	"github.com/iotaledger/hive.go/kvstore/mapdb"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/tipmanager"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+)
+
+func TestConcurrency(t *testing.T) {
+	// img/concurrency.png
+	// Builds a simple UTXO-DAG where each transaction spends exactly 1 output from genesis.
+	// Tips are concurrently selected (via TipManager) resulting in a moderately wide tangle depending on `threads`.
+	tangle := New(mapdb.NewMapDB())
+	defer tangle.Shutdown()
+
+	tipManager := tipmanager.New()
+
+	count := 1000
+	threads := 10
+	countTotal := threads * count
+
+	// initialize tangle with genesis block
+	outputs := make(map[address.Address][]*balance.Balance)
+	for i := 0; i < countTotal; i++ {
+		outputs[address.Random()] = []*balance.Balance{
+			balance.New(balance.ColorIOTA, 1),
+		}
+	}
+	inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+
+	transactions := make([]*transaction.Transaction, countTotal)
+	valueObjects := make([]*payload.Payload, countTotal)
+
+	// start threads, each working on its own chunk of transactions and valueObjects
+	var wg sync.WaitGroup
+	for thread := 0; thread < threads; thread++ {
+		wg.Add(1)
+		go func(threadNo int) {
+			defer wg.Done()
+
+			start := threadNo * count
+			end := start + count
+
+			for i := start; i < end; i++ {
+				// issue transaction moving funds from genesis
+				tx := transaction.New(
+					transaction.NewInputs(inputIDs[i]),
+					transaction.NewOutputs(
+						map[address.Address][]*balance.Balance{
+							address.Random(): {
+								balance.New(balance.ColorIOTA, 1),
+							},
+						}),
+				)
+				// use random value objects as tips (possibly created in other threads)
+				parent1, parent2 := tipManager.Tips()
+				valueObject := payload.New(parent1, parent2, tx)
+
+				tangle.AttachPayloadSync(valueObject)
+
+				tipManager.AddTip(valueObject)
+				transactions[i] = tx
+				valueObjects[i] = valueObject
+			}
+		}(thread)
+	}
+
+	wg.Wait()
+
+	// verify correctness
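+	// every transaction spends a distinct genesis output, so there are no conflicts and everything should stay in the master branch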
+	for i := 0; i < countTotal; i++ {
+		// check if transaction metadata is found in database
+		assert.True(t, tangle.TransactionMetadata(transactions[i].ID()).Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.Truef(t, transactionMetadata.Solid(), "the transaction is not solid")
+			assert.Equalf(t, branchmanager.MasterBranchID, transactionMetadata.BranchID(), "the transaction was booked into the wrong branch")
+		}))
+
+		// check if payload metadata is found in database
+		assert.True(t, tangle.PayloadMetadata(valueObjects[i].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.Truef(t, payloadMetadata.IsSolid(), "the payload is not solid")
+			assert.Equalf(t, branchmanager.MasterBranchID, payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+		}))
+
+		// check if outputs are found in database
+		transactions[i].Outputs().ForEach(func(address address.Address, balances []*balance.Balance) bool {
+			cachedOutput := tangle.TransactionOutput(transaction.NewOutputID(address, transactions[i].ID()))
+			assert.True(t, cachedOutput.Consume(func(output *Output) {
+				assert.Equalf(t, 0, output.ConsumerCount(), "the output should not be spent")
+				assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1)}, output.Balances())
+				assert.Equalf(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+				assert.Truef(t, output.Solid(), "the output is not solid")
+			}))
+			return true
+		})
+
+		// check that all inputs are consumed exactly once
+		cachedInput := tangle.TransactionOutput(inputIDs[i])
+		assert.True(t, cachedInput.Consume(func(output *Output) {
+			assert.Equalf(t, 1, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1)}, output.Balances())
+			assert.Equalf(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.Truef(t, output.Solid(), "the output is not solid")
+		}))
+	}
+}
+
+func TestReverseValueObjectSolidification(t *testing.T) {
+	// img/reverse-valueobject-solidification.png
+	// Builds a simple UTXO-DAG where each transaction spends exactly 1 output from genesis.
+	// All value objects reference the previous value object, effectively creating a chain.
+	// The test attaches the prepared value objects concurrently in reverse order.
+	tangle := New(mapdb.NewMapDB())
+	defer tangle.Shutdown()
+
+	tipManager := tipmanager.New()
+
+	count := 1000
+	threads := 5
+	countTotal := threads * count
+
+	// initialize tangle with genesis block
+	outputs := make(map[address.Address][]*balance.Balance)
+	for i := 0; i < countTotal; i++ {
+		outputs[address.Random()] = []*balance.Balance{
+			balance.New(balance.ColorIOTA, 1),
+		}
+	}
+	inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+
+	transactions := make([]*transaction.Transaction, countTotal)
+	valueObjects := make([]*payload.Payload, countTotal)
+
+	// prepare value objects
+	for i := 0; i < countTotal; i++ {
+		tx := transaction.New(
+			// issue transaction moving funds from genesis
+			transaction.NewInputs(inputIDs[i]),
+			transaction.NewOutputs(
+				map[address.Address][]*balance.Balance{
+					address.Random(): {
+						balance.New(balance.ColorIOTA, 1),
+					},
+				}),
+		)
+		parent1, parent2 := tipManager.Tips()
+		valueObject := payload.New(parent1, parent2, tx)
+
+		tipManager.AddTip(valueObject)
+		transactions[i] = tx
+		valueObjects[i] = valueObject
+	}
+
+	// attach value objects in reverse order
+	var wg sync.WaitGroup
+	for thread := 0; thread < threads; thread++ {
+		wg.Add(1)
+		go func(threadNo int) {
+			defer wg.Done()
+
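+			// walk backwards with a stride of 'threads' so that, across all goroutines, the value objects are attached in roughly reverse order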
+			for i := countTotal - 1 - threadNo; i >= 0; i -= threads {
+				valueObject := valueObjects[i]
+				tangle.AttachPayloadSync(valueObject)
+			}
+		}(thread)
+	}
+	wg.Wait()
+
+	// verify correctness
+	for i := 0; i < countTotal; i++ {
+		// check if transaction metadata is found in database
+		assert.True(t, tangle.TransactionMetadata(transactions[i].ID()).Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.Truef(t, transactionMetadata.Solid(), "the transaction is not solid")
+			assert.Equalf(t, branchmanager.MasterBranchID, transactionMetadata.BranchID(), "the transaction was booked into the wrong branch")
+		}))
+
+		// check if payload metadata is found in database
+		assert.True(t, tangle.PayloadMetadata(valueObjects[i].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.Truef(t, payloadMetadata.IsSolid(), "the payload is not solid")
+			assert.Equalf(t, branchmanager.MasterBranchID, payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+		}))
+
+		// check if outputs are found in database
+		transactions[i].Outputs().ForEach(func(address address.Address, balances []*balance.Balance) bool {
+			cachedOutput := tangle.TransactionOutput(transaction.NewOutputID(address, transactions[i].ID()))
+			assert.True(t, cachedOutput.Consume(func(output *Output) {
+				assert.Equalf(t, 0, output.ConsumerCount(), "the output should not be spent")
+				assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1)}, output.Balances())
+				assert.Equalf(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+				assert.Truef(t, output.Solid(), "the output is not solid")
+			}))
+			return true
+		})
+
+		// check that all inputs are consumed exactly once
+		cachedInput := tangle.TransactionOutput(inputIDs[i])
+		assert.True(t, cachedInput.Consume(func(output *Output) {
+			assert.Equalf(t, 1, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1)}, output.Balances())
+			assert.Equalf(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.Truef(t, output.Solid(), "the output is not solid")
+		}))
+	}
+}
+
+func TestReverseTransactionSolidification(t *testing.T) {
+	testIterations := 500
+
+	// repeat the test a few times
+	for k := 0; k < testIterations; k++ {
+		// img/reverse-transaction-solidification.png
+		// Builds a UTXO-DAG with `txChains` transaction chains, where each transaction spends the output of its predecessor in the same chain.
+		// All value objects reference the previous value object, effectively creating a chain.
+		// The test attaches the prepared value objects concurrently in reverse order.
+
+		tangle := New(mapdb.NewMapDB())
+
+		tipManager := tipmanager.New()
+
+		txChains := 2
+		count := 10
+		threads := 5
+		countTotal := txChains * threads * count
+
+		// initialize tangle with genesis block
+		outputs := make(map[address.Address][]*balance.Balance)
+		for i := 0; i < txChains; i++ {
+			outputs[address.Random()] = []*balance.Balance{
+				balance.New(balance.ColorIOTA, 1),
+			}
+		}
+		inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+
+		transactions := make([]*transaction.Transaction, countTotal)
+		valueObjects := make([]*payload.Payload, countTotal)
+
+		// create chains of transactions
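+		// transactions are stored interleaved: transactions[i*txChains+j] is the i-th transaction of chain j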
+		for i := 0; i < count*threads; i++ {
+			for j := 0; j < txChains; j++ {
+				var tx *transaction.Transaction
+
+				// transferring from genesis
+				if i == 0 {
+					tx = transaction.New(
+						transaction.NewInputs(inputIDs[j]),
+						transaction.NewOutputs(
+							map[address.Address][]*balance.Balance{
+								address.Random(): {
+									balance.New(balance.ColorIOTA, 1),
+								},
+							}),
+					)
+				} else {
+					// create chains in UTXO dag
+					tx = transaction.New(
+						getTxOutputsAsInputs(transactions[i*txChains-txChains+j]),
+						transaction.NewOutputs(
+							map[address.Address][]*balance.Balance{
+								address.Random(): {
+									balance.New(balance.ColorIOTA, 1),
+								},
+							}),
+					)
+				}
+
+				transactions[i*txChains+j] = tx
+			}
+		}
+
+		// prepare value objects (simple chain)
+		for i := 0; i < countTotal; i++ {
+			parent1, parent2 := tipManager.Tips()
+			valueObject := payload.New(parent1, parent2, transactions[i])
+
+			tipManager.AddTip(valueObject)
+			valueObjects[i] = valueObject
+		}
+
+		// attach value objects in reverse order
+		var wg sync.WaitGroup
+		for thread := 0; thread < threads; thread++ {
+			wg.Add(1)
+			go func(threadNo int) {
+				defer wg.Done()
+
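+				// same reverse, strided iteration as in TestReverseValueObjectSolidification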
+				for i := countTotal - 1 - threadNo; i >= 0; i -= threads {
+					valueObject := valueObjects[i]
+					tangle.AttachPayloadSync(valueObject)
+				}
+			}(thread)
+		}
+		wg.Wait()
+
+		// verify correctness
+		for i := 0; i < countTotal; i++ {
+			// check if transaction metadata is found in database
+			require.Truef(t, tangle.TransactionMetadata(transactions[i].ID()).Consume(func(transactionMetadata *TransactionMetadata) {
+				require.Truef(t, transactionMetadata.Solid(), "the transaction %s is not solid", transactions[i].ID().String())
+				require.Equalf(t, branchmanager.MasterBranchID, transactionMetadata.BranchID(), "the transaction was booked into the wrong branch")
+			}), "transaction metadata %s not found in database", transactions[i].ID())
+
+			// check if value object metadata is found in database
+			require.Truef(t, tangle.PayloadMetadata(valueObjects[i].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+				require.Truef(t, payloadMetadata.IsSolid(), "the payload %s is not solid", valueObjects[i].ID())
+				require.Equalf(t, branchmanager.MasterBranchID, payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+			}), "value object metadata %s not found in database", valueObjects[i].ID())
+
+			// check if outputs are found in database
+			transactions[i].Outputs().ForEach(func(address address.Address, balances []*balance.Balance) bool {
+				cachedOutput := tangle.TransactionOutput(transaction.NewOutputID(address, transactions[i].ID()))
+				require.Truef(t, cachedOutput.Consume(func(output *Output) {
+					// only the last outputs in chain should not be spent
+					if i+txChains >= countTotal {
+						require.Equalf(t, 0, output.ConsumerCount(), "the output should not be spent")
+					} else {
+						require.Equalf(t, 1, output.ConsumerCount(), "the output should be spent")
+					}
+					require.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1)}, output.Balances())
+					require.Equalf(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+					require.Truef(t, output.Solid(), "the output is not solid")
+				}), "output not found in database for tx %s", transactions[i])
+				return true
+			})
+		}
+	}
+}
+
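+// getTxOutputsAsInputs collects all outputs of the given transaction and returns them wrapped as inputs for a follow-up transaction.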
+func getTxOutputsAsInputs(tx *transaction.Transaction) *transaction.Inputs {
+	outputIDs := make([]transaction.OutputID, 0)
+	tx.Outputs().ForEach(func(address address.Address, balances []*balance.Balance) bool {
+		outputIDs = append(outputIDs, transaction.NewOutputID(address, tx.ID()))
+		return true
+	})
+
+	return transaction.NewInputs(outputIDs...)
+}
diff --git a/dapps/valuetransfers/packages/tangle/tangle_scenario_test.go b/dapps/valuetransfers/packages/tangle/tangle_scenario_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..63a8889e3eb4bd02e0a2a7de51cf75599643f3a3
--- /dev/null
+++ b/dapps/valuetransfers/packages/tangle/tangle_scenario_test.go
@@ -0,0 +1,1054 @@
+package tangle
+
+import (
+	"testing"
+
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address/signaturescheme"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/wallet"
+	"github.com/iotaledger/hive.go/kvstore/mapdb"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
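+// address indices that are used together with the test seed (seed.Address / seed.KeyPair) to refer to the addresses of the scenario by name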
+const (
+	GENESIS uint64 = iota
+	A
+	B
+	C
+	D
+	E
+	F
+	G
+	H
+	I
+	J
+	Y
+)
+
+// TODO: clean up the scenario creation with some helper functions: DRY!
+
+// preparePropagationScenario1 creates a tangle according to `img/scenario1.png`.
+func preparePropagationScenario1(t *testing.T) (*Tangle, map[string]*transaction.Transaction, map[string]*payload.Payload, map[string]branchmanager.BranchID, *wallet.Seed) {
+	// create tangle
+	tangle := New(mapdb.NewMapDB())
+
+	// create seed for testing
+	seed := wallet.NewSeed()
+
+	// initialize tangle with genesis block (+GENESIS)
+	tangle.LoadSnapshot(map[transaction.ID]map[address.Address][]*balance.Balance{
+		transaction.GenesisID: {
+			seed.Address(GENESIS): {
+				balance.New(balance.ColorIOTA, 3333),
+			},
+		},
+	})
+
+	// create dictionaries so we can address the created entities by their aliases from the picture
+	transactions := make(map[string]*transaction.Transaction)
+	valueObjects := make(map[string]*payload.Payload)
+	branches := make(map[string]branchmanager.BranchID)
+
+	// [-GENESIS, A+, B+, C+]
+	{
+		// create transaction + payload
+		transactions["[-GENESIS, A+, B+, C+]"] = transaction.New(
+			transaction.NewInputs(
+				transaction.NewOutputID(seed.Address(GENESIS), transaction.GenesisID),
+			),
+
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				seed.Address(A): {
+					balance.New(balance.ColorIOTA, 1111),
+				},
+				seed.Address(B): {
+					balance.New(balance.ColorIOTA, 1111),
+				},
+				seed.Address(C): {
+					balance.New(balance.ColorIOTA, 1111),
+				},
+			}),
+		)
+		transactions["[-GENESIS, A+, B+, C+]"].Sign(signaturescheme.ED25519(*seed.KeyPair(GENESIS)))
+		valueObjects["[-GENESIS, A+, B+, C+]"] = payload.New(payload.GenesisID, payload.GenesisID, transactions["[-GENESIS, A+, B+, C+]"])
+
+		// check if signatures are valid
+		assert.True(t, transactions["[-GENESIS, A+, B+, C+]"].SignaturesValid())
+
+		// attach payload
+		tangle.AttachPayloadSync(valueObjects["[-GENESIS, A+, B+, C+]"])
+
+		// check if transaction metadata is found in database
+		assert.True(t, tangle.TransactionMetadata(transactions["[-GENESIS, A+, B+, C+]"].ID()).Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.True(t, transactionMetadata.Solid(), "the transaction is not solid")
+			assert.Equal(t, branchmanager.MasterBranchID, transactionMetadata.BranchID(), "the transaction was booked into the wrong branch")
+		}))
+
+		// check if payload metadata is found in database
+		assert.True(t, tangle.PayloadMetadata(valueObjects["[-GENESIS, A+, B+, C+]"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.True(t, payloadMetadata.IsSolid(), "the payload is not solid")
+			assert.Equal(t, branchmanager.MasterBranchID, payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+		}))
+
+		// check if the balance on address GENESIS is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(GENESIS)).Consume(func(output *Output) {
+			assert.Equal(t, 1, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 3333)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address A is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(A)).Consume(func(output *Output) {
+			assert.Equal(t, 0, output.ConsumerCount(), "the output should not be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address B is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(B)).Consume(func(output *Output) {
+			assert.Equal(t, 0, output.ConsumerCount(), "the output should not be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address C is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(C)).Consume(func(output *Output) {
+			assert.Equal(t, 0, output.ConsumerCount(), "the output should not be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+	}
+
+	// [-A, D+]
+	{
+		// create transaction + payload
+		transactions["[-A, D+]"] = transaction.New(
+			transaction.NewInputs(
+				transaction.NewOutputID(seed.Address(A), transactions["[-GENESIS, A+, B+, C+]"].ID()),
+			),
+
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				seed.Address(D): {
+					balance.New(balance.ColorIOTA, 1111),
+				},
+			}),
+		)
+		transactions["[-A, D+]"].Sign(signaturescheme.ED25519(*seed.KeyPair(A)))
+		valueObjects["[-A, D+]"] = payload.New(payload.GenesisID, valueObjects["[-GENESIS, A+, B+, C+]"].ID(), transactions["[-A, D+]"])
+
+		// check if signatures are valid
+		assert.True(t, transactions["[-A, D+]"].SignaturesValid())
+
+		// attach payload
+		tangle.AttachPayloadSync(valueObjects["[-A, D+]"])
+
+		// check if transaction metadata is found in database
+		assert.True(t, tangle.TransactionMetadata(transactions["[-A, D+]"].ID()).Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.True(t, transactionMetadata.Solid(), "the transaction is not solid")
+			assert.Equal(t, branchmanager.MasterBranchID, transactionMetadata.BranchID(), "the transaction was booked into the wrong branch")
+		}))
+
+		// check if payload metadata is found in database
+		assert.True(t, tangle.PayloadMetadata(valueObjects["[-A, D+]"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.True(t, payloadMetadata.IsSolid(), "the payload is not solid")
+			assert.Equal(t, branchmanager.MasterBranchID, payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+		}))
+
+		// check if the balance on address A is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(A)).Consume(func(output *Output) {
+			assert.Equal(t, 1, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address D is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(D)).Consume(func(output *Output) {
+			assert.Equal(t, 0, output.ConsumerCount(), "the output should not be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+	}
+
+	// [-B, -C, E+]
+	{
+		// create transaction + payload
+		transactions["[-B, -C, E+]"] = transaction.New(
+			transaction.NewInputs(
+				transaction.NewOutputID(seed.Address(B), transactions["[-GENESIS, A+, B+, C+]"].ID()),
+				transaction.NewOutputID(seed.Address(C), transactions["[-GENESIS, A+, B+, C+]"].ID()),
+			),
+
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				seed.Address(E): {
+					balance.New(balance.ColorIOTA, 2222),
+				},
+			}),
+		)
+		transactions["[-B, -C, E+]"].Sign(signaturescheme.ED25519(*seed.KeyPair(B)))
+		transactions["[-B, -C, E+]"].Sign(signaturescheme.ED25519(*seed.KeyPair(C)))
+		valueObjects["[-B, -C, E+]"] = payload.New(payload.GenesisID, valueObjects["[-GENESIS, A+, B+, C+]"].ID(), transactions["[-B, -C, E+]"])
+
+		// check if signatures are valid
+		assert.True(t, transactions["[-B, -C, E+]"].SignaturesValid())
+
+		// attach payload
+		tangle.AttachPayloadSync(valueObjects["[-B, -C, E+]"])
+
+		// check if transaction metadata is found in database
+		assert.True(t, tangle.TransactionMetadata(transactions["[-B, -C, E+]"].ID()).Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.True(t, transactionMetadata.Solid(), "the transaction is not solid")
+			assert.Equal(t, branchmanager.MasterBranchID, transactionMetadata.BranchID(), "the transaction was booked into the wrong branch")
+		}))
+
+		// check if payload metadata is found in database
+		assert.True(t, tangle.PayloadMetadata(valueObjects["[-B, -C, E+]"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.True(t, payloadMetadata.IsSolid(), "the payload is not solid")
+			assert.Equal(t, branchmanager.MasterBranchID, payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+		}))
+
+		// check if the balance on address B is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(B)).Consume(func(output *Output) {
+			assert.Equal(t, 1, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address C is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(C)).Consume(func(output *Output) {
+			assert.Equal(t, 1, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address E is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(E)).Consume(func(output *Output) {
+			assert.Equal(t, 0, output.ConsumerCount(), "the output should not be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 2222)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+	}
+
+	// [-B, -C, E+] (Reattachment)
+	{
+		// create payload
+		valueObjects["[-B, -C, E+] (Reattachment)"] = payload.New(valueObjects["[-B, -C, E+]"].ID(), valueObjects["[-GENESIS, A+, B+, C+]"].ID(), transactions["[-B, -C, E+]"])
+
+		// attach payload
+		tangle.AttachPayloadSync(valueObjects["[-B, -C, E+] (Reattachment)"])
+
+		// check if transaction metadata is found in database
+		assert.True(t, tangle.TransactionMetadata(transactions["[-B, -C, E+]"].ID()).Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.True(t, transactionMetadata.Solid(), "the transaction is not solid")
+			assert.Equal(t, branchmanager.MasterBranchID, transactionMetadata.BranchID(), "the transaction was booked into the wrong branch")
+		}))
+
+		// check if payload metadata is found in database
+		assert.True(t, tangle.PayloadMetadata(valueObjects["[-B, -C, E+] (Reattachment)"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.True(t, payloadMetadata.IsSolid(), "the payload is not solid")
+			assert.Equal(t, branchmanager.MasterBranchID, payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+		}))
+
+		// check if the balance on address B is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(B)).Consume(func(output *Output) {
+			assert.Equal(t, 1, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address C is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(C)).Consume(func(output *Output) {
+			assert.Equal(t, 1, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address E is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(E)).Consume(func(output *Output) {
+			assert.Equal(t, 0, output.ConsumerCount(), "the output should not be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 2222)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+	}
+
+	// [-A, F+]
+	{
+		// create transaction + payload
+		transactions["[-A, F+]"] = transaction.New(
+			transaction.NewInputs(
+				transaction.NewOutputID(seed.Address(A), transactions["[-GENESIS, A+, B+, C+]"].ID()),
+			),
+
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				seed.Address(F): {
+					balance.New(balance.ColorIOTA, 1111),
+				},
+			}),
+		)
+		transactions["[-A, F+]"].Sign(signaturescheme.ED25519(*seed.KeyPair(A)))
+		valueObjects["[-A, F+]"] = payload.New(valueObjects["[-B, -C, E+]"].ID(), valueObjects["[-GENESIS, A+, B+, C+]"].ID(), transactions["[-A, F+]"])
+
+		// check if signatures are valid
+		assert.True(t, transactions["[-A, F+]"].SignaturesValid())
+
+		// attach payload
+		tangle.AttachPayloadSync(valueObjects["[-A, F+]"])
+
+		// create aliases for the branches
+		branches["A"] = branchmanager.NewBranchID(transactions["[-A, D+]"].ID())
+		branches["B"] = branchmanager.NewBranchID(transactions["[-A, F+]"].ID())
+
+		// check if transaction metadata is found in database
+		assert.True(t, tangle.TransactionMetadata(transactions["[-A, F+]"].ID()).Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.True(t, transactionMetadata.Solid(), "the transaction is not solid")
+			assert.Equal(t, branches["B"], transactionMetadata.BranchID(), "the transaction was booked into the wrong branch")
+		}))
+
+		// check if payload metadata is found in database
+		assert.True(t, tangle.PayloadMetadata(valueObjects["[-A, F+]"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.True(t, payloadMetadata.IsSolid(), "the payload is not solid")
+			assert.Equal(t, branches["B"], payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+		}))
+
+		// check if the balance on address A is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(A)).Consume(func(output *Output) {
+			assert.Equal(t, 2, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address F is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(F)).Consume(func(output *Output) {
+			assert.Equal(t, 0, output.ConsumerCount(), "the output should not be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branches["B"], output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address D is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(D)).Consume(func(output *Output) {
+			assert.Equal(t, 0, output.ConsumerCount(), "the output should not be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branches["A"], output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if payload metadata is found in database
+		assert.True(t, tangle.PayloadMetadata(valueObjects["[-A, D+]"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.True(t, payloadMetadata.IsSolid(), "the payload is not solid")
+			assert.Equal(t, branches["A"], payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+		}))
+
+		// check if the branches are conflicting
+		branchesConflicting, err := tangle.branchManager.BranchesConflicting(branches["A"], branches["B"])
+		require.NoError(t, err)
+		assert.True(t, branchesConflicting, "the branches should be conflicting")
+	}
+
+	// [-E, -F, G+]
+	{
+		// create transaction + payload
+		transactions["[-E, -F, G+]"] = transaction.New(
+			transaction.NewInputs(
+				transaction.NewOutputID(seed.Address(E), transactions["[-B, -C, E+]"].ID()),
+				transaction.NewOutputID(seed.Address(F), transactions["[-A, F+]"].ID()),
+			),
+
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				seed.Address(G): {
+					balance.New(balance.ColorIOTA, 3333),
+				},
+			}),
+		)
+		transactions["[-E, -F, G+]"].Sign(signaturescheme.ED25519(*seed.KeyPair(E)))
+		transactions["[-E, -F, G+]"].Sign(signaturescheme.ED25519(*seed.KeyPair(F)))
+		valueObjects["[-E, -F, G+]"] = payload.New(valueObjects["[-B, -C, E+]"].ID(), valueObjects["[-A, F+]"].ID(), transactions["[-E, -F, G+]"])
+
+		// check if signatures are valid
+		assert.True(t, transactions["[-E, -F, G+]"].SignaturesValid())
+
+		// attach payload
+		tangle.AttachPayloadSync(valueObjects["[-E, -F, G+]"])
+
+		// check if transaction metadata is found in database
+		assert.True(t, tangle.TransactionMetadata(transactions["[-E, -F, G+]"].ID()).Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.True(t, transactionMetadata.Solid(), "the transaction is not solid")
+			assert.Equal(t, branches["B"], transactionMetadata.BranchID(), "the transaction was booked into the wrong branch")
+		}))
+
+		// check if payload metadata is found in database
+		assert.True(t, tangle.PayloadMetadata(valueObjects["[-E, -F, G+]"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.True(t, payloadMetadata.IsSolid(), "the payload is not solid")
+			assert.Equal(t, branches["B"], payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+		}))
+
+		// check if the balance on address E is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(E)).Consume(func(output *Output) {
+			assert.Equal(t, 1, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 2222)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address F is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(F)).Consume(func(output *Output) {
+			assert.Equal(t, 1, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branches["B"], output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address G is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(G)).Consume(func(output *Output) {
+			assert.Equal(t, 0, output.ConsumerCount(), "the output should not be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 3333)}, output.Balances())
+			assert.Equal(t, branches["B"], output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+	}
+
+	// [-F, -D, Y+]
+	{
+		// create transaction + payload
+		transactions["[-F, -D, Y+]"] = transaction.New(
+			transaction.NewInputs(
+				transaction.NewOutputID(seed.Address(D), transactions["[-A, D+]"].ID()),
+				transaction.NewOutputID(seed.Address(F), transactions["[-A, F+]"].ID()),
+			),
+
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				seed.Address(Y): {
+					balance.New(balance.ColorIOTA, 2222),
+				},
+			}),
+		)
+		transactions["[-F, -D, Y+]"].Sign(signaturescheme.ED25519(*seed.KeyPair(D)))
+		transactions["[-F, -D, Y+]"].Sign(signaturescheme.ED25519(*seed.KeyPair(F)))
+		valueObjects["[-F, -D, Y+]"] = payload.New(valueObjects["[-A, F+]"].ID(), valueObjects["[-A, D+]"].ID(), transactions["[-F, -D, Y+]"])
+
+		// check if signatures are valid
+		assert.True(t, transactions["[-F, -D, Y+]"].SignaturesValid())
+
+		// attach payload
+		tangle.AttachPayloadSync(valueObjects["[-F, -D, Y+]"])
+
+		// check if all of the invalid transaction's models were deleted
+		assert.False(t, tangle.Transaction(transactions["[-F, -D, Y+]"].ID()).Consume(func(metadata *transaction.Transaction) {}), "the transaction should not be found")
+		assert.False(t, tangle.TransactionMetadata(transactions["[-F, -D, Y+]"].ID()).Consume(func(metadata *TransactionMetadata) {}), "the transaction metadata should not be found")
+		assert.False(t, tangle.Payload(valueObjects["[-F, -D, Y+]"].ID()).Consume(func(payload *payload.Payload) {}), "the payload should not be found")
+		assert.False(t, tangle.PayloadMetadata(valueObjects["[-F, -D, Y+]"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {}), "the payload metadata should not be found")
+		assert.True(t, tangle.Approvers(valueObjects["[-A, F+]"].ID()).Consume(func(approver *PayloadApprover) {
+			assert.NotEqual(t, approver.ApprovingPayloadID(), valueObjects["[-F, -D, Y+]"].ID(), "the invalid value object should not show up as an approver")
+		}), "there should be approvers of the referenced output")
+		assert.False(t, tangle.Approvers(valueObjects["[-A, D+]"].ID()).Consume(func(approver *PayloadApprover) {}), "approvers should be empty")
+		assert.False(t, tangle.Attachments(transactions["[-F, -D, Y+]"].ID()).Consume(func(attachment *Attachment) {}), "the transaction should not have any attachments")
+		assert.False(t, tangle.Consumers(transaction.NewOutputID(seed.Address(D), transactions["[-A, D+]"].ID())).Consume(func(consumer *Consumer) {}), "the consumers of the used input should be empty")
+		assert.True(t, tangle.Consumers(transaction.NewOutputID(seed.Address(F), transactions["[-A, F+]"].ID())).Consume(func(consumer *Consumer) {
+			assert.NotEqual(t, consumer.TransactionID(), transactions["[-F, -D, Y+]"].ID(), "the consumers should not contain the invalid transaction")
+		}), "the consumers should not be empty")
+	}
+
+	// [-B, -C, E+] (2nd Reattachment)
+	{
+		valueObjects["[-B, -C, E+] (2nd Reattachment)"] = payload.New(valueObjects["[-A, F+]"].ID(), valueObjects["[-A, D+]"].ID(), transactions["[-B, -C, E+]"])
+
+		// attach payload
+		tangle.AttachPayloadSync(valueObjects["[-B, -C, E+] (2nd Reattachment)"])
+
+		// check if all of the valid transaction's models were NOT deleted
+		assert.True(t, tangle.Transaction(transactions["[-B, -C, E+]"].ID()).Consume(func(metadata *transaction.Transaction) {}))
+
+		// check if transaction metadata is found in database
+		assert.True(t, tangle.TransactionMetadata(transactions["[-B, -C, E+]"].ID()).Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.True(t, transactionMetadata.Solid(), "the transaction is not solid")
+			assert.Equal(t, branchmanager.MasterBranchID, transactionMetadata.BranchID(), "the transaction was booked into the wrong branch")
+		}))
+
+		// check if payload and its corresponding models are not found in the database (payload was invalid)
+		assert.False(t, tangle.Payload(valueObjects["[-B, -C, E+] (2nd Reattachment)"].ID()).Consume(func(payload *payload.Payload) {}), "the payload should not exist")
+		assert.False(t, tangle.PayloadMetadata(valueObjects["[-B, -C, E+] (2nd Reattachment)"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {}), "the payload metadata should not exist")
+		assert.True(t, tangle.Attachments(transactions["[-B, -C, E+]"].ID()).Consume(func(attachment *Attachment) {
+			assert.NotEqual(t, valueObjects["[-B, -C, E+] (2nd Reattachment)"].ID(), attachment.PayloadID(), "the attachment to the payload should be deleted")
+		}), "there should be attachments of the transaction")
+		assert.True(t, tangle.Approvers(valueObjects["[-A, F+]"].ID()).Consume(func(approver *PayloadApprover) {
+			assert.NotEqual(t, valueObjects["[-B, -C, E+] (2nd Reattachment)"].ID(), approver.ApprovingPayloadID(), "there should not be an approver reference to the invalid payload")
+		}), "there should be approvers")
+		assert.False(t, tangle.Approvers(valueObjects["[-A, D+]"].ID()).Consume(func(approver *PayloadApprover) {}), "there should be no approvers")
+	}
+
+	return tangle, transactions, valueObjects, branches, seed
+}
+
+// preparePropagationScenario2 extends scenario 1 and creates a tangle according to `img/scenario2.png`.
+func preparePropagationScenario2(t *testing.T) (*Tangle, map[string]*transaction.Transaction, map[string]*payload.Payload, map[string]branchmanager.BranchID, *wallet.Seed) {
+	tangle, transactions, valueObjects, branches, seed := preparePropagationScenario1(t)
+
+	// [-C, H+]
+	{
+		// create transaction + payload
+		transactions["[-C, H+]"] = transaction.New(
+			transaction.NewInputs(
+				transaction.NewOutputID(seed.Address(C), transactions["[-GENESIS, A+, B+, C+]"].ID()),
+			),
+
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				seed.Address(H): {
+					balance.New(balance.ColorIOTA, 1111),
+				},
+			}),
+		)
+		transactions["[-C, H+]"].Sign(signaturescheme.ED25519(*seed.KeyPair(C)))
+		valueObjects["[-C, H+]"] = payload.New(valueObjects["[-GENESIS, A+, B+, C+]"].ID(), valueObjects["[-A, D+]"].ID(), transactions["[-C, H+]"])
+
+		// check if signatures are valid
+		assert.True(t, transactions["[-C, H+]"].SignaturesValid())
+
+		// attach payload
+		tangle.AttachPayloadSync(valueObjects["[-C, H+]"])
+
+		// create alias for the branch
+		branches["C"] = branchmanager.NewBranchID(transactions["[-C, H+]"].ID())
+		branches["AC"] = tangle.BranchManager().GenerateAggregatedBranchID(branches["A"], branches["C"])
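+		// AC is the aggregated branch that combines the two (non-conflicting) branches A and C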
+
+		// check if transaction metadata is found in database
+		assert.True(t, tangle.TransactionMetadata(transactions["[-C, H+]"].ID()).Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.True(t, transactionMetadata.Solid(), "the transaction is not solid")
+			assert.Equal(t, branches["C"], transactionMetadata.BranchID(), "the transaction was booked into the wrong branch")
+		}))
+
+		// check if payload metadata is found in database
+		assert.True(t, tangle.PayloadMetadata(valueObjects["[-C, H+]"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.True(t, payloadMetadata.IsSolid(), "the payload is not solid")
+			assert.NotEqual(t, branches["C"], payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+			assert.Equal(t, branches["AC"], payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+		}))
+
+		// check if the balance on address C is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(C)).Consume(func(output *Output) {
+			assert.Equal(t, 2, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address H is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(H)).Consume(func(output *Output) {
+			assert.Equal(t, 0, output.ConsumerCount(), "the output should not be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branches["C"], output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// Branch D
+
+		// create alias for the branch
+		branches["D"] = branchmanager.NewBranchID(transactions["[-B, -C, E+]"].ID())
+		branches["BD"] = tangle.branchManager.GenerateAggregatedBranchID(branches["B"], branches["D"])
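+		// the double spend of output C moves [-B, -C, E+] into its own branch D; BD is the aggregate of branches B and D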
+
+		{
+			// check if payload metadata is found in database
+			assert.True(t, tangle.PayloadMetadata(valueObjects["[-B, -C, E+]"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+				assert.True(t, payloadMetadata.IsSolid(), "the payload is not solid")
+				assert.Equal(t, branches["D"], payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+			}))
+
+			// check if payload metadata is found in database
+			assert.True(t, tangle.PayloadMetadata(valueObjects["[-B, -C, E+] (Reattachment)"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+				assert.True(t, payloadMetadata.IsSolid(), "the payload is not solid")
+				assert.Equal(t, branches["D"], payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+			}))
+		}
+
+		// check if the branches C and D are conflicting
+		branchesConflicting, err := tangle.branchManager.BranchesConflicting(branches["C"], branches["D"])
+		require.NoError(t, err)
+		assert.True(t, branchesConflicting, "the branches should be conflicting")
+
+		// Aggregated Branch [BD]
+		{
+			// check if payload metadata is found in database
+			assert.True(t, tangle.PayloadMetadata(valueObjects["[-E, -F, G+]"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+				assert.True(t, payloadMetadata.IsSolid(), "the payload is not solid")
+				assert.Equal(t, branches["BD"], payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+			}))
+		}
+	}
+
+	// [-H, -D, I+]
+	{
+		// create transaction + payload
+		transactions["[-H, -D, I+]"] = transaction.New(
+			transaction.NewInputs(
+				transaction.NewOutputID(seed.Address(H), transactions["[-C, H+]"].ID()),
+				transaction.NewOutputID(seed.Address(D), transactions["[-A, D+]"].ID()),
+			),
+
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				seed.Address(I): {
+					balance.New(balance.ColorIOTA, 2222),
+				},
+			}),
+		)
+		transactions["[-H, -D, I+]"].Sign(signaturescheme.ED25519(*seed.KeyPair(H)))
+		transactions["[-H, -D, I+]"].Sign(signaturescheme.ED25519(*seed.KeyPair(D)))
+		valueObjects["[-H, -D, I+]"] = payload.New(valueObjects["[-C, H+]"].ID(), valueObjects["[-A, D+]"].ID(), transactions["[-H, -D, I+]"])
+
+		// check if signatures are valid
+		assert.True(t, transactions["[-H, -D, I+]"].SignaturesValid())
+
+		// attach payload
+		tangle.AttachPayloadSync(valueObjects["[-H, -D, I+]"])
+
+		// create alias for the branch
+		branches["AC"] = tangle.branchManager.GenerateAggregatedBranchID(branches["A"], branches["C"])
+
+		// check if transaction metadata is found in database
+		assert.True(t, tangle.TransactionMetadata(transactions["[-H, -D, I+]"].ID()).Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.True(t, transactionMetadata.Solid(), "the transaction is not solid")
+			assert.Equal(t, branches["AC"], transactionMetadata.BranchID(), "the transaction was booked into the wrong branch")
+		}))
+
+		// check if payload metadata is found in database
+		assert.True(t, tangle.PayloadMetadata(valueObjects["[-H, -D, I+]"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.True(t, payloadMetadata.IsSolid(), "the payload is not solid")
+			assert.Equal(t, branches["AC"], payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+		}))
+
+		// check if the balance on address H is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(H)).Consume(func(output *Output) {
+			assert.Equal(t, 1, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branches["C"], output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address D is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(D)).Consume(func(output *Output) {
+			assert.Equal(t, 1, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branches["A"], output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address I is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(I)).Consume(func(output *Output) {
+			assert.Equal(t, 0, output.ConsumerCount(), "the output should not be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 2222)}, output.Balances())
+			assert.Equal(t, branches["AC"], output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+	}
+
+	// [-B, J+]
+	{
+		// create transaction + payload
+		transactions["[-B, J+]"] = transaction.New(
+			transaction.NewInputs(
+				transaction.NewOutputID(seed.Address(B), transactions["[-GENESIS, A+, B+, C+]"].ID()),
+			),
+
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				seed.Address(J): {
+					balance.New(balance.ColorIOTA, 1111),
+				},
+			}),
+		)
+		transactions["[-B, J+]"].Sign(signaturescheme.ED25519(*seed.KeyPair(B)))
+		valueObjects["[-B, J+]"] = payload.New(valueObjects["[-C, H+]"].ID(), valueObjects["[-A, D+]"].ID(), transactions["[-B, J+]"])
+
+		// check if signatures are valid
+		assert.True(t, transactions["[-B, J+]"].SignaturesValid())
+
+		// attach payload
+		tangle.AttachPayloadSync(valueObjects["[-B, J+]"])
+
+		// create alias for the branch
+		branches["E"] = branchmanager.NewBranchID(transactions["[-B, J+]"].ID())
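+		// the double spend of output B moves [-B, J+] into its own branch E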
+
+		// check if transaction metadata is found in database
+		assert.True(t, tangle.TransactionMetadata(transactions["[-B, J+]"].ID()).Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.True(t, transactionMetadata.Solid(), "the transaction is not solid")
+			assert.Equal(t, branches["E"], transactionMetadata.BranchID(), "the transaction was booked into the wrong branch")
+		}))
+
+		// create alias for the branch
+		branches["ACE"] = tangle.branchManager.GenerateAggregatedBranchID(branches["A"], branches["C"], branches["E"])
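+		// the value object inherits branches A and C from its parents and E from its transaction, so it ends up in the aggregate ACE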
+
+		// check if payload metadata is found in database
+		assert.True(t, tangle.PayloadMetadata(valueObjects["[-B, J+]"].ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.True(t, payloadMetadata.IsSolid(), "the payload is not solid")
+			assert.Equal(t, branches["ACE"], payloadMetadata.BranchID(), "the payload was booked into the wrong branch")
+		}))
+
+		// check if the balance on address B is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(B)).Consume(func(output *Output) {
+			assert.Equal(t, 2, output.ConsumerCount(), "the output should be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the balance on address J is found in the database
+		assert.True(t, tangle.OutputsOnAddress(seed.Address(J)).Consume(func(output *Output) {
+			assert.Equal(t, 0, output.ConsumerCount(), "the output should not be spent")
+			assert.Equal(t, []*balance.Balance{balance.New(balance.ColorIOTA, 1111)}, output.Balances())
+			assert.Equal(t, branches["E"], output.BranchID(), "the output was booked into the wrong branch")
+			assert.True(t, output.Solid(), "the output is not solid")
+		}))
+
+		// check if the branches D and E are conflicting
+		branchesConflicting, err := tangle.branchManager.BranchesConflicting(branches["D"], branches["E"])
+		require.NoError(t, err)
+		assert.True(t, branchesConflicting, "the branches should be conflicting")
+
+	}
+
+	return tangle, transactions, valueObjects, branches, seed
+}
+
+func TestPropagationScenario1(t *testing.T) {
+	// img/scenario1.png
+
+	// test past cone monotonicity - all value objects MUST be confirmed
+	{
+		tangle, transactions, valueObjects, _, _ := preparePropagationScenario1(t)
+
+		// initialize debugger for this test
+		debugger.ResetAliases()
+		for name, valueObject := range valueObjects {
+			debugger.RegisterAlias(valueObject.ID(), "ValueObjectID"+name)
+		}
+		for name, tx := range transactions {
+			debugger.RegisterAlias(tx.ID(), "TransactionID"+name)
+		}
+
+		setTransactionPreferredWithCheck(t, tangle, transactions["[-GENESIS, A+, B+, C+]"], true)
+		verifyInclusionState(t, tangle, valueObjects["[-GENESIS, A+, B+, C+]"], true, false, true, false, false)
+
+		// should not be confirmed because [-GENESIS, A+, B+, C+] is not confirmed
+		setTransactionPreferredWithCheck(t, tangle, transactions["[-B, -C, E+]"], true)
+		setTransactionFinalizedWithCheck(t, tangle, transactions["[-B, -C, E+]"])
+		verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+]"], true, true, true, false, false)
+
+		// now finalize [-GENESIS, A+, B+, C+]
+		setTransactionFinalizedWithCheck(t, tangle, transactions["[-GENESIS, A+, B+, C+]"])
+		verifyInclusionState(t, tangle, valueObjects["[-GENESIS, A+, B+, C+]"], true, true, true, true, false)
+
+		// and [-B, -C, E+] should be confirmed now too
+		verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+]"], true, true, true, true, false)
+		// as well as the reattachment
+		verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+] (Reattachment)"], true, true, true, true, false)
+	}
+
+	// test future cone monotonicity simple - everything MUST be rejected and finalized if spending funds from rejected tx
+	{
+		tangle, transactions, valueObjects, _, _ := preparePropagationScenario1(t)
+
+		setTransactionFinalizedWithCheck(t, tangle, transactions["[-GENESIS, A+, B+, C+]"])
+		verifyInclusionState(t, tangle, valueObjects["[-GENESIS, A+, B+, C+]"], false, true, false, false, true)
+
+		// check future cone to be rejected
+		verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+]"], false, true, false, false, true)
+		verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+] (Reattachment)"], false, true, false, false, true)
+		verifyInclusionState(t, tangle, valueObjects["[-A, D+]"], false, true, false, false, true)
+		verifyInclusionState(t, tangle, valueObjects["[-A, F+]"], false, true, false, false, true)
+		verifyInclusionState(t, tangle, valueObjects["[-E, -F, G+]"], false, true, false, false, true)
+	}
+
+	// test future cone monotonicity more complex - everything MUST be rejected and finalized if spending funds from rejected tx
+	{
+		tangle, transactions, valueObjects, branches, _ := preparePropagationScenario1(t)
+
+		// initialize debugger for this test
+		debugger.ResetAliases()
+		for name, valueObject := range valueObjects {
+			debugger.RegisterAlias(valueObject.ID(), "ValueObjectID"+name)
+		}
+		for name, tx := range transactions {
+			debugger.RegisterAlias(tx.ID(), "TransactionID"+name)
+		}
+
+		setTransactionPreferredWithCheck(t, tangle, transactions["[-GENESIS, A+, B+, C+]"], true)
+		setTransactionFinalizedWithCheck(t, tangle, transactions["[-GENESIS, A+, B+, C+]"])
+		verifyInclusionState(t, tangle, valueObjects["[-GENESIS, A+, B+, C+]"], true, true, true, true, false)
+
+		// finalize & reject
+		//debugger.Enable()
+		setTransactionFinalizedWithCheck(t, tangle, transactions["[-B, -C, E+]"])
+		verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+]"], false, true, false, false, true)
+		//debugger.Disable()
+
+		// check future cone to be rejected
+		verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+] (Reattachment)"], false, true, false, false, true)
+
+		// the value object [-A, F+] should be rejected, but its transaction must not be finalized since it spends funds from [-GENESIS, A+, B+, C+], which is confirmed
+		verifyTransactionInclusionState(t, tangle, valueObjects["[-A, F+]"], false, false, false, false, false)
+		verifyValueObjectInclusionState(t, tangle, valueObjects["[-A, F+]"], false, false, true)
+		verifyBranchState(t, tangle, branches["B"], false, false, false, false)
+
+		// [-E, -F, G+] should be finalized and rejected since it spends funds from [-B, -C, E+]
+		verifyInclusionState(t, tangle, valueObjects["[-E, -F, G+]"], false, true, false, false, true)
+
+		// [-A, D+] should be unchanged
+		verifyInclusionState(t, tangle, valueObjects["[-A, D+]"], false, false, false, false, false)
+		verifyBranchState(t, tangle, branches["A"], false, false, false, false)
+	}
+
+	// simulate vote on [-A, F+] -> Branch A becomes rejected, Branch B confirmed
+	{
+		tangle, transactions, valueObjects, branches, _ := preparePropagationScenario1(t)
+
+		setTransactionPreferredWithCheck(t, tangle, transactions["[-GENESIS, A+, B+, C+]"], true)
+		setTransactionFinalizedWithCheck(t, tangle, transactions["[-GENESIS, A+, B+, C+]"])
+		verifyInclusionState(t, tangle, valueObjects["[-GENESIS, A+, B+, C+]"], true, true, true, true, false)
+
+		// check future cone
+		verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+]"], false, false, false, false, false)
+		verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+] (Reattachment)"], false, false, false, false, false)
+		verifyInclusionState(t, tangle, valueObjects["[-A, D+]"], false, false, false, false, false)
+		verifyInclusionState(t, tangle, valueObjects["[-A, F+]"], false, false, false, false, false)
+		verifyInclusionState(t, tangle, valueObjects["[-E, -F, G+]"], false, false, false, false, false)
+
+		// confirm [-B, -C, E+]
+		setTransactionPreferredWithCheck(t, tangle, transactions["[-B, -C, E+]"], true)
+		setTransactionFinalizedWithCheck(t, tangle, transactions["[-B, -C, E+]"])
+		verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+]"], true, true, true, true, false)
+		verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+] (Reattachment)"], true, true, true, true, false)
+
+		// prefer [-A, D+]
+		setTransactionPreferredWithCheck(t, tangle, transactions["[-A, D+]"], true)
+		verifyInclusionState(t, tangle, valueObjects["[-A, D+]"], true, false, true, false, false)
+		verifyBranchState(t, tangle, branches["A"], false, true, false, false)
+
+		// simulate vote result to like [-A, F+] -> [-A, F+] becomes confirmed and [-A, D+] rejected
+		setTransactionPreferredWithCheck(t, tangle, transactions["[-A, F+]"], true)
+		setTransactionFinalizedWithCheck(t, tangle, transactions["[-A, F+]"])
+		verifyInclusionState(t, tangle, valueObjects["[-A, F+]"], true, true, true, true, false)
+		verifyBranchState(t, tangle, branches["B"], true, true, true, false)
+
+		verifyInclusionState(t, tangle, valueObjects["[-E, -F, G+]"], false, false, false, false, false)
+		setTransactionPreferredWithCheck(t, tangle, transactions["[-E, -F, G+]"], true)
+		setTransactionFinalizedWithCheck(t, tangle, transactions["[-E, -F, G+]"])
+		verifyInclusionState(t, tangle, valueObjects["[-E, -F, G+]"], true, true, true, true, false)
+
+		// [-A, D+] should be rejected
+		verifyInclusionState(t, tangle, valueObjects["[-A, D+]"], false, true, false, false, true)
+		verifyBranchState(t, tangle, branches["A"], true, false, false, true)
+	}
+
+	// simulate vote on [-A, D+] -> Branch B becomes rejected, Branch A confirmed
+	{
+		tangle, transactions, valueObjects, branches, _ := preparePropagationScenario1(t)
+
+		// confirm [-GENESIS, A+, B+, C+]
+		setTransactionPreferredWithCheck(t, tangle, transactions["[-GENESIS, A+, B+, C+]"], true)
+		setTransactionFinalizedWithCheck(t, tangle, transactions["[-GENESIS, A+, B+, C+]"])
+		verifyInclusionState(t, tangle, valueObjects["[-GENESIS, A+, B+, C+]"], true, true, true, true, false)
+
+		// confirm [-B, -C, E+]
+		setTransactionPreferredWithCheck(t, tangle, transactions["[-B, -C, E+]"], true)
+		setTransactionFinalizedWithCheck(t, tangle, transactions["[-B, -C, E+]"])
+		verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+]"], true, true, true, true, false)
+
+		// prefer [-A, F+] and thus Branch B
+		setTransactionPreferredWithCheck(t, tangle, transactions["[-A, F+]"], true)
+		verifyInclusionState(t, tangle, valueObjects["[-A, F+]"], true, false, true, false, false)
+		verifyBranchState(t, tangle, branches["B"], false, true, false, false)
+		// prefer [-E, -F, G+]
+		setTransactionPreferredWithCheck(t, tangle, transactions["[-E, -F, G+]"], true)
+		verifyInclusionState(t, tangle, valueObjects["[-E, -F, G+]"], true, false, true, false, false)
+
+		// simulate vote result to like [-A, D+] -> [-A, D+] becomes confirmed and [-A, F+], [-E, -F, G+] rejected
+		setTransactionPreferredWithCheck(t, tangle, transactions["[-A, D+]"], true)
+		setTransactionFinalizedWithCheck(t, tangle, transactions["[-A, D+]"])
+		verifyInclusionState(t, tangle, valueObjects["[-A, D+]"], true, true, true, true, false)
+		verifyBranchState(t, tangle, branches["A"], true, true, true, false)
+
+		// [-A, F+], [-E, -F, G+] should be finalized and rejected
+		verifyInclusionState(t, tangle, valueObjects["[-A, F+]"], false, true, false, false, true)
+		verifyBranchState(t, tangle, branches["B"], true, false, false, true)
+		verifyInclusionState(t, tangle, valueObjects["[-E, -F, G+]"], false, true, false, false, true)
+	}
+}
+
+func TestPropagationScenario2(t *testing.T) {
+	// img/scenario2.png
+	tangle, transactions, valueObjects, branches, _ := preparePropagationScenario2(t)
+
+	// initialize debugger for this test
+	debugger.ResetAliases()
+	for name, valueObject := range valueObjects {
+		debugger.RegisterAlias(valueObject.ID(), "ValueObjectID"+name)
+	}
+	for name, tx := range transactions {
+		debugger.RegisterAlias(tx.ID(), "TransactionID"+name)
+	}
+
+	// confirm [-GENESIS, A+, B+, C+]
+	setTransactionPreferredWithCheck(t, tangle, transactions["[-GENESIS, A+, B+, C+]"], true)
+	setTransactionFinalizedWithCheck(t, tangle, transactions["[-GENESIS, A+, B+, C+]"])
+	verifyInclusionState(t, tangle, valueObjects["[-GENESIS, A+, B+, C+]"], true, true, true, true, false)
+
+	// prefer [-B, -C, E+] and thus Branch D
+	setTransactionPreferredWithCheck(t, tangle, transactions["[-B, -C, E+]"], true)
+	verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+]"], true, false, true, false, false)
+	verifyBranchState(t, tangle, branches["D"], false, true, false, false)
+	verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+] (Reattachment)"], true, false, true, false, false)
+
+	// prefer [-A, F+] and thus Branch B
+	setTransactionPreferredWithCheck(t, tangle, transactions["[-A, F+]"], true)
+	verifyInclusionState(t, tangle, valueObjects["[-A, F+]"], true, false, true, false, false)
+	verifyBranchState(t, tangle, branches["B"], false, true, false, false)
+
+	// prefer [-E, -F, G+]
+	setTransactionPreferredWithCheck(t, tangle, transactions["[-E, -F, G+]"], true)
+	verifyInclusionState(t, tangle, valueObjects["[-E, -F, G+]"], true, false, true, false, false)
+	// check the aggregated branch BD (aggregation of branches B and D)
+	verifyBranchState(t, tangle, branches["BD"], false, true, false, false)
+
+	// verify states of other transactions, value objects and branches
+	verifyInclusionState(t, tangle, valueObjects["[-A, D+]"], false, false, false, false, false)
+	verifyBranchState(t, tangle, branches["A"], false, false, false, false)
+
+	verifyInclusionState(t, tangle, valueObjects["[-C, H+]"], false, false, false, false, false)
+	verifyBranchState(t, tangle, branches["C"], false, false, false, false)
+
+	verifyInclusionState(t, tangle, valueObjects["[-H, -D, I+]"], false, false, false, false, false)
+	// check the aggregated branch AC (aggregation of branches A and C)
+	verifyBranchState(t, tangle, branches["AC"], false, false, false, false)
+
+	verifyInclusionState(t, tangle, valueObjects["[-B, J+]"], false, false, false, false, false)
+	verifyBranchState(t, tangle, branches["E"], false, false, false, false)
+	verifyBranchState(t, tangle, branches["ACE"], false, false, false, false)
+
+	// prefer [-H, -D, I+] - should be liked after votes on [-A, D+] and [-C, H+]
+	setTransactionPreferredWithCheck(t, tangle, transactions["[-H, -D, I+]"], true)
+	verifyInclusionState(t, tangle, valueObjects["[-H, -D, I+]"], true, false, false, false, false)
+
+	// simulate vote result to like [-A, D+] -> [-A, D+] becomes confirmed and [-A, F+], [-E, -F, G+] rejected
+	setTransactionPreferredWithCheck(t, tangle, transactions["[-A, D+]"], true)
+	setTransactionFinalizedWithCheck(t, tangle, transactions["[-A, D+]"])
+	verifyInclusionState(t, tangle, valueObjects["[-A, D+]"], true, true, true, true, false)
+	verifyBranchState(t, tangle, branches["A"], true, true, true, false)
+
+	verifyInclusionState(t, tangle, valueObjects["[-A, F+]"], false, true, false, false, true)
+	verifyBranchState(t, tangle, branches["B"], true, false, false, true)
+	verifyInclusionState(t, tangle, valueObjects["[-E, -F, G+]"], false, true, false, false, true)
+
+	// simulate vote result to like [-C, H+] -> [-C, H+] becomes confirmed and [-B, -C, E+], [-B, -C, E+] (Reattachment) rejected
+	setTransactionPreferredWithCheck(t, tangle, transactions["[-C, H+]"], true)
+	setTransactionFinalizedWithCheck(t, tangle, transactions["[-C, H+]"])
+
+	verifyInclusionState(t, tangle, valueObjects["[-C, H+]"], true, true, true, true, false)
+	verifyBranchState(t, tangle, branches["C"], true, true, true, false)
+	verifyBranchState(t, tangle, branches["AC"], true, true, true, false)
+
+	verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+]"], false, true, false, false, true)
+	verifyBranchState(t, tangle, branches["D"], true, false, false, true)
+	verifyInclusionState(t, tangle, valueObjects["[-B, -C, E+] (Reattachment)"], false, true, false, false, true)
+	verifyBranchState(t, tangle, branches["BD"], true, false, false, true)
+	// TODO: BD is not finalized
+
+	// [-H, -D, I+] should now be liked
+	verifyInclusionState(t, tangle, valueObjects["[-H, -D, I+]"], true, false, true, false, false)
+	setTransactionFinalizedWithCheck(t, tangle, transactions["[-H, -D, I+]"])
+	verifyInclusionState(t, tangle, valueObjects["[-H, -D, I+]"], true, true, true, true, false)
+
+	// [-B, J+] should be unchanged
+	verifyInclusionState(t, tangle, valueObjects["[-B, J+]"], false, false, false, false, false)
+	// [-B, J+] should become confirmed after preferring and finalizing
+	setTransactionPreferredWithCheck(t, tangle, transactions["[-B, J+]"], true)
+	setTransactionFinalizedWithCheck(t, tangle, transactions["[-B, J+]"])
+	verifyInclusionState(t, tangle, valueObjects["[-B, J+]"], true, true, true, true, false)
+	verifyBranchState(t, tangle, branches["E"], true, true, true, false)
+	verifyBranchState(t, tangle, branches["ACE"], true, true, true, false)
+}
+
+// verifyBranchState verifies the branch state according to the given parameters.
+func verifyBranchState(t *testing.T, tangle *Tangle, id branchmanager.BranchID, finalized, liked, confirmed, rejected bool) {
+	assert.True(t, tangle.branchManager.Branch(id).Consume(func(branch *branchmanager.Branch) {
+		assert.Equalf(t, finalized, branch.Finalized(), "branch finalized state does not match")
+		assert.Equalf(t, liked, branch.Liked(), "branch liked state does not match")
+
+		assert.Equalf(t, confirmed, branch.Confirmed(), "branch confirmed state does not match")
+		assert.Equalf(t, rejected, branch.Rejected(), "branch rejected state does not match")
+	}))
+}
+
+// verifyTransactionInclusionState verifies the inclusion state of the outputs and the transaction according to the given parameters.
+func verifyTransactionInclusionState(t *testing.T, tangle *Tangle, valueObject *payload.Payload, preferred, finalized, liked, confirmed, rejected bool) {
+	tx := valueObject.Transaction()
+
+	// check outputs
+	tx.Outputs().ForEach(func(address address.Address, balances []*balance.Balance) bool {
+		assert.True(t, tangle.TransactionOutput(transaction.NewOutputID(address, tx.ID())).Consume(func(output *Output) {
+			assert.Equalf(t, liked, output.Liked(), "output liked state does not match")
+			assert.Equalf(t, confirmed, output.Confirmed(), "output confirmed state does not match")
+			assert.Equalf(t, rejected, output.Rejected(), "output rejected state does not match")
+		}))
+		return true
+	})
+
+	// check transaction
+	assert.True(t, tangle.TransactionMetadata(tx.ID()).Consume(func(metadata *TransactionMetadata) {
+		assert.Equalf(t, preferred, metadata.Preferred(), "tx preferred state does not match")
+		assert.Equalf(t, finalized, metadata.Finalized(), "tx finalized state does not match")
+
+		assert.Equalf(t, liked, metadata.Liked(), "tx liked state does not match")
+		assert.Equalf(t, confirmed, metadata.Confirmed(), "tx confirmed state does not match")
+		assert.Equalf(t, rejected, metadata.Rejected(), "tx rejected state does not match")
+	}))
+}
+
+// verifyValueObjectInclusionState verifies the inclusion state of a value object according to the given parameters.
+func verifyValueObjectInclusionState(t *testing.T, tangle *Tangle, valueObject *payload.Payload, liked, confirmed, rejected bool) {
+	assert.True(t, tangle.PayloadMetadata(valueObject.ID()).Consume(func(payloadMetadata *PayloadMetadata) {
+		assert.Equalf(t, liked, payloadMetadata.Liked(), "value object liked state does not match")
+		assert.Equalf(t, confirmed, payloadMetadata.Confirmed(), "value object confirmed state does not match")
+		assert.Equalf(t, rejected, payloadMetadata.Rejected(), "value object rejected state does not match")
+	}))
+}
+
+// verifyInclusionState verifies the inclusion state of outputs, transaction and value object according to the given parameters.
+func verifyInclusionState(t *testing.T, tangle *Tangle, valueObject *payload.Payload, preferred, finalized, liked, confirmed, rejected bool) {
+	verifyTransactionInclusionState(t, tangle, valueObject, preferred, finalized, liked, confirmed, rejected)
+	verifyValueObjectInclusionState(t, tangle, valueObject, liked, confirmed, rejected)
+}
+
+// setTransactionPreferredWithCheck sets the transaction's preferred flag and asserts that no error occurred and that the flag was modified.
+func setTransactionPreferredWithCheck(t *testing.T, tangle *Tangle, tx *transaction.Transaction, preferred bool) {
+	modified, err := tangle.SetTransactionPreferred(tx.ID(), preferred)
+	require.NoError(t, err)
+	assert.True(t, modified)
+}
+
+// setTransactionFinalizedWithCheck sets the transaction to finalized and asserts that no error occurred and that the flag was modified.
+func setTransactionFinalizedWithCheck(t *testing.T, tangle *Tangle, tx *transaction.Transaction) {
+	modified, err := tangle.SetTransactionFinalized(tx.ID())
+	require.NoError(t, err)
+	assert.True(t, modified)
+}
diff --git a/dapps/valuetransfers/packages/tangle/tangle_test.go b/dapps/valuetransfers/packages/tangle/tangle_test.go
index a418b92628de38e29889a85d85669c8f590c9d33..dab6589cc4d360d0fa88d0651f74a08000749a05 100644
--- a/dapps/valuetransfers/packages/tangle/tangle_test.go
+++ b/dapps/valuetransfers/packages/tangle/tangle_test.go
@@ -1,66 +1,1550 @@
 package tangle
 
 import (
+	"container/list"
+	"math"
 	"testing"
-	"time"
 
-	"github.com/stretchr/testify/assert"
+	"github.com/google/go-cmp/cmp"
 
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+
+	"github.com/iotaledger/hive.go/events"
+	"github.com/iotaledger/hive.go/kvstore/mapdb"
+	"github.com/iotaledger/hive.go/types"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
-func TestNewOutput(t *testing.T) {
-	randomAddress := address.Random()
-	randomTransactionID := transaction.RandomID()
+func TestSetTransactionPreferred(t *testing.T) {
+	tangle := New(mapdb.NewMapDB())
+	tx := createDummyTransaction()
+	valueObject := payload.New(payload.GenesisID, payload.GenesisID, tx)
+	tangle.storeTransactionModels(valueObject)
+
+	modified, err := tangle.SetTransactionPreferred(tx.ID(), true)
+	require.NoError(t, err)
+	assert.True(t, modified)
+}
+
+// TestBookTransaction tests the following cases:
+// - missing output
+// - transaction already booked by another process
+// - booking first spend
+// - booking double spend
+func TestBookTransaction(t *testing.T) {
+
+	// CASE: missing output
+	t.Run("CASE: missing output", func(t *testing.T) {
+		tangle := New(mapdb.NewMapDB())
+		tx := createDummyTransaction()
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, tx)
+		cachedTransaction, cachedTransactionMetadata, _, _ := tangle.storeTransactionModels(valueObject)
+
+		transactionBooked, decisionPending, err := tangle.bookTransaction(cachedTransaction, cachedTransactionMetadata)
+		assert.False(t, transactionBooked)
+		assert.False(t, decisionPending)
+		assert.Error(t, err)
+	})
+
+	// CASE: transaction already booked by another process
+	t.Run("CASE: transaction already booked by another process", func(t *testing.T) {
+		tangle := New(mapdb.NewMapDB())
+		tx := createDummyTransaction()
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, tx)
+		cachedTransaction, cachedTransactionMetadata, _, _ := tangle.storeTransactionModels(valueObject)
+
+		transactionMetadata := cachedTransactionMetadata.Unwrap()
+		transactionMetadata.SetSolid(true)
+
+		transactionBooked, decisionPending, err := tangle.bookTransaction(cachedTransaction, cachedTransactionMetadata)
+		require.NoError(t, err)
+		assert.False(t, transactionBooked)
+		assert.False(t, decisionPending)
+	})
+
+	// CASE: booking first spend
+	t.Run("CASE: booking first spend", func(t *testing.T) {
+		tangle := New(mapdb.NewMapDB())
+
+		// prepare snapshot
+		color1 := [32]byte{1}
+		outputs := map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 2),
+				balance.New(color1, 3),
+			},
+		}
+		inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+
+		// build first spending
+		tx := transaction.New(
+			transaction.NewInputs(inputIDs...),
+			// outputs
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				address.Random(): {
+					balance.New(balance.ColorIOTA, 3),
+					balance.New(color1, 3),
+				},
+			}),
+		)
+
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, tx)
+		cachedTransaction, cachedTransactionMetadata, _, _ := tangle.storeTransactionModels(valueObject)
+		txMetadata := cachedTransactionMetadata.Unwrap()
+
+		// assert that branchID is undefined before being booked
+		assert.Equal(t, branchmanager.UndefinedBranchID, txMetadata.BranchID())
+
+		transactionBooked, decisionPending, err := tangle.bookTransaction(cachedTransaction, cachedTransactionMetadata)
+		require.NoError(t, err)
+		assert.True(t, transactionBooked, "transactionBooked")
+		assert.False(t, decisionPending, "decisionPending")
+
+		// assert that branchID is the same as the MasterBranchID
+		assert.Equal(t, branchmanager.MasterBranchID, txMetadata.BranchID())
+
+		// CASE: booking double spend
+		t.Run("CASE: booking double spend", func(t *testing.T) {
+			// build second spending
+			tx := transaction.New(
+				transaction.NewInputs(inputIDs...),
+				// outputs
+				transaction.NewOutputs(map[address.Address][]*balance.Balance{
+					address.Random(): {
+						balance.New(balance.ColorIOTA, 3),
+						balance.New(color1, 3),
+					},
+				}),
+			)
+
+			valueObject := payload.New(payload.GenesisID, payload.GenesisID, tx)
+			cachedTransaction, cachedTransactionMetadata, _, _ = tangle.storeTransactionModels(valueObject)
+			txMetadata := cachedTransactionMetadata.Unwrap()
+
+			// assert that branchID is undefined before being booked
+			assert.Equal(t, branchmanager.UndefinedBranchID, txMetadata.BranchID())
+
+			transactionBooked, decisionPending, err := tangle.bookTransaction(cachedTransaction, cachedTransactionMetadata)
+			require.NoError(t, err)
+			assert.True(t, transactionBooked, "transactionBooked")
+			assert.True(t, decisionPending, "decisionPending")
+
+			// assert that first spend and double spend have different BranchIDs
+			assert.NotEqual(t, branchmanager.MasterBranchID, txMetadata.BranchID(), "BranchID")
+		})
+	})
+}
+
+func TestCalculateBranchOfTransaction(t *testing.T) {
+
+	// CASE: missing output
+	t.Run("CASE: missing output", func(t *testing.T) {
+		tangle := New(mapdb.NewMapDB())
+		tx := createDummyTransaction()
+		cachedBranch, err := tangle.calculateBranchOfTransaction(tx)
+		assert.Error(t, err)
+		assert.Nil(t, cachedBranch)
+	})
+
+	// CASE: same as master branch
+	t.Run("CASE: same as master branch", func(t *testing.T) {
+		tangle := New(mapdb.NewMapDB())
+
+		// prepare snapshot
+		color1 := [32]byte{1}
+		outputs := map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 2),
+				balance.New(color1, 3),
+			},
+		}
+		inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+
+		tx := transaction.New(
+			transaction.NewInputs(inputIDs...),
+			// outputs
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				address.Random(): {
+					balance.New(balance.ColorIOTA, 3),
+					balance.New(color1, 3),
+				},
+			}),
+		)
+
+		cachedBranch, err := tangle.calculateBranchOfTransaction(tx)
+		require.NoError(t, err)
+		assert.Equal(t, branchmanager.MasterBranchID, cachedBranch.Unwrap().ID())
+	})
+}
+
+func TestMoveTransactionToBranch(t *testing.T) {
+	tangle := New(mapdb.NewMapDB())
+	// prepare snapshot
+	color1 := [32]byte{1}
+	outputs := map[address.Address][]*balance.Balance{
+		address.Random(): {
+			balance.New(balance.ColorIOTA, 1),
+		},
+		address.Random(): {
+			balance.New(balance.ColorIOTA, 2),
+			balance.New(color1, 3),
+		},
+	}
+	inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+
+	tx := transaction.New(
+		transaction.NewInputs(inputIDs...),
+		// outputs
+		transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 3),
+				balance.New(color1, 3),
+			},
+		}),
+	)
+	valueObject := payload.New(payload.GenesisID, payload.GenesisID, tx)
+	cachedTransaction, cachedTransactionMetadata, _, _ := tangle.storeTransactionModels(valueObject)
+	txMetadata := cachedTransactionMetadata.Unwrap()
+
+	// create a conflicting branch
+	cachedBranch2, _ := tangle.BranchManager().Fork(branchmanager.BranchID{2}, []branchmanager.BranchID{branchmanager.MasterBranchID}, []branchmanager.ConflictID{{0}})
+	defer cachedBranch2.Release()
+
+	err := tangle.moveTransactionToBranch(cachedTransaction.Retain(), cachedTransactionMetadata.Retain(), cachedBranch2.Retain())
+	require.NoError(t, err)
+	assert.Equal(t, branchmanager.BranchID{2}, txMetadata.BranchID())
+}
+
+func TestFork(t *testing.T) {
+	// CASE: already finalized
+	t.Run("CASE: already finalized", func(t *testing.T) {
+		tangle := New(mapdb.NewMapDB())
+		// prepare snapshot
+		color1 := [32]byte{1}
+		outputs := map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 2),
+				balance.New(color1, 3),
+			},
+		}
+		inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+
+		tx := transaction.New(
+			transaction.NewInputs(inputIDs...),
+			// outputs
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				address.Random(): {
+					balance.New(balance.ColorIOTA, 3),
+					balance.New(color1, 3),
+				},
+			}),
+		)
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, tx)
+		_, cachedTransactionMetadata, _, _ := tangle.storeTransactionModels(valueObject)
+		txMetadata := cachedTransactionMetadata.Unwrap()
+
+		txMetadata.SetFinalized(true)
+
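+		// Fork is expected to be a no-op for an already finalized transaction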
+		forked, finalized, err := tangle.Fork(tx.ID(), []transaction.OutputID{})
+		require.NoError(t, err)
+		assert.False(t, forked)
+		assert.True(t, finalized)
+	})
+
+	t.Run("CASE: normal fork", func(t *testing.T) {
+		tangle := New(mapdb.NewMapDB())
+		// prepare snapshot
+		color1 := [32]byte{1}
+		outputs := map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 2),
+				balance.New(color1, 3),
+			},
+		}
+		inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+
+		tx := transaction.New(
+			transaction.NewInputs(inputIDs...),
+			// outputs
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				address.Random(): {
+					balance.New(balance.ColorIOTA, 3),
+					balance.New(color1, 3),
+				},
+			}),
+		)
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, tx)
+		tangle.storeTransactionModels(valueObject)
+
+		forked, finalized, err := tangle.Fork(tx.ID(), []transaction.OutputID{})
+		require.NoError(t, err)
+		assert.True(t, forked, "forked")
+		assert.False(t, finalized, "finalized")
+
+		t.Run("CASE: branch existed already", func(t *testing.T) {
+			forked, finalized, err = tangle.Fork(tx.ID(), []transaction.OutputID{})
+			require.NoError(t, err)
+			assert.False(t, forked, "forked")
+			assert.False(t, finalized, "finalized")
+		})
+	})
+
+}
+
+func TestBookPayload(t *testing.T) {
+	t.Run("CASE: undefined branchID", func(t *testing.T) {
+		tangle := New(mapdb.NewMapDB())
+
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, createDummyTransaction())
+		cachedPayload, cachedMetadata, _ := tangle.storePayload(valueObject)
+		_, cachedTransactionMetadata, _, _ := tangle.storeTransactionModels(valueObject)
+
+		payloadBooked, err := tangle.bookPayload(cachedPayload.Retain(), cachedMetadata.Retain(), cachedTransactionMetadata.Retain())
+		defer func() {
+			cachedPayload.Release()
+			cachedMetadata.Release()
+			cachedTransactionMetadata.Release()
+		}()
+
+		require.NoError(t, err)
+		assert.False(t, payloadBooked, "payloadBooked")
+	})
+
+	t.Run("CASE: successfully book", func(t *testing.T) {
+		tangle := New(mapdb.NewMapDB())
+
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, createDummyTransaction())
+		cachedPayload, cachedMetadata, _ := tangle.storePayload(valueObject)
+		metadata := cachedMetadata.Unwrap()
+
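+		// pre-set the payload's branch (ending up as {2}) so that it differs from the branch that bookPayload derives; booking is therefore expected to modify it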
+		metadata.SetBranchID(branchmanager.BranchID{1})
+		metadata.SetBranchID(branchmanager.BranchID{2})
+
+		_, cachedTransactionMetadata, _, _ := tangle.storeTransactionModels(valueObject)
+		txMetadata := cachedTransactionMetadata.Unwrap()
+		txMetadata.SetBranchID(branchmanager.BranchID{1})
+
+		payloadBooked, err := tangle.bookPayload(cachedPayload.Retain(), cachedMetadata.Retain(), cachedTransactionMetadata.Retain())
+		defer func() {
+			cachedPayload.Release()
+			cachedMetadata.Release()
+			cachedTransactionMetadata.Release()
+		}()
+
+		require.NoError(t, err)
+		assert.True(t, payloadBooked, "payloadBooked")
+	})
+
+	t.Run("CASE: not booked", func(t *testing.T) {
+		tangle := New(mapdb.NewMapDB())
+
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, createDummyTransaction())
+		cachedPayload, cachedMetadata, _ := tangle.storePayload(valueObject)
+		metadata := cachedMetadata.Unwrap()
+
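+		// pre-set the payload's branch to the same value that bookPayload derives, so booking is not expected to modify anything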
+		metadata.SetBranchID(branchmanager.BranchID{1})
+		metadata.SetBranchID(branchmanager.BranchID{1})
 
-	output := NewOutput(randomAddress, randomTransactionID, branchmanager.MasterBranchID, []*balance.Balance{
-		balance.New(balance.ColorIOTA, 1337),
+		_, cachedTransactionMetadata, _, _ := tangle.storeTransactionModels(valueObject)
+		txMetadata := cachedTransactionMetadata.Unwrap()
+		txMetadata.SetBranchID(branchmanager.BranchID{1})
+
+		payloadBooked, err := tangle.bookPayload(cachedPayload.Retain(), cachedMetadata.Retain(), cachedTransactionMetadata.Retain())
+		defer func() {
+			cachedPayload.Release()
+			cachedMetadata.Release()
+			cachedTransactionMetadata.Release()
+		}()
+
+		require.NoError(t, err)
+		assert.False(t, payloadBooked, "payloadBooked")
 	})
 
-	assert.Equal(t, randomAddress, output.Address())
-	assert.Equal(t, randomTransactionID, output.TransactionID())
-	assert.Equal(t, false, output.Solid())
-	assert.Equal(t, time.Time{}, output.SolidificationTime())
-	assert.Equal(t, []*balance.Balance{
-		balance.New(balance.ColorIOTA, 1337),
-	}, output.Balances())
+}
+
+// TestStorePayload checks whether a value object is correctly stored.
+func TestStorePayload(t *testing.T) {
+	tangle := New(mapdb.NewMapDB())
+
+	tx := createDummyTransaction()
+	valueObject := payload.New(payload.GenesisID, payload.GenesisID, tx)
+	{
+		cachedPayload, cachedMetadata, stored := tangle.storePayload(valueObject)
+		cachedPayload.Consume(func(payload *payload.Payload) {
+			assert.True(t, assert.ObjectsAreEqual(valueObject, payload))
+		})
+		cachedMetadata.Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.Equal(t, valueObject.ID(), payloadMetadata.PayloadID())
+		})
+		assert.True(t, stored)
+	}
+
+	// store same value object again -> should return false
+	{
+		cachedPayload, cachedMetadata, stored := tangle.storePayload(valueObject)
+		assert.Nil(t, cachedPayload)
+		assert.Nil(t, cachedMetadata)
+		assert.False(t, stored)
+	}
+
+	// retrieve from tangle
+	{
+		cachedPayload := tangle.Payload(valueObject.ID())
+		cachedPayload.Consume(func(payload *payload.Payload) {
+			assert.True(t, assert.ObjectsAreEqual(valueObject, payload))
+		})
+		cachedMetadata := tangle.PayloadMetadata(valueObject.ID())
+		cachedMetadata.Consume(func(payloadMetadata *PayloadMetadata) {
+			assert.Equal(t, valueObject.ID(), payloadMetadata.PayloadID())
+		})
+	}
+}
+
+// TestStoreTransactionModels checks whether all models corresponding to a transaction are correctly created.
+func TestStoreTransactionModels(t *testing.T) {
+	tangle := New(mapdb.NewMapDB())
+
+	tx := createDummyTransaction()
+	valueObject := payload.New(payload.GenesisID, payload.GenesisID, tx)
+	{
+		cachedTransaction, cachedTransactionMetadata, cachedAttachment, transactionIsNew := tangle.storeTransactionModels(valueObject)
+		cachedTransaction.Consume(func(transaction *transaction.Transaction) {
+			assert.True(t, assert.ObjectsAreEqual(tx, transaction))
+		})
+		cachedTransactionMetadata.Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.Equal(t, tx.ID(), transactionMetadata.ID())
+		})
+		expectedAttachment := NewAttachment(tx.ID(), valueObject.ID())
+		cachedAttachment.Consume(func(attachment *Attachment) {
+			assert.Equal(t, expectedAttachment.TransactionID(), attachment.TransactionID())
+			assert.Equal(t, expectedAttachment.PayloadID(), attachment.PayloadID())
+		})
+		assert.True(t, transactionIsNew)
+	}
+
+	// add same value object with same tx again -> should return false
+	{
+		cachedTransaction, cachedTransactionMetadata, cachedAttachment, transactionIsNew := tangle.storeTransactionModels(valueObject)
+		cachedTransaction.Consume(func(transaction *transaction.Transaction) {
+			assert.True(t, assert.ObjectsAreEqual(tx, transaction))
+		})
+		cachedTransactionMetadata.Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.Equal(t, tx.ID(), transactionMetadata.ID())
+		})
+		assert.Nil(t, cachedAttachment)
+		assert.False(t, transactionIsNew)
+	}
+
+	// store same tx with different value object -> new attachment, same tx, transactionIsNew=false
+	valueObject2 := payload.New(payload.RandomID(), payload.RandomID(), tx)
+	{
+		cachedTransaction, cachedTransactionMetadata, cachedAttachment, transactionIsNew := tangle.storeTransactionModels(valueObject2)
+		cachedTransaction.Consume(func(transaction *transaction.Transaction) {
+			assert.True(t, assert.ObjectsAreEqual(tx, transaction))
+		})
+		cachedTransactionMetadata.Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.Equal(t, tx.ID(), transactionMetadata.ID())
+		})
+		expectedAttachment := NewAttachment(tx.ID(), valueObject2.ID())
+		cachedAttachment.Consume(func(attachment *Attachment) {
+			assert.Equal(t, expectedAttachment.TransactionID(), attachment.TransactionID())
+			assert.Equal(t, expectedAttachment.PayloadID(), attachment.PayloadID())
+		})
+		assert.False(t, transactionIsNew)
+	}
+
+	// retrieve from tangle
+	{
+		cachedTransaction := tangle.Transaction(tx.ID())
+		cachedTransaction.Consume(func(transaction *transaction.Transaction) {
+			assert.True(t, assert.ObjectsAreEqual(tx, transaction))
+		})
+		cachedTransactionMetadata := tangle.TransactionMetadata(tx.ID())
+		cachedTransactionMetadata.Consume(func(transactionMetadata *TransactionMetadata) {
+			assert.Equal(t, tx.ID(), transactionMetadata.ID())
+		})
+
+		// check created consumers
+		// note: multiple consumers for the same output can only occur in case of a conflict, i.e., two transactions spending the same input
+		tx.Inputs().ForEach(func(inputId transaction.OutputID) bool {
+			expectedConsumer := NewConsumer(inputId, tx.ID())
+			tangle.Consumers(inputId).Consume(func(consumer *Consumer) {
+				assert.Equal(t, expectedConsumer.ConsumedInput(), consumer.ConsumedInput())
+				assert.Equal(t, expectedConsumer.TransactionID(), consumer.TransactionID())
+			})
+			return true
+		})
+
+		cachedAttachments := tangle.Attachments(tx.ID())
+		assert.Len(t, cachedAttachments, 2)
+		attachmentPayloads := []payload.ID{valueObject.ID(), valueObject2.ID()}
+		cachedAttachments.Consume(func(attachment *Attachment) {
+			assert.Equal(t, tx.ID(), attachment.TransactionID())
+			assert.Contains(t, attachmentPayloads, attachment.PayloadID())
+		})
+	}
+}
+
+// TestStorePayloadReferences checks whether approvers are correctly created.
+func TestStorePayloadReferences(t *testing.T) {
+	tangle := New(mapdb.NewMapDB())
+
+	tx := createDummyTransaction()
+	parent1 := payload.RandomID()
+	parent2 := payload.RandomID()
+	valueObject1 := payload.New(parent1, parent2, tx)
+
+	{
+		tangle.storePayloadReferences(valueObject1)
+
+		// check for approvers
+		approversParent1 := tangle.Approvers(parent1)
+		assert.Len(t, approversParent1, 1)
+		approversParent1.Consume(func(approver *PayloadApprover) {
+			assert.Equal(t, parent1, approver.referencedPayloadID)
+			assert.Equal(t, valueObject1.ID(), approver.ApprovingPayloadID())
+		})
+
+		approversParent2 := tangle.Approvers(parent2)
+		assert.Len(t, approversParent2, 1)
+		approversParent2.Consume(func(approver *PayloadApprover) {
+			assert.Equal(t, parent2, approver.referencedPayloadID)
+			assert.Equal(t, valueObject1.ID(), approver.ApprovingPayloadID())
+		})
+	}
+
+	valueObject2 := payload.New(parent1, parent2, createDummyTransaction())
+	{
+		tangle.storePayloadReferences(valueObject2)
+
+		// check for approvers
+		approversParent1 := tangle.Approvers(parent1)
+		assert.Len(t, approversParent1, 2)
+		valueObjectIDs := []payload.ID{valueObject1.ID(), valueObject2.ID()}
+		approversParent1.Consume(func(approver *PayloadApprover) {
+			assert.Equal(t, parent1, approver.referencedPayloadID)
+			assert.Contains(t, valueObjectIDs, approver.ApprovingPayloadID())
+		})
+
+		approversParent2 := tangle.Approvers(parent2)
+		assert.Len(t, approversParent2, 2)
+		approversParent2.Consume(func(approver *PayloadApprover) {
+			assert.Equal(t, parent2, approver.referencedPayloadID)
+			assert.Contains(t, valueObjectIDs, approver.ApprovingPayloadID())
+		})
+	}
+}
+
+// TestCheckTransactionOutputs checks whether inputs and outputs are correctly reconciled.
+func TestCheckTransactionOutputs(t *testing.T) {
+	tangle := New(mapdb.NewMapDB())
+
+	// test happy cases with ColorIOTA
+	{
+		consumedBalances := make(map[balance.Color]int64)
+		consumedBalances[balance.ColorIOTA] = 1000
+
+		outputs := transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1000),
+			},
+		})
+		assert.True(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+	}
+	{
+		consumedBalances := make(map[balance.Color]int64)
+		consumedBalances[balance.ColorIOTA] = math.MaxInt64
+
+		outputs := transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, math.MaxInt64),
+			},
+		})
+		assert.True(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+	}
+	{
+		consumedBalances := make(map[balance.Color]int64)
+		consumedBalances[balance.ColorIOTA] = 25123
+
+		outputs := transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 122),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 5000),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 20000),
+			},
+		})
+		assert.True(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+	}
+
+	// test wrong balances
+	{
+		consumedBalances := make(map[balance.Color]int64)
+		consumedBalances[balance.ColorIOTA] = 1000
+
+		outputs := transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 122),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 5000),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 20000),
+			},
+		})
+		assert.False(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+	}
+
+	// test input overflow
+	{
+		consumedBalances := make(map[balance.Color]int64)
+		consumedBalances[balance.ColorIOTA] = math.MaxInt64
+		consumedBalances[[32]byte{1}] = 1
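+		// the consumed balances sum to MaxInt64 + 1, which overflows int64, so the output check must fail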
+
+		outputs := transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1000),
+			},
+		})
+		assert.False(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+	}
+
+	// test zero, negative outputs and overflows
+	{
+		consumedBalances := make(map[balance.Color]int64)
+		//consumedBalances[balance.ColorIOTA] = 1000
+
+		outputs := transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, -1),
+			},
+		})
+		assert.False(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+
+		outputs = transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 0),
+			},
+		})
+		assert.False(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+
+		outputs = transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, math.MaxInt64),
+			},
+		})
+		assert.False(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+	}
+
+	// test happy cases with ColorNew
+	{
+		consumedBalances := make(map[balance.Color]int64)
+		consumedBalances[balance.ColorIOTA] = 1000
+
+		outputs := transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorNew, 333),
+			},
+			address.Random(): {
+				balance.New(balance.ColorNew, 333),
+			},
+			address.Random(): {
+				balance.New(balance.ColorNew, 334),
+			},
+		})
+		assert.True(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+	}
+
+	// test wrong balances
+	{
+		consumedBalances := make(map[balance.Color]int64)
+		consumedBalances[balance.ColorIOTA] = 1000
+
+		outputs := transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorNew, 122),
+			},
+			address.Random(): {
+				balance.New(balance.ColorNew, 1),
+			},
+		})
+		assert.False(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+	}
+
+	// test zero, negative outputs and overflows
+	{
+		consumedBalances := make(map[balance.Color]int64)
+		//consumedBalances[balance.ColorIOTA] = 1000
+
+		outputs := transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorNew, -1),
+			},
+		})
+		assert.False(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+
+		outputs = transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorNew, 0),
+			},
+		})
+		assert.False(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+
+		outputs = transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorNew, 1),
+			},
+			address.Random(): {
+				balance.New(balance.ColorNew, math.MaxInt64),
+			},
+		})
+		assert.False(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+	}
+
+	// test happy case with colors
+	{
+		color1 := [32]byte{1}
+		color2 := [32]byte{2}
+
+		consumedBalances := make(map[balance.Color]int64)
+		consumedBalances[color1] = 1000
+		consumedBalances[color2] = 25123
+
+		outputs := transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(color1, 333),
+			},
+			address.Random(): {
+				balance.New(color1, 333),
+			},
+			address.Random(): {
+				balance.New(color1, 334),
+			},
+			address.Random(): {
+				balance.New(color2, 25000),
+			},
+			address.Random(): {
+				balance.New(color2, 123),
+			},
+		})
+		assert.True(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+	}
+
+	// try to spend a color that is not in the inputs
+	{
+		color1 := [32]byte{1}
+		color2 := [32]byte{2}
 
-	assert.Equal(t, true, output.setSolid(true))
-	assert.Equal(t, false, output.setSolid(true))
-	assert.Equal(t, true, output.Solid())
-	assert.NotEqual(t, time.Time{}, output.SolidificationTime())
+		consumedBalances := make(map[balance.Color]int64)
+		consumedBalances[color1] = 1000
 
-	clonedOutput, _, err := OutputFromBytes(output.Bytes())
-	if err != nil {
-		panic(err)
+		outputs := transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(color1, 333),
+			},
+			address.Random(): {
+				balance.New(color1, 333),
+			},
+			address.Random(): {
+				balance.New(color1, 334),
+			},
+			address.Random(): {
+				balance.New(color2, 25000),
+			},
+		})
+		assert.False(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
 	}
 
-	assert.Equal(t, output.Address(), clonedOutput.Address())
-	assert.Equal(t, output.TransactionID(), clonedOutput.TransactionID())
-	assert.Equal(t, output.Solid(), clonedOutput.Solid())
-	assert.Equal(t, output.SolidificationTime().Round(time.Second), clonedOutput.SolidificationTime().Round(time.Second))
-	assert.Equal(t, output.Balances(), clonedOutput.Balances())
+	// try to spend more of a color than is available in the inputs
+	{
+		color1 := [32]byte{1}
+
+		consumedBalances := make(map[balance.Color]int64)
+		consumedBalances[color1] = 1000
+
+		outputs := transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(color1, math.MaxInt64),
+			},
+			address.Random(): {
+				balance.New(color1, math.MaxInt64),
+			},
+		})
+		assert.False(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+	}
+
+	// combine existing colors with newly minted colors (ColorNew)
+	{
+		color1 := [32]byte{1}
+		color2 := [32]byte{2}
+
+		consumedBalances := make(map[balance.Color]int64)
+		consumedBalances[color1] = 1000
+		consumedBalances[color2] = 1000
+		consumedBalances[balance.ColorIOTA] = 1000
+
+		outputs := transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(color1, 1000),
+				balance.New(color2, 500),
+				balance.New(balance.ColorNew, 500),
+			},
+			address.Random(): {
+				balance.New(balance.ColorNew, 1000),
+			},
+		})
+		assert.True(t, tangle.checkTransactionOutputs(consumedBalances, outputs))
+	}
+}
+
+func TestGetCachedOutputsFromTransactionInputs(t *testing.T) {
+	tangle := New(mapdb.NewMapDB())
+
+	color1 := [32]byte{1}
+
+	// prepare inputs for tx that we want to retrieve from tangle
+	outputs := map[address.Address][]*balance.Balance{
+		address.Random(): {
+			balance.New(balance.ColorIOTA, 1),
+		},
+		address.Random(): {
+			balance.New(balance.ColorIOTA, 2),
+			balance.New(color1, 3),
+		},
+	}
+	inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+
+	// build tx2 spending "outputs"
+	tx2 := transaction.New(
+		transaction.NewInputs(inputIDs...),
+		// outputs
+		transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1337),
+			},
+		}),
+	)
+
+	// verify that outputs are retrieved correctly
+	{
+		cachedOutputs := tangle.getCachedOutputsFromTransactionInputs(tx2)
+		assert.Len(t, cachedOutputs, len(outputs))
+		cachedOutputs.Consume(func(output *Output) {
+			assert.ElementsMatch(t, outputs[output.Address()], output.Balances())
+		})
+	}
+}
+
+func TestLoadSnapshot(t *testing.T) {
+	tangle := New(mapdb.NewMapDB())
+
+	snapshot := map[transaction.ID]map[address.Address][]*balance.Balance{
+		transaction.GenesisID: {
+			address.Random(): []*balance.Balance{
+				balance.New(balance.ColorIOTA, 337),
+			},
+
+			address.Random(): []*balance.Balance{
+				balance.New(balance.ColorIOTA, 1000),
+				balance.New(balance.ColorIOTA, 1000),
+			},
+		},
+	}
+	tangle.LoadSnapshot(snapshot)
+
+	// check whether outputs can be retrieved from tangle
+	for addr, balances := range snapshot[transaction.GenesisID] {
+		cachedOutput := tangle.TransactionOutput(transaction.NewOutputID(addr, transaction.GenesisID))
+		cachedOutput.Consume(func(output *Output) {
+			assert.Equal(t, addr, output.Address())
+			assert.ElementsMatch(t, balances, output.Balances())
+			assert.True(t, output.Solid())
+			assert.Equal(t, branchmanager.MasterBranchID, output.BranchID())
+		})
+	}
+}
+
+func TestRetrieveConsumedInputDetails(t *testing.T) {
+	// test simple happy case
+	{
+		tangle := New(mapdb.NewMapDB())
+
+		color1 := [32]byte{1}
+
+		// prepare inputs for tx that we want to retrieve from tangle
+		outputs := map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 2),
+				balance.New(color1, 3),
+			},
+		}
+		inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+		tx := transaction.New(
+			transaction.NewInputs(inputIDs...),
+			// outputs
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{}),
+		)
+
+		inputsSolid, cachedInputs, consumedBalances, consumedBranches, err := tangle.retrieveConsumedInputDetails(tx)
+		require.NoError(t, err)
+		assert.True(t, inputsSolid)
+		assert.Len(t, cachedInputs, len(outputs))
+		cachedInputs.Consume(func(input *Output) {
+			assert.ElementsMatch(t, outputs[input.Address()], input.Balances())
+		})
+		assert.True(t, cmp.Equal(sumOutputsByColor(outputs), consumedBalances))
+		assert.Len(t, consumedBranches, 1)
+		assert.Contains(t, consumedBranches, branchmanager.MasterBranchID)
+	}
+
+	// test happy case with more colors
+	{
+		tangle := New(mapdb.NewMapDB())
+
+		color1 := [32]byte{1}
+		color2 := [32]byte{2}
+		color3 := [32]byte{3}
+
+		// prepare inputs for tx that we want to retrieve from tangle
+		outputs := map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(color1, 1000),
+			},
+			address.Random(): {
+				balance.New(color2, 210),
+				balance.New(color1, 3),
+			},
+			address.Random(): {
+				balance.New(color3, 5621),
+				balance.New(color1, 3),
+			},
+		}
+		// build tx spending "outputs"
+		inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+		tx := transaction.New(
+			transaction.NewInputs(inputIDs...),
+			// outputs
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{}),
+		)
+
+		inputsSolid, cachedInputs, consumedBalances, consumedBranches, err := tangle.retrieveConsumedInputDetails(tx)
+		require.NoError(t, err)
+		assert.True(t, inputsSolid)
+		assert.Len(t, cachedInputs, len(outputs))
+		cachedInputs.Consume(func(input *Output) {
+			assert.ElementsMatch(t, outputs[input.Address()], input.Balances())
+		})
+		assert.True(t, cmp.Equal(sumOutputsByColor(outputs), consumedBalances))
+		assert.Len(t, consumedBranches, 1)
+		assert.Contains(t, consumedBranches, branchmanager.MasterBranchID)
+	}
+
+	// test int overflow
+	{
+		tangle := New(mapdb.NewMapDB())
+
+		// prepare inputs for tx that we want to retrieve from tangle
+		outputs := map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, math.MaxInt64),
+			},
+		}
+		inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+		tx := transaction.New(
+			transaction.NewInputs(inputIDs...),
+			// outputs
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{}),
+		)
+
+		inputsSolid, cachedInputs, _, _, err := tangle.retrieveConsumedInputDetails(tx)
+		assert.Error(t, err)
+		assert.False(t, inputsSolid)
+		assert.Len(t, cachedInputs, len(outputs))
+	}
+
+	// test multiple consumed branches
+	{
+		tangle := New(mapdb.NewMapDB())
+
+		// prepare inputs for tx that we want to retrieve from tangle
+		outputs := map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 2),
+			},
+		}
+		inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+		tx := transaction.New(
+			transaction.NewInputs(inputIDs...),
+			// outputs
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{}),
+		)
+
+		// modify branch of 1 output
+		newBranch := branchmanager.NewBranchID(transaction.RandomID())
+		output := tangle.TransactionOutput(inputIDs[0])
+		output.Consume(func(output *Output) {
+			output.branchID = newBranch
+		})
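+		// the two inputs now reside in different branches (the master branch and newBranch)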
+
+		inputsSolid, cachedInputs, consumedBalances, consumedBranches, err := tangle.retrieveConsumedInputDetails(tx)
+		require.NoError(t, err)
+		assert.True(t, inputsSolid)
+		assert.Len(t, cachedInputs, len(outputs))
+		cachedInputs.Consume(func(input *Output) {
+			assert.ElementsMatch(t, outputs[input.Address()], input.Balances())
+		})
+		assert.True(t, cmp.Equal(sumOutputsByColor(outputs), consumedBalances))
+		assert.Len(t, consumedBranches, 2)
+		assert.Contains(t, consumedBranches, branchmanager.MasterBranchID)
+		assert.Contains(t, consumedBranches, newBranch)
+	}
+}
+
+func TestCheckTransactionSolidity(t *testing.T) {
+	// already solid tx
+	{
+		tangle := New(mapdb.NewMapDB())
+		tx := createDummyTransaction()
+		txMetadata := NewTransactionMetadata(tx.ID())
+		txMetadata.SetSolid(true)
+		txMetadata.SetBranchID(branchmanager.MasterBranchID)
+
+		solid, consumedBranches, err := tangle.checkTransactionSolidity(tx, txMetadata)
+		assert.True(t, solid)
+		assert.Len(t, consumedBranches, 1)
+		assert.Contains(t, consumedBranches, branchmanager.MasterBranchID)
+		assert.NoError(t, err)
+	}
+
+	// deleted tx
+	{
+		tangle := New(mapdb.NewMapDB())
+		tx := createDummyTransaction()
+		txMetadata := NewTransactionMetadata(tx.ID())
+		tx.Delete()
+		txMetadata.Delete()
+
+		solid, consumedBranches, _ := tangle.checkTransactionSolidity(tx, txMetadata)
+		assert.False(t, solid)
+		assert.Len(t, consumedBranches, 0)
+		//assert.Error(t, err)
+	}
+
+	// invalid tx: inputs not solid/non-existing
+	{
+		tangle := New(mapdb.NewMapDB())
+		tx := createDummyTransaction()
+		txMetadata := NewTransactionMetadata(tx.ID())
+
+		solid, consumedBranches, err := tangle.checkTransactionSolidity(tx, txMetadata)
+		assert.False(t, solid)
+		assert.Len(t, consumedBranches, 0)
+		assert.NoError(t, err)
+	}
+
+	// invalid tx: inputs do not match outputs
+	{
+		tangle := New(mapdb.NewMapDB())
+
+		// prepare snapshot
+		color1 := [32]byte{1}
+		outputs := map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 2),
+				balance.New(color1, 3),
+			},
+		}
+		inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+
+		// build tx spending wrong "outputs"
+		tx := transaction.New(
+			transaction.NewInputs(inputIDs...),
+			// outputs
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				address.Random(): {
+					balance.New(balance.ColorIOTA, 11337),
+					balance.New(color1, 1000),
+				},
+			}),
+		)
+		txMetadata := NewTransactionMetadata(tx.ID())
+
+		solid, consumedBranches, err := tangle.checkTransactionSolidity(tx, txMetadata)
+		assert.False(t, solid)
+		assert.Len(t, consumedBranches, 0)
+		assert.Error(t, err)
+	}
+
+	// spend outputs from the master branch (non-conflicting branches)
+	{
+		tangle := New(mapdb.NewMapDB())
+
+		// prepare snapshot
+		color1 := [32]byte{1}
+		outputs := map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1),
+			},
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 2),
+				balance.New(color1, 3),
+			},
+		}
+		inputIDs := loadSnapshotFromOutputs(tangle, outputs)
+
+		// build tx spending "outputs"
+		tx := transaction.New(
+			transaction.NewInputs(inputIDs...),
+			// outputs
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				address.Random(): {
+					balance.New(balance.ColorIOTA, 3),
+					balance.New(color1, 3),
+				},
+			}),
+		)
+		txMetadata := NewTransactionMetadata(tx.ID())
+
+		solid, consumedBranches, err := tangle.checkTransactionSolidity(tx, txMetadata)
+		assert.True(t, solid)
+		assert.Len(t, consumedBranches, 1)
+		assert.Contains(t, consumedBranches, branchmanager.MasterBranchID)
+		assert.NoError(t, err)
+	}
+
+	// spend outputs from conflicting branches
+	{
+		tangle := New(mapdb.NewMapDB())
+
+		// create conflicting branches
+		cachedBranch2, _ := tangle.BranchManager().Fork(branchmanager.BranchID{2}, []branchmanager.BranchID{branchmanager.MasterBranchID}, []branchmanager.ConflictID{{0}})
+		branch2 := cachedBranch2.Unwrap()
+		defer cachedBranch2.Release()
+		cachedBranch3, _ := tangle.BranchManager().Fork(branchmanager.BranchID{3}, []branchmanager.BranchID{branchmanager.MasterBranchID}, []branchmanager.ConflictID{{0}})
+		branch3 := cachedBranch3.Unwrap()
+		defer cachedBranch3.Release()
+		// create outputs for conflicting branches
+		inputIDs := make([]transaction.OutputID, 0)
+		for _, branch := range []*branchmanager.Branch{branch2, branch3} {
+			input := NewOutput(address.Random(), transaction.GenesisID, branch.ID(), []*balance.Balance{balance.New(balance.ColorIOTA, 1)})
+			input.setSolid(true)
+			cachedObject, _ := tangle.outputStorage.StoreIfAbsent(input)
+			cachedOutput := &CachedOutput{CachedObject: cachedObject}
+			cachedOutput.Consume(func(output *Output) {
+				inputIDs = append(inputIDs, transaction.NewOutputID(output.Address(), transaction.GenesisID))
+			})
+		}
+
+		// build tx spending "outputs" from conflicting branches
+		tx := transaction.New(
+			transaction.NewInputs(inputIDs...),
+			// outputs
+			transaction.NewOutputs(map[address.Address][]*balance.Balance{
+				address.Random(): {
+					balance.New(balance.ColorIOTA, 2),
+				},
+			}),
+		)
+		txMetadata := NewTransactionMetadata(tx.ID())
+
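+		// a transaction consuming outputs from conflicting branches is invalid and is therefore expected to never become solid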
+		solid, consumedBranches, err := tangle.checkTransactionSolidity(tx, txMetadata)
+		assert.False(t, solid)
+		assert.Len(t, consumedBranches, 2)
+		assert.Contains(t, consumedBranches, branch2.ID())
+		assert.Contains(t, consumedBranches, branch3.ID())
+		assert.Error(t, err)
+	}
+
+}
+
+func TestPayloadBranchID(t *testing.T) {
+	tangle := New(mapdb.NewMapDB())
+
+	{
+		branchID := tangle.payloadBranchID(payload.GenesisID)
+		assert.Equal(t, branchmanager.MasterBranchID, branchID)
+	}
+
+	// test with stored payload
+	{
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, createDummyTransaction())
+		cachedPayload, cachedMetadata, stored := tangle.storePayload(valueObject)
+		assert.True(t, stored)
+		cachedPayload.Release()
+		expectedBranchID := branchmanager.BranchID{1}
+		cachedMetadata.Consume(func(metadata *PayloadMetadata) {
+			metadata.setSolid(true)
+			metadata.SetBranchID(expectedBranchID)
+		})
+
+		branchID := tangle.payloadBranchID(valueObject.ID())
+		assert.Equal(t, expectedBranchID, branchID)
+	}
+
+	// test missing value object
+	{
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, createDummyTransaction())
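+		// count how often the PayloadMissing event is triggered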
+		missing := 0
+		tangle.Events.PayloadMissing.Attach(events.NewClosure(func(payloadID payload.ID) {
+			missing++
+		}))
+
+		branchID := tangle.payloadBranchID(valueObject.ID())
+		assert.Equal(t, branchmanager.UndefinedBranchID, branchID)
+		assert.Equal(t, 1, missing)
+	}
 }
 
-func TestAttachment(t *testing.T) {
-	transactionID := transaction.RandomID()
-	payloadID := payload.RandomID()
+func TestCheckPayloadSolidity(t *testing.T) {
+	tangle := New(mapdb.NewMapDB())
 
-	attachment := NewAttachment(transactionID, payloadID)
+	// check with already solid payload
+	{
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, createDummyTransaction())
+		metadata := NewPayloadMetadata(valueObject.ID())
+		metadata.setSolid(true)
+		metadata.SetBranchID(branchmanager.MasterBranchID)
 
-	assert.Equal(t, transactionID, attachment.TransactionID())
-	assert.Equal(t, payloadID, attachment.PayloadID())
+		transactionBranches := []branchmanager.BranchID{branchmanager.MasterBranchID}
+		solid, err := tangle.payloadBecameNewlySolid(valueObject, metadata, transactionBranches)
+		assert.False(t, solid)
+		assert.NoError(t, err)
+	}
+
+	// check with parents=genesis
+	{
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, createDummyTransaction())
+		metadata := NewPayloadMetadata(valueObject.ID())
 
-	clonedAttachment, consumedBytes, err := AttachmentFromBytes(attachment.Bytes())
-	if err != nil {
-		panic(err)
+		transactionBranches := []branchmanager.BranchID{branchmanager.MasterBranchID}
+		solid, err := tangle.payloadBecameNewlySolid(valueObject, metadata, transactionBranches)
+		assert.True(t, solid)
+		assert.NoError(t, err)
 	}
 
-	assert.Equal(t, AttachmentLength, consumedBytes)
-	assert.Equal(t, transactionID, clonedAttachment.TransactionID())
-	assert.Equal(t, payloadID, clonedAttachment.PayloadID())
+	// check with solid parents and branch set
+	{
+		setParent := func(payloadMetadata *PayloadMetadata) {
+			payloadMetadata.setSolid(true)
+			payloadMetadata.SetBranchID(branchmanager.MasterBranchID)
+		}
+
+		valueObject := payload.New(storeParentPayloadWithMetadataFunc(t, tangle, setParent), storeParentPayloadWithMetadataFunc(t, tangle, setParent), createDummyTransaction())
+		metadata := NewPayloadMetadata(valueObject.ID())
+
+		transactionBranches := []branchmanager.BranchID{branchmanager.MasterBranchID}
+		solid, err := tangle.payloadBecameNewlySolid(valueObject, metadata, transactionBranches)
+		assert.True(t, solid)
+		assert.NoError(t, err)
+	}
+
+	// check with solid parents but no branch set -> should not be solid
+	{
+		setParent := func(payloadMetadata *PayloadMetadata) {
+			payloadMetadata.setSolid(true)
+		}
+
+		valueObject := payload.New(storeParentPayloadWithMetadataFunc(t, tangle, setParent), storeParentPayloadWithMetadataFunc(t, tangle, setParent), createDummyTransaction())
+		metadata := NewPayloadMetadata(valueObject.ID())
+
+		transactionBranches := []branchmanager.BranchID{branchmanager.MasterBranchID}
+		solid, err := tangle.payloadBecameNewlySolid(valueObject, metadata, transactionBranches)
+		assert.False(t, solid)
+		assert.NoError(t, err)
+	}
+
+	// parents that reside in conflicting branches
+	{
+		// create conflicting branches
+		cachedBranch2, _ := tangle.BranchManager().Fork(branchmanager.BranchID{2}, []branchmanager.BranchID{branchmanager.MasterBranchID}, []branchmanager.ConflictID{{0}})
+		defer cachedBranch2.Release()
+		cachedBranch3, _ := tangle.BranchManager().Fork(branchmanager.BranchID{3}, []branchmanager.BranchID{branchmanager.MasterBranchID}, []branchmanager.ConflictID{{0}})
+		defer cachedBranch3.Release()
+		setParent1 := func(payloadMetadata *PayloadMetadata) {
+			payloadMetadata.setSolid(true)
+			payloadMetadata.SetBranchID(branchmanager.BranchID{2})
+		}
+		setParent2 := func(payloadMetadata *PayloadMetadata) {
+			payloadMetadata.setSolid(true)
+			payloadMetadata.SetBranchID(branchmanager.BranchID{3})
+		}
+
+		valueObject := payload.New(storeParentPayloadWithMetadataFunc(t, tangle, setParent1), storeParentPayloadWithMetadataFunc(t, tangle, setParent2), createDummyTransaction())
+		metadata := NewPayloadMetadata(valueObject.ID())
+
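+		// the parents reside in the conflicting branches {2} and {3}, which cannot be aggregated -> expect an error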
+		transactionBranches := []branchmanager.BranchID{branchmanager.MasterBranchID}
+		solid, err := tangle.payloadBecameNewlySolid(valueObject, metadata, transactionBranches)
+		assert.False(t, solid)
+		assert.Error(t, err)
+	}
+
+	// transaction branch conflicting with a parent branch
+	{
+		// create conflicting branches
+		cachedBranch2, _ := tangle.BranchManager().Fork(branchmanager.BranchID{2}, []branchmanager.BranchID{branchmanager.MasterBranchID}, []branchmanager.ConflictID{{0}})
+		defer cachedBranch2.Release()
+		cachedBranch3, _ := tangle.BranchManager().Fork(branchmanager.BranchID{3}, []branchmanager.BranchID{branchmanager.MasterBranchID}, []branchmanager.ConflictID{{0}})
+		defer cachedBranch3.Release()
+		setParent1 := func(payloadMetadata *PayloadMetadata) {
+			payloadMetadata.setSolid(true)
+			payloadMetadata.SetBranchID(branchmanager.MasterBranchID)
+		}
+		setParent2 := func(payloadMetadata *PayloadMetadata) {
+			payloadMetadata.setSolid(true)
+			payloadMetadata.SetBranchID(branchmanager.BranchID{3})
+		}
+
+		valueObject := payload.New(storeParentPayloadWithMetadataFunc(t, tangle, setParent1), storeParentPayloadWithMetadataFunc(t, tangle, setParent2), createDummyTransaction())
+		metadata := NewPayloadMetadata(valueObject.ID())
+
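+		// the transaction branch {2} conflicts with the parent branch {3} -> aggregation must fail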
+		transactionBranches := []branchmanager.BranchID{{2}}
+		solid, err := tangle.payloadBecameNewlySolid(valueObject, metadata, transactionBranches)
+		assert.False(t, solid)
+		assert.Error(t, err)
+	}
+}
+
+func TestCreateValuePayloadFutureConeIterator(t *testing.T) {
+	// check with new payload -> should be added to stack
+	{
+		tangle := New(mapdb.NewMapDB())
+		solidificationStack := list.New()
+		processedPayloads := make(map[payload.ID]types.Empty)
+		iterator := tangle.createValuePayloadFutureConeIterator(solidificationStack, processedPayloads)
+
+		// create cached objects
+		tx := createDummyTransaction()
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, tx)
+		cachedPayload, cachedMetadata, stored := tangle.storePayload(valueObject)
+		assert.True(t, stored)
+		cachedTransaction, cachedTransactionMetadata, _, transactionIsNew := tangle.storeTransactionModels(valueObject)
+		assert.True(t, transactionIsNew)
+
+		iterator(cachedPayload, cachedMetadata, cachedTransaction, cachedTransactionMetadata)
+		assert.Equal(t, 1, solidificationStack.Len())
+		currentSolidificationEntry := solidificationStack.Front().Value.(*valuePayloadPropagationStackEntry)
+		assert.Equal(t, cachedPayload, currentSolidificationEntry.CachedPayload)
+		currentSolidificationEntry.CachedPayload.Consume(func(payload *payload.Payload) {
+			assert.Equal(t, valueObject.ID(), payload.ID())
+		})
+		currentSolidificationEntry.CachedPayloadMetadata.Consume(func(metadata *PayloadMetadata) {
+			assert.Equal(t, valueObject.ID(), metadata.PayloadID())
+		})
+		currentSolidificationEntry.CachedTransaction.Consume(func(transaction *transaction.Transaction) {
+			assert.Equal(t, tx.ID(), transaction.ID())
+		})
+		currentSolidificationEntry.CachedTransactionMetadata.Consume(func(metadata *TransactionMetadata) {
+			assert.Equal(t, tx.ID(), metadata.ID())
+		})
+	}
+
+	// check with already processed payload -> should not be added to stack
+	{
+		tangle := New(mapdb.NewMapDB())
+		solidificationStack := list.New()
+		processedPayloads := make(map[payload.ID]types.Empty)
+		iterator := tangle.createValuePayloadFutureConeIterator(solidificationStack, processedPayloads)
+
+		// create cached objects
+		tx := createDummyTransaction()
+		valueObject := payload.New(payload.GenesisID, payload.GenesisID, tx)
+		cachedPayload, cachedMetadata, stored := tangle.storePayload(valueObject)
+		assert.True(t, stored)
+		cachedTransaction, cachedTransactionMetadata, _, transactionIsNew := tangle.storeTransactionModels(valueObject)
+		assert.True(t, transactionIsNew)
+
+		// mark the payload as already processed
+		processedPayloads[valueObject.ID()] = types.Void
+
+		iterator(cachedPayload, cachedMetadata, cachedTransaction, cachedTransactionMetadata)
+		assert.Equal(t, 0, solidificationStack.Len())
+	}
+}
+
+func TestForEachConsumers(t *testing.T) {
+	tangle := New(mapdb.NewMapDB())
+
+	// prepare inputs for tx
+	outputs := map[address.Address][]*balance.Balance{
+		address.Random(): {
+			balance.New(balance.ColorIOTA, 1),
+		},
+		address.Random(): {
+			balance.New(balance.ColorIOTA, 2),
+		},
+	}
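+	// the genesis transaction has no inputs and only creates the outputs defined above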
+	genesisTx := transaction.New(transaction.NewInputs(), transaction.NewOutputs(outputs))
+
+	// store tx that uses outputs from genesisTx
+	outputIDs := make([]transaction.OutputID, 0)
+	for addr := range outputs {
+		outputIDs = append(outputIDs, transaction.NewOutputID(addr, genesisTx.ID()))
+	}
+	tx := transaction.New(
+		transaction.NewInputs(outputIDs...),
+		transaction.NewOutputs(map[address.Address][]*balance.Balance{}),
+	)
+	valueObject := payload.New(payload.GenesisID, payload.GenesisID, tx)
+	_, _, stored := tangle.storePayload(valueObject)
+	assert.True(t, stored)
+	_, _, _, transactionIsNew := tangle.storeTransactionModels(valueObject)
+	assert.True(t, transactionIsNew)
+
+	counter := 0
+	consume := func(cachedPayload *payload.CachedPayload, cachedPayloadMetadata *CachedPayloadMetadata, cachedTransaction *transaction.CachedTransaction, cachedTransactionMetadata *CachedTransactionMetadata) {
+		cachedPayload.Consume(func(payload *payload.Payload) {
+			assert.Equal(t, valueObject.ID(), payload.ID())
+		})
+		cachedPayloadMetadata.Consume(func(metadata *PayloadMetadata) {
+			assert.Equal(t, valueObject.ID(), metadata.PayloadID())
+		})
+		cachedTransaction.Consume(func(transaction *transaction.Transaction) {
+			assert.Equal(t, tx.ID(), transaction.ID())
+		})
+		cachedTransactionMetadata.Consume(func(metadata *TransactionMetadata) {
+			assert.Equal(t, tx.ID(), metadata.ID())
+		})
+		counter++
+	}
+
+	tangle.ForEachConsumers(genesisTx, consume)
+	// even though there are 2 outputs, the consumer should only be triggered once because both outputs belong to the same transaction
+	assert.Equal(t, 1, counter)
+}
+
+func TestForeachApprovers(t *testing.T) {
+	tangle := New(mapdb.NewMapDB())
+
+	valueObject := payload.New(payload.GenesisID, payload.GenesisID, createDummyTransaction())
+
+	// create approver 1
+	tx1 := createDummyTransaction()
+	approver1 := payload.New(valueObject.ID(), payload.GenesisID, tx1)
+	_, _, stored := tangle.storePayload(approver1)
+	assert.True(t, stored)
+	_, _, _, transactionIsNew := tangle.storeTransactionModels(approver1)
+	tangle.storePayloadReferences(approver1)
+	assert.True(t, transactionIsNew)
+
+	// create approver 2
+	tx2 := createDummyTransaction()
+	approver2 := payload.New(valueObject.ID(), payload.GenesisID, tx2)
+	_, _, stored = tangle.storePayload(approver2)
+	assert.True(t, stored)
+	_, _, _, transactionIsNew = tangle.storeTransactionModels(approver2)
+	tangle.storePayloadReferences(approver2)
+	assert.True(t, transactionIsNew)
+
+	counter := 0
+	consume := func(cachedPayload *payload.CachedPayload, cachedPayloadMetadata *CachedPayloadMetadata, cachedTransaction *transaction.CachedTransaction, cachedTransactionMetadata *CachedTransactionMetadata) {
+		cachedPayload.Consume(func(p *payload.Payload) {
+			assert.Contains(t, []payload.ID{approver1.ID(), approver2.ID()}, p.ID())
+		})
+		cachedPayloadMetadata.Consume(func(metadata *PayloadMetadata) {
+			assert.Contains(t, []payload.ID{approver1.ID(), approver2.ID()}, metadata.PayloadID())
+		})
+		cachedTransaction.Consume(func(tx *transaction.Transaction) {
+			assert.Contains(t, []transaction.ID{tx1.ID(), tx2.ID()}, tx.ID())
+		})
+		cachedTransactionMetadata.Consume(func(metadata *TransactionMetadata) {
+			assert.Contains(t, []transaction.ID{tx1.ID(), tx2.ID()}, metadata.ID())
+		})
+		counter++
+	}
+
+	tangle.ForeachApprovers(valueObject.ID(), consume)
+	assert.Equal(t, 2, counter)
+}
+
+func storeParentPayloadWithMetadataFunc(t *testing.T, tangle *Tangle, consume func(*PayloadMetadata)) payload.ID {
+	parent1 := payload.New(payload.GenesisID, payload.GenesisID, createDummyTransaction())
+	cachedPayload, cachedMetadata, stored := tangle.storePayload(parent1)
+	defer cachedPayload.Release()
+
+	cachedMetadata.Consume(consume)
+	assert.True(t, stored)
+
+	return parent1.ID()
+}
+
+func loadSnapshotFromOutputs(tangle *Tangle, outputs map[address.Address][]*balance.Balance) []transaction.OutputID {
+	snapshot := map[transaction.ID]map[address.Address][]*balance.Balance{transaction.GenesisID: outputs}
+	tangle.LoadSnapshot(snapshot)
+
+	outputIDs := make([]transaction.OutputID, 0)
+	for addr := range outputs {
+		outputIDs = append(outputIDs, transaction.NewOutputID(addr, transaction.GenesisID))
+	}
+	return outputIDs
+}
+
+func sumOutputsByColor(outputs map[address.Address][]*balance.Balance) map[balance.Color]int64 {
+	totals := make(map[balance.Color]int64)
+
+	for _, balances := range outputs {
+		for _, bal := range balances {
+			totals[bal.Color()] += bal.Value()
+		}
+	}
+
+	return totals
+}
+
+func createDummyTransaction() *transaction.Transaction {
+	return transaction.New(
+		// inputs
+		transaction.NewInputs(
+			transaction.NewOutputID(address.Random(), transaction.RandomID()),
+			transaction.NewOutputID(address.Random(), transaction.RandomID()),
+		),
+
+		// outputs
+		transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			address.Random(): {
+				balance.New(balance.ColorIOTA, 1337),
+			},
+		}),
+	)
 }
diff --git a/dapps/valuetransfers/packages/tangle/transactionmetadata.go b/dapps/valuetransfers/packages/tangle/transactionmetadata.go
index f7fe65d00edf33d3d87ed3d1a672a383b049522b..01b161613ae30fcb8d4a98e2e337c3a9723f370e 100644
--- a/dapps/valuetransfers/packages/tangle/transactionmetadata.go
+++ b/dapps/valuetransfers/packages/tangle/transactionmetadata.go
@@ -4,12 +4,11 @@ import (
 	"sync"
 	"time"
 
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 	"github.com/iotaledger/hive.go/marshalutil"
 	"github.com/iotaledger/hive.go/objectstorage"
 	"github.com/iotaledger/hive.go/stringify"
-
-	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
-	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 )
 
 // TransactionMetadata contains the information of a Transaction, that are based on our local perception of things (i.e. if it is
diff --git a/dapps/valuetransfers/packages/transaction/inputs.go b/dapps/valuetransfers/packages/transaction/inputs.go
index f4c638d17e6e0d4af5107a5628a567bc0dbeb302..bfc8ea13980fa6ead451063438495904ab2e51b9 100644
--- a/dapps/valuetransfers/packages/transaction/inputs.go
+++ b/dapps/valuetransfers/packages/transaction/inputs.go
@@ -3,7 +3,6 @@ package transaction
 import (
 	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
 	"github.com/iotaledger/goshimmer/packages/binary/datastructure/orderedmap"
-
 	"github.com/iotaledger/hive.go/marshalutil"
 )
 
diff --git a/go.mod b/go.mod
index 592fae32834269320ea64ccda5f01e6b8fec815d..4b05e611d047983c908b2a603743a1258a59113c 100644
--- a/go.mod
+++ b/go.mod
@@ -10,6 +10,7 @@ require (
 	github.com/drand/kyber v1.0.1-0.20200331114745-30e90cc60f99
 	github.com/gobuffalo/packr/v2 v2.7.1
 	github.com/golang/protobuf v1.3.5
+	github.com/google/go-cmp v0.4.0
 	github.com/gorilla/websocket v1.4.1
 	github.com/iotaledger/hive.go v0.0.0-20200610104211-d603429af242
 	github.com/iotaledger/iota.go v1.0.0-beta.14
diff --git a/packages/binary/datastructure/orderedmap/element.go b/packages/binary/datastructure/orderedmap/element.go
index d0ee44053031339c6cd11d3723e5c0962d0b1ef4..d8c4c03143999b3f752171cbeccd9322d19800fb 100644
--- a/packages/binary/datastructure/orderedmap/element.go
+++ b/packages/binary/datastructure/orderedmap/element.go
@@ -1,5 +1,6 @@
 package orderedmap
 
+// Element defines the model of each element of the orderedMap.
 type Element struct {
 	key   interface{}
 	value interface{}
diff --git a/packages/binary/datastructure/orderedmap/orderedmap.go b/packages/binary/datastructure/orderedmap/orderedmap.go
index 8bc97443bfbf03919be8ad952fb634117ef01ec8..22b62588cb67af73058aa7ab3d681bfc6db1fd5c 100644
--- a/packages/binary/datastructure/orderedmap/orderedmap.go
+++ b/packages/binary/datastructure/orderedmap/orderedmap.go
@@ -4,6 +4,7 @@ import (
 	"sync"
 )
 
+// OrderedMap provides a concurrent-safe ordered map.
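+// Elements are kept in their insertion order. A minimal usage sketch:
+//
+//	om := orderedmap.New()
+//	om.Set("key", "value")
+//	value, ok := om.Get("key")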
 type OrderedMap struct {
 	head       *Element
 	tail       *Element
@@ -12,23 +13,27 @@ type OrderedMap struct {
 	mutex      sync.RWMutex
 }
 
+// New returns a new *OrderedMap.
 func New() *OrderedMap {
 	return &OrderedMap{
 		dictionary: make(map[interface{}]*Element),
 	}
 }
 
+// Get returns the value mapped to the given key if it exists.
 func (orderedMap *OrderedMap) Get(key interface{}) (interface{}, bool) {
 	orderedMap.mutex.RLock()
 	defer orderedMap.mutex.RUnlock()
 
-	if orderedMapElement, orderedMapElementExists := orderedMap.dictionary[key]; !orderedMapElementExists {
+	orderedMapElement, orderedMapElementExists := orderedMap.dictionary[key]
+	if !orderedMapElementExists {
 		return nil, false
-	} else {
-		return orderedMapElement.value, true
 	}
+	return orderedMapElement.value, true
 }
 
+// Set adds a key-value pair to the orderedMap. It returns false if the same pair already exists.
 func (orderedMap *OrderedMap) Set(key interface{}, newValue interface{}) bool {
 	if oldValue, oldValueExists := orderedMap.Get(key); oldValueExists && oldValue == newValue {
 		return false
@@ -66,6 +71,8 @@ func (orderedMap *OrderedMap) Set(key interface{}, newValue interface{}) bool {
 	return true
 }
 
+// ForEach iterates through the orderedMap and calls the consumer function for every element.
+// The iteration can be aborted by returning false in the consumer.
 func (orderedMap *OrderedMap) ForEach(consumer func(key, value interface{}) bool) bool {
 	orderedMap.mutex.RLock()
 	currentEntry := orderedMap.head
@@ -84,6 +91,8 @@ func (orderedMap *OrderedMap) ForEach(consumer func(key, value interface{}) bool
 	return true
 }
 
+// Delete deletes the given key (and related value) from the orderedMap.
+// It returns false if the key is not found.
 func (orderedMap *OrderedMap) Delete(key interface{}) bool {
 	if _, valueExists := orderedMap.Get(key); !valueExists {
 		return false
@@ -115,6 +124,7 @@ func (orderedMap *OrderedMap) Delete(key interface{}) bool {
 	return true
 }
 
+// Size returns the size of the orderedMap.
 func (orderedMap *OrderedMap) Size() int {
 	orderedMap.mutex.RLock()
 	defer orderedMap.mutex.RUnlock()
diff --git a/packages/binary/datastructure/orderedmap/orderedmap_test.go b/packages/binary/datastructure/orderedmap/orderedmap_test.go
index 00071715985f9a7a9047ca73ffb9e4a8a20331cf..72e3e71c46ad56250565a32f3eb5fc26f1fad09f 100644
--- a/packages/binary/datastructure/orderedmap/orderedmap_test.go
+++ b/packages/binary/datastructure/orderedmap/orderedmap_test.go
@@ -1,9 +1,12 @@
 package orderedmap
 
 import (
+	"fmt"
+	"sync"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestOrderedMap_Size(t *testing.T) {
@@ -28,3 +31,139 @@ func TestOrderedMap_Size(t *testing.T) {
 
 	assert.Equal(t, 2, orderedMap.Size())
 }
+
+func TestNew(t *testing.T) {
+	orderedMap := New()
+	require.NotNil(t, orderedMap)
+
+	assert.Equal(t, 0, orderedMap.Size())
+
+	assert.Nil(t, orderedMap.head)
+	assert.Nil(t, orderedMap.tail)
+}
+
+func TestSetGetDelete(t *testing.T) {
+	orderedMap := New()
+	require.NotNil(t, orderedMap)
+
+	// when adding the first new key-value pair, Set must return true
+	keyValueAdded := orderedMap.Set("key", "value")
+	assert.True(t, keyValueAdded)
+
+	// we should be able to retrieve the just added element
+	value, ok := orderedMap.Get("key")
+	assert.Equal(t, "value", value)
+	assert.True(t, ok)
+
+	// head and tail should NOT be nil, should point to the same element, and the size should be 1
+	assert.NotNil(t, orderedMap.head)
+	assert.Same(t, orderedMap.head, orderedMap.tail)
+	assert.Equal(t, 1, orderedMap.Size())
+
+	// adding the same key-value pair again must return false
+	// and the size should not change
+	keyValueAdded = orderedMap.Set("key", "value")
+	assert.False(t, keyValueAdded)
+	assert.Equal(t, 1, orderedMap.Size())
+
+	// when retrieving something that does not exist we
+	// should get nil, false
+	value, ok = orderedMap.Get("keyNotStored")
+	assert.Nil(t, value)
+	assert.False(t, ok)
+
+	// when deleting an existing element, we must get true,
+	// the element must be removed, and the size decremented.
+	deleted := orderedMap.Delete("key")
+	assert.True(t, deleted)
+	value, ok = orderedMap.Get("key")
+	assert.Nil(t, value)
+	assert.False(t, ok)
+	assert.Equal(t, 0, orderedMap.Size())
+
+	// if we delete the only element, head and tail should both be nil
+	assert.Nil(t, orderedMap.head)
+	assert.Same(t, orderedMap.head, orderedMap.tail)
+
+	// when deleting a NON-existing element, we must get false
+	deleted = orderedMap.Delete("key")
+	assert.False(t, deleted)
+}
+
+func TestForEach(t *testing.T) {
+	orderedMap := New()
+	require.NotNil(t, orderedMap)
+
+	testElements := []Element{
+		{key: "one", value: 1},
+		{key: "two", value: 2},
+		{key: "three", value: 3},
+	}
+
+	for _, element := range testElements {
+		keyValueAdded := orderedMap.Set(element.key, element.value)
+		assert.True(t, keyValueAdded)
+	}
+
+	// test that all elements are positive via ForEach
+	testPositive := orderedMap.ForEach(func(key, value interface{}) bool {
+		return value.(int) > 0
+	})
+	assert.True(t, testPositive)
+
+	testNegative := orderedMap.ForEach(func(key, value interface{}) bool {
+		return value.(int) < 0
+	})
+	assert.False(t, testNegative)
+}
+
+func TestConcurrencySafe(t *testing.T) {
+	orderedMap := New()
+	require.NotNil(t, orderedMap)
+
+	// initialize a slice of 100 elements
+	set := make([]Element, 100)
+	for i := 0; i < 100; i++ {
+		element := Element{key: fmt.Sprintf("%d", i), value: i}
+		set[i] = element
+	}
+
+	// let 10 workers fill the orderedMap
+	workers := 10
+	var wg sync.WaitGroup
+	wg.Add(workers)
+	for i := 0; i < workers; i++ {
+		go func() {
+			defer wg.Done()
+			for i := 0; i < 100; i++ {
+				ele := set[i]
+				orderedMap.Set(ele.key, ele.value)
+			}
+		}()
+	}
+	wg.Wait()
+
+	// check that all the elements consumed from the set
+	// have been stored in the orderedMap and its size matches
+	for i := 0; i < 100; i++ {
+		value, ok := orderedMap.Get(set[i].key)
+		assert.Equal(t, set[i].value, value)
+		assert.True(t, ok)
+	}
+	assert.Equal(t, 100, orderedMap.Size())
+
+	// let 10 workers delete elements from the orderedMap
+	wg.Add(workers)
+	for i := 0; i < workers; i++ {
+		go func() {
+			defer wg.Done()
+			for i := 0; i < 100; i++ {
+				ele := set[i]
+				orderedMap.Delete(ele.key)
+			}
+		}()
+	}
+	wg.Wait()
+
+	assert.Equal(t, 0, orderedMap.Size())
+}
diff --git a/packages/binary/datastructure/queue/queue_test.go b/packages/binary/datastructure/queue/queue_test.go
index bc084afc3741331f66a6e4701a794225b72e392a..67d5351b2683004d13108403c2f0aafcf4c094ed 100644
--- a/packages/binary/datastructure/queue/queue_test.go
+++ b/packages/binary/datastructure/queue/queue_test.go
@@ -1,42 +1,112 @@
 package queue
 
 import (
+	"sync"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
-func Test(t *testing.T) {
+func TestNewQueue(t *testing.T) {
 	queue := New(2)
+	require.NotNil(t, queue)
 	assert.Equal(t, 0, queue.Size())
 	assert.Equal(t, 2, queue.Capacity())
+}
+
+func TestQueueOfferPoll(t *testing.T) {
+	queue := New(2)
+	require.NotNil(t, queue)
 
-	assert.Equal(t, true, queue.Offer(1))
+	// offer elements to the queue
+	assert.True(t, queue.Offer(1))
 	assert.Equal(t, 1, queue.Size())
 
-	assert.Equal(t, true, queue.Offer(2))
+	assert.True(t, queue.Offer(2))
 	assert.Equal(t, 2, queue.Size())
 
-	assert.Equal(t, false, queue.Offer(3))
+	assert.False(t, queue.Offer(3))
 
+	// poll elements from the queue
 	polledValue, ok := queue.Poll()
-	assert.Equal(t, true, ok)
+	assert.True(t, ok)
 	assert.Equal(t, 1, polledValue)
 	assert.Equal(t, 1, queue.Size())
 
 	polledValue, ok = queue.Poll()
-	assert.Equal(t, true, ok)
+	assert.True(t, ok)
 	assert.Equal(t, 2, polledValue)
 	assert.Equal(t, 0, queue.Size())
 
 	polledValue, ok = queue.Poll()
-	assert.Equal(t, false, ok)
-	assert.Equal(t, nil, polledValue)
+	assert.False(t, ok)
+	assert.Nil(t, polledValue)
 	assert.Equal(t, 0, queue.Size())
 
-	assert.Equal(t, true, queue.Offer(3))
+	// offer to the now-empty queue again
+	assert.True(t, queue.Offer(3))
 	assert.Equal(t, 1, queue.Size())
+}
 
-	assert.Equal(t, true, queue.Offer(4))
-	assert.Equal(t, 2, queue.Size())
+func TestQueueOfferConcurrencySafe(t *testing.T) {
+	queue := New(100)
+	require.NotNil(t, queue)
+
+	// let 10 workers fill the queue
+	workers := 10
+	var wg sync.WaitGroup
+	wg.Add(workers)
+	for i := 0; i < workers; i++ {
+		go func() {
+			defer wg.Done()
+			for j := 0; j < 10; j++ {
+				queue.Offer(j)
+			}
+		}()
+	}
+	wg.Wait()
+
+	// check that all the elements are offered
+	assert.Equal(t, 100, queue.Size())
+
+	counter := make([]int, 10)
+	for i := 0; i < 100; i++ {
+		value, ok := queue.Poll()
+		assert.True(t, ok)
+		counter[value.(int)]++
+	}
+	assert.Equal(t, 0, queue.Size())
+
+	// check that each value was offered exactly 10 times
+	for i := 0; i < 10; i++ {
+		assert.Equal(t, 10, counter[i])
+	}
+}
+
+func TestQueuePollConcurrencySafe(t *testing.T) {
+	queue := New(100)
+	require.NotNil(t, queue)
+
+	for j := 0; j < 100; j++ {
+		queue.Offer(j)
+	}
+
+	// let 10 workers poll the queue
+	workers := 10
+	var wg sync.WaitGroup
+	wg.Add(workers)
+	for i := 0; i < workers; i++ {
+		go func() {
+			defer wg.Done()
+			for j := 0; j < 10; j++ {
+				_, ok := queue.Poll()
+				assert.True(t, ok)
+			}
+		}()
+	}
+	wg.Wait()
+
+	// check that all the elements are polled
+	assert.Equal(t, 0, queue.Size())
 }