From ec373339c12b815ac0a80c8720d602740607f709 Mon Sep 17 00:00:00 2001
From: Hans Moog <hm@mkjc.net>
Date: Mon, 27 Apr 2020 17:08:44 +0200
Subject: [PATCH] Refactor value transfer logic (#330)

* Feat: value transfers are working

* Feat: tangle now detects invalid transactions

* Refactor: refactored some code

* Feat: finished unit test for value transaction

* Feat: added ledger state (reads information from value tangle)

* Refactor: refactored some code

* Feat: finished refactoring isTransactionSolid method

* Feat: added RealityId model for parallel reality based ledger

* Refactor: moved Output to tangle and introduced branches

* Refactor: refactored some code

* Refactor: renamed "NEW" color

* Feat: started adding branches

* Feat: nothing works anymore :p Started to refactor

* Feat: cleaned up tangle in valuetransfers

* Fix: fixed most refactor related bugs

* Feat: introduced branchmanager and utxodag

* Refactor: cleaned up some code

* Feat: introduced ledgerstate

* Feat: balances get booked into branches

* Feat: started implementing fork logic

* Feat: added some more logic :P

* Feat: commit before develop rebase

* Fix: fixed comments from golangci-lint

* Refactor: refactored moveTransaction logic

* Refactor: refactor

* Refactor: refactored valuetransfers to be a self-contained app

* Fix: fixed errors due to merge

* Fix: fixed bug

* Feat: added preferred/liked to branch

* Feat: added a lot of branchmanager logic

* Feat: implemented FPC in the valuetransfers DAPP

* Refactor: refactored according to golint
---
 dapps/valuetransfers/dapp.go                  | 190 ++++
 dapps/valuetransfers/fpc.go                   | 188 ++++
 .../packages}/address/address.go              |   0
 .../packages}/address/signaturescheme/bls.go  |   2 +-
 .../address/signaturescheme/bls_test.go       |   0
 .../address/signaturescheme/ed25519.go        |   2 +-
 .../address/signaturescheme/signature.go      |   2 +-
 .../signaturescheme/signaturescheme.go        |   2 +-
 .../packages}/balance/balance.go              |   0
 .../packages}/balance/balance_test.go         |   4 +-
 .../valuetransfers/packages}/balance/color.go |   6 +-
 .../packages/branchmanager/branch.go          | 306 ++++++
 .../packages/branchmanager}/branchid.go       |  18 +-
 .../packages/branchmanager/branchmanager.go   | 581 +++++++++++
 .../packages/branchmanager/child_branch.go    | 147 +++
 .../packages/branchmanager/conflict.go        | 134 +++
 .../packages/branchmanager/conflict_id.go     |  14 +
 .../packages/branchmanager/conflict_member.go | 147 +++
 .../packages/branchmanager/events.go          |  12 +
 .../packages/branchmanager/objectstorage.go   |  31 +
 .../packages/ledgerstate/ledgerstate.go       |  15 +
 .../valuetransfers/packages}/payload/id.go    |   0
 .../packages}/payload/id_test.go              |   0
 .../packages}/payload/payload.go              |   2 +-
 .../packages}/payload/payload_test.go         |  12 +-
 .../packages}/tangle/constants.go             |   0
 .../valuetransfers/packages}/tangle/events.go |  21 +-
 .../packages}/tangle/missingpayload.go        |   2 +-
 .../packages}/tangle/objectstorage.go         |  22 +-
 .../packages}/tangle/payloadapprover.go       |   2 +-
 .../packages}/tangle/payloadmetadata.go       |   2 +-
 .../packages}/tangle/payloadmetadata_test.go  |   2 +-
 .../valuetransfers/packages/tangle/tangle.go  | 250 +++++
 .../packages}/transaction/id.go               |   0
 .../packages}/transaction/inputs.go           |   2 +-
 .../packages}/transaction/outputid.go         |   2 +-
 .../packages}/transaction/outputs.go          |   7 +-
 .../packages}/transaction/signatures.go       |   4 +-
 .../packages}/transaction/signatures_test.go  |   4 +-
 .../packages}/transaction/transaction.go      |   4 +-
 .../packages}/transaction/transaction_test.go |  17 +-
 .../packages/utxodag}/attachment.go           |  11 +-
 .../packages/utxodag}/consumer.go             |   6 +-
 .../valuetransfers/packages/utxodag/events.go |  50 +
 .../packages/utxodag}/missingoutput.go        |   6 +-
 .../packages/utxodag/objectstorage.go         |  48 +
 .../packages/utxodag}/output.go               |  47 +-
 .../packages/utxodag}/transactionmetadata.go  | 170 ++-
 .../packages/utxodag/utxodag.go               | 745 +++++++++++++
 .../packages/utxodag/utxodag_test.go          | 142 +++
 go.mod                                        |   1 +
 .../valuetransfer/ledgerstate/ledgerstate.go  |   4 -
 .../binary/valuetransfer/tangle/branch.go     |  84 --
 .../valuetransfer/tangle/output_test.go       |  45 -
 .../binary/valuetransfer/tangle/tangle.go     | 984 ------------------
 .../valuetransfer/tangle/tangle_test.go       |  97 --
 56 files changed, 3245 insertions(+), 1351 deletions(-)
 create mode 100644 dapps/valuetransfers/dapp.go
 create mode 100644 dapps/valuetransfers/fpc.go
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/address/address.go (100%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/address/signaturescheme/bls.go (98%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/address/signaturescheme/bls_test.go (100%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/address/signaturescheme/ed25519.go (98%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/address/signaturescheme/signature.go (84%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/address/signaturescheme/signaturescheme.go (89%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/balance/balance.go (100%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/balance/balance_test.go (86%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/balance/color.go (69%)
 create mode 100644 dapps/valuetransfers/packages/branchmanager/branch.go
 rename {packages/binary/valuetransfer/tangle => dapps/valuetransfers/packages/branchmanager}/branchid.go (83%)
 create mode 100644 dapps/valuetransfers/packages/branchmanager/branchmanager.go
 create mode 100644 dapps/valuetransfers/packages/branchmanager/child_branch.go
 create mode 100644 dapps/valuetransfers/packages/branchmanager/conflict.go
 create mode 100644 dapps/valuetransfers/packages/branchmanager/conflict_id.go
 create mode 100644 dapps/valuetransfers/packages/branchmanager/conflict_member.go
 create mode 100644 dapps/valuetransfers/packages/branchmanager/events.go
 create mode 100644 dapps/valuetransfers/packages/branchmanager/objectstorage.go
 create mode 100644 dapps/valuetransfers/packages/ledgerstate/ledgerstate.go
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/payload/id.go (100%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/payload/id_test.go (100%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/payload/payload.go (99%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/payload/payload_test.go (90%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/tangle/constants.go (100%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/tangle/events.go (58%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/tangle/missingpayload.go (98%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/tangle/objectstorage.go (56%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/tangle/payloadapprover.go (99%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/tangle/payloadmetadata.go (99%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/tangle/payloadmetadata_test.go (95%)
 create mode 100644 dapps/valuetransfers/packages/tangle/tangle.go
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/transaction/id.go (100%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/transaction/inputs.go (98%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/transaction/outputid.go (96%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/transaction/outputs.go (96%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/transaction/signatures.go (97%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/transaction/signatures_test.go (91%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/transaction/transaction.go (98%)
 rename {packages/binary/valuetransfer => dapps/valuetransfers/packages}/transaction/transaction_test.go (88%)
 rename {packages/binary/valuetransfer/tangle => dapps/valuetransfers/packages/utxodag}/attachment.go (94%)
 rename {packages/binary/valuetransfer/tangle => dapps/valuetransfers/packages/utxodag}/consumer.go (97%)
 create mode 100644 dapps/valuetransfers/packages/utxodag/events.go
 rename {packages/binary/valuetransfer/tangle => dapps/valuetransfers/packages/utxodag}/missingoutput.go (96%)
 create mode 100644 dapps/valuetransfers/packages/utxodag/objectstorage.go
 rename {packages/binary/valuetransfer/tangle => dapps/valuetransfers/packages/utxodag}/output.go (87%)
 rename {packages/binary/valuetransfer/tangle => dapps/valuetransfers/packages/utxodag}/transactionmetadata.go (60%)
 create mode 100644 dapps/valuetransfers/packages/utxodag/utxodag.go
 create mode 100644 dapps/valuetransfers/packages/utxodag/utxodag_test.go
 delete mode 100644 packages/binary/valuetransfer/ledgerstate/ledgerstate.go
 delete mode 100644 packages/binary/valuetransfer/tangle/branch.go
 delete mode 100644 packages/binary/valuetransfer/tangle/output_test.go
 delete mode 100644 packages/binary/valuetransfer/tangle/tangle.go
 delete mode 100644 packages/binary/valuetransfer/tangle/tangle_test.go

diff --git a/dapps/valuetransfers/dapp.go b/dapps/valuetransfers/dapp.go
new file mode 100644
index 00000000..bc5e7dd0
--- /dev/null
+++ b/dapps/valuetransfers/dapp.go
@@ -0,0 +1,190 @@
+package valuetransfers
+
+import (
+	"time"
+
+	"github.com/iotaledger/hive.go/daemon"
+	"github.com/iotaledger/hive.go/events"
+	"github.com/iotaledger/hive.go/logger"
+	"github.com/iotaledger/hive.go/node"
+
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/ledgerstate"
+	valuepayload "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/tangle"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/utxodag"
+	"github.com/iotaledger/goshimmer/packages/binary/messagelayer/message"
+	messageTangle "github.com/iotaledger/goshimmer/packages/binary/messagelayer/tangle"
+	"github.com/iotaledger/goshimmer/packages/database"
+	"github.com/iotaledger/goshimmer/packages/shutdown"
+	"github.com/iotaledger/goshimmer/packages/vote"
+	"github.com/iotaledger/goshimmer/plugins/messagelayer"
+)
+
+const (
+	// PluginName contains the human-readable name of the plugin.
+	PluginName          = "ValueTransfers"
+	AverageNetworkDelay = 6 * time.Second
+)
+
+var (
+	// App is the plugin instance of the value transfers dapp.
+	App = node.NewPlugin(PluginName, node.Enabled, configure, run)
+
+	// Tangle represents the value tangle that is used to express votes on value transactions.
+	Tangle *tangle.Tangle
+
+	// UTXODAG represents the flow of funds that is derived from the value tangle.
+	UTXODAG *utxodag.UTXODAG
+
+	// LedgerState represents the ledger state that keeps track of the liked branches and offers an API to access funds.
+	LedgerState *ledgerstate.LedgerState
+
+	// log holds a reference to the logger used by this app.
+	log *logger.Logger
+)
+
+func configure(_ *node.Plugin) {
+	log = logger.NewLogger(PluginName)
+
+	log.Debug("configuring ValueTransfers")
+
+	// create instances
+	Tangle = tangle.New(database.GetBadgerInstance())
+	UTXODAG = utxodag.New(database.GetBadgerInstance(), Tangle)
+
+	// subscribe to message-layer
+	messagelayer.Tangle.Events.MessageSolid.Attach(events.NewClosure(onReceiveMessageFromMessageLayer))
+
+	// setup behavior of package instances
+	Tangle.Events.PayloadSolid.Attach(events.NewClosure(UTXODAG.ProcessSolidPayload))
+	UTXODAG.Events.TransactionBooked.Attach(events.NewClosure(onTransactionBooked))
+	UTXODAG.Events.Fork.Attach(events.NewClosure(onFork))
+
+	configureFPC()
+	// TODO: DECIDE WHAT WE SHOULD DO IF FPC FAILS
+	// voter.Events().Failed.Attach(events.NewClosure(panic))
+	voter.Events().Finalized.Attach(events.NewClosure(func(id string, opinion vote.Opinion) {
+		branchId, err := branchmanager.BranchIdFromBase58(id)
+		if err != nil {
+			log.Error(err)
+
+			return
+		}
+
+		switch opinion {
+		case vote.Like:
+			UTXODAG.BranchManager().SetBranchPreferred(branchId, true)
+		case vote.Dislike:
+			UTXODAG.BranchManager().SetBranchPreferred(branchId, false)
+		}
+	}))
+}
+
+func run(*node.Plugin) {
+	_ = daemon.BackgroundWorker("Tangle", func(shutdownSignal <-chan struct{}) {
+		<-shutdownSignal
+		Tangle.Shutdown()
+		UTXODAG.Shutdown()
+	}, shutdown.PriorityTangle)
+
+	runFPC()
+}
+
+func onReceiveMessageFromMessageLayer(cachedMessage *message.CachedMessage, cachedMessageMetadata *messageTangle.CachedMessageMetadata) {
+	defer cachedMessage.Release()
+	defer cachedMessageMetadata.Release()
+
+	solidMessage := cachedMessage.Unwrap()
+	if solidMessage == nil {
+		// TODO: LOG ERROR?
+
+		return
+	}
+
+	messagePayload := solidMessage.Payload()
+	if messagePayload.Type() != valuepayload.Type {
+		// TODO: LOG ERROR?
+
+		return
+	}
+
+	valuePayload, ok := messagePayload.(*valuepayload.Payload)
+	if !ok {
+		// TODO: LOG ERROR?
+
+		return
+	}
+
+	Tangle.AttachPayload(valuePayload)
+}
+
+func onTransactionBooked(cachedTransaction *transaction.CachedTransaction, cachedTransactionMetadata *utxodag.CachedTransactionMetadata, cachedBranch *branchmanager.CachedBranch, conflictingInputs []transaction.OutputId, previousConsumersForked bool) {
+	defer cachedTransaction.Release()
+	defer cachedTransactionMetadata.Release()
+	defer cachedBranch.Release()
+
+	if len(conflictingInputs) >= 1 {
+		// abort if the previous consumers were finalized already
+		if !previousConsumersForked {
+			return
+		}
+
+		branch := cachedBranch.Unwrap()
+		if branch == nil {
+			log.Error("failed to unpack branch")
+
+			return
+		}
+
+		err := voter.Vote(branch.Id().String(), vote.Dislike)
+		if err != nil {
+			log.Error(err)
+		}
+
+		return
+	}
+
+	// If the transaction is not conflicting, then we apply the fcob rule (we finalize after 2 network delays).
+	// Note: We do not set a liked flag after 1 network delay because that can be derived.
+	cachedTransactionMetadata.Retain()
+	time.AfterFunc(2*AverageNetworkDelay, func() {
+		defer cachedTransactionMetadata.Release()
+
+		transactionMetadata := cachedTransactionMetadata.Unwrap()
+		if transactionMetadata == nil {
+			return
+		}
+
+		if transactionMetadata.BranchId() != branchmanager.NewBranchId(transactionMetadata.Id()) {
+			return
+		}
+
+		transactionMetadata.SetFinalized(true)
+	})
+}
+
+func onFork(cachedTransaction *transaction.CachedTransaction, cachedTransactionMetadata *utxodag.CachedTransactionMetadata, cachedBranch *branchmanager.CachedBranch, conflictingInputs []transaction.OutputId) {
+	defer cachedTransaction.Release()
+	defer cachedTransactionMetadata.Release()
+	defer cachedBranch.Release()
+
+	transactionMetadata := cachedTransactionMetadata.Unwrap()
+	if transactionMetadata == nil {
+		return
+	}
+
+	branch := cachedBranch.Unwrap()
+	if branch == nil {
+		return
+	}
+
+	if time.Since(transactionMetadata.SoldificationTime()) < AverageNetworkDelay {
+		return
+	}
+
+	if _, err := UTXODAG.BranchManager().SetBranchPreferred(branch.Id(), true); err != nil {
+		log.Error(err)
+	}
+}
diff --git a/dapps/valuetransfers/fpc.go b/dapps/valuetransfers/fpc.go
new file mode 100644
index 00000000..977bf6c7
--- /dev/null
+++ b/dapps/valuetransfers/fpc.go
@@ -0,0 +1,188 @@
+package valuetransfers
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"net"
+	"strconv"
+
+	"github.com/iotaledger/hive.go/autopeering/peer"
+	"github.com/iotaledger/hive.go/daemon"
+	"github.com/iotaledger/hive.go/events"
+	"github.com/iotaledger/hive.go/logger"
+	"google.golang.org/grpc"
+
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
+	"github.com/iotaledger/goshimmer/packages/prng"
+	"github.com/iotaledger/goshimmer/packages/shutdown"
+	"github.com/iotaledger/goshimmer/packages/vote"
+	"github.com/iotaledger/goshimmer/packages/vote/fpc"
+	votenet "github.com/iotaledger/goshimmer/packages/vote/net"
+	"github.com/iotaledger/goshimmer/plugins/autopeering"
+	"github.com/iotaledger/goshimmer/plugins/autopeering/local"
+	"github.com/iotaledger/goshimmer/plugins/config"
+
+	"sync"
+
+	"github.com/iotaledger/hive.go/autopeering/peer/service"
+)
+
+const (
+	CfgFPCQuerySampleSize = "fpc.querySampleSize"
+	CfgFPCRoundInterval   = "fpc.roundInterval"
+	CfgFPCBindAddress     = "fpc.bindAddress"
+)
+
+func init() {
+	flag.Int(CfgFPCQuerySampleSize, 3, "Size of the voting quorum (k)")
+	flag.Int(CfgFPCRoundInterval, 5, "FPC round interval [s]")
+	flag.String(CfgFPCBindAddress, "0.0.0.0:10895", "the bind address on which the FPC vote server binds to")
+}
+
+var (
+	voter                *fpc.FPC
+	voterOnce            sync.Once
+	voterServer          *votenet.VoterServer
+	roundIntervalSeconds int64 = 5
+)
+
+// Voter returns the DRNGRoundBasedVoter instance used by the FPC plugin.
+func Voter() vote.DRNGRoundBasedVoter {
+	voterOnce.Do(func() {
+		// create a function which gets OpinionGivers
+		opinionGiverFunc := func() (givers []vote.OpinionGiver, err error) {
+			opinionGivers := make([]vote.OpinionGiver, 0)
+			for _, p := range autopeering.Discovery.GetVerifiedPeers() {
+				fpcService := p.Services().Get(service.FPCKey)
+				if fpcService == nil {
+					continue
+				}
+				// TODO: maybe cache the PeerOpinionGiver instead of creating a new one every time
+				opinionGivers = append(opinionGivers, &PeerOpinionGiver{p: p})
+			}
+			return opinionGivers, nil
+		}
+		voter = fpc.New(opinionGiverFunc)
+	})
+	return voter
+}
+
+func configureFPC() {
+	log = logger.NewLogger(PluginName)
+	lPeer := local.GetInstance()
+
+	bindAddr := config.Node.GetString(CfgFPCBindAddress)
+	_, portStr, err := net.SplitHostPort(bindAddr)
+	if err != nil {
+		log.Fatalf("FPC bind address '%s' is invalid: %s", bindAddr, err)
+	}
+	port, err := strconv.Atoi(portStr)
+	if err != nil {
+		log.Fatalf("FPC bind address '%s' is invalid: %s", bindAddr, err)
+	}
+
+	if err := lPeer.UpdateService(service.FPCKey, "tcp", port); err != nil {
+		log.Fatalf("could not update services: %v", err)
+	}
+
+	voter.Events().RoundExecuted.Attach(events.NewClosure(func(roundStats *vote.RoundStats) {
+		peersQueried := len(roundStats.QueriedOpinions)
+		voteContextsCount := len(roundStats.ActiveVoteContexts)
+		log.Infof("executed round with rand %0.4f for %d vote contexts on %d peers, took %v", roundStats.RandUsed, voteContextsCount, peersQueried, roundStats.Duration)
+	}))
+}
+
+func runFPC() {
+	daemon.BackgroundWorker("FPCVoterServer", func(shutdownSignal <-chan struct{}) {
+		voterServer = votenet.New(Voter(), func(id string) vote.Opinion {
+			branchId, err := branchmanager.BranchIdFromBase58(id)
+			if err != nil {
+				log.Errorf("received invalid vote request for branch '%s'", id)
+
+				return vote.Unknown
+			}
+
+			cachedBranch := UTXODAG.BranchManager().GetBranch(branchId)
+			defer cachedBranch.Release()
+
+			branch := cachedBranch.Unwrap()
+			if branch == nil {
+				return vote.Unknown
+			}
+
+			if !branch.Preferred() {
+				return vote.Dislike
+			}
+
+			return vote.Like
+		}, config.Node.GetString(CfgFPCBindAddress))
+
+		go func() {
+			if err := voterServer.Run(); err != nil {
+				log.Error(err)
+			}
+		}()
+
+		log.Infof("Started vote server on %s", config.Node.GetString(CfgFPCBindAddress))
+		<-shutdownSignal
+		voterServer.Shutdown()
+		log.Info("Stopped vote server")
+	}, shutdown.PriorityFPC)
+
+	daemon.BackgroundWorker("FPCRoundsInitiator", func(shutdownSignal <-chan struct{}) {
+		log.Infof("Started FPC round initiator")
+		unixTsPRNG := prng.NewUnixTimestampPRNG(roundIntervalSeconds)
+		defer unixTsPRNG.Stop()
+	exit:
+		for {
+			select {
+			case r := <-unixTsPRNG.C():
+				if err := voter.Round(r); err != nil {
+					log.Errorf("unable to execute FPC round: %s", err)
+				}
+			case <-shutdownSignal:
+				break exit
+			}
+		}
+		log.Infof("Stopped FPC round initiator")
+	}, shutdown.PriorityFPC)
+}
+
+// PeerOpinionGiver implements the OpinionGiver interface based on a peer.
+type PeerOpinionGiver struct {
+	p *peer.Peer
+}
+
+func (pog *PeerOpinionGiver) Query(ctx context.Context, ids []string) (vote.Opinions, error) {
+	fpcServicePort := pog.p.Services().Get(service.FPCKey).Port()
+	fpcAddr := net.JoinHostPort(pog.p.IP().String(), strconv.Itoa(fpcServicePort))
+
+	var opts []grpc.DialOption
+	opts = append(opts, grpc.WithInsecure())
+
+	// connect to the FPC service
+	conn, err := grpc.Dial(fpcAddr, opts...)
+	if err != nil {
+		return nil, fmt.Errorf("unable to connect to FPC service: %w", err)
+	}
+	defer conn.Close()
+
+	client := votenet.NewVoterQueryClient(conn)
+	reply, err := client.Opinion(ctx, &votenet.QueryRequest{Id: ids})
+	if err != nil {
+		return nil, fmt.Errorf("unable to query opinions: %w", err)
+	}
+
+	// convert int32s in reply to opinions
+	opinions := make(vote.Opinions, len(reply.Opinion))
+	for i, intOpn := range reply.Opinion {
+		opinions[i] = vote.ConvertInt32Opinion(intOpn)
+	}
+
+	return opinions, nil
+}
+
+func (pog *PeerOpinionGiver) ID() string {
+	return pog.p.ID().String()
+}
diff --git a/packages/binary/valuetransfer/address/address.go b/dapps/valuetransfers/packages/address/address.go
similarity index 100%
rename from packages/binary/valuetransfer/address/address.go
rename to dapps/valuetransfers/packages/address/address.go
diff --git a/packages/binary/valuetransfer/address/signaturescheme/bls.go b/dapps/valuetransfers/packages/address/signaturescheme/bls.go
similarity index 98%
rename from packages/binary/valuetransfer/address/signaturescheme/bls.go
rename to dapps/valuetransfers/packages/address/signaturescheme/bls.go
index ca2b0134..5b17d8b6 100644
--- a/packages/binary/valuetransfer/address/signaturescheme/bls.go
+++ b/dapps/valuetransfers/packages/address/signaturescheme/bls.go
@@ -11,7 +11,7 @@ import (
 	"go.dedis.ch/kyber/v3/sign/bdn"
 	"go.dedis.ch/kyber/v3/util/random"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
 )
 
 // bls.go implements BLS signature scheme which is robust against rogue public key attacks,
diff --git a/packages/binary/valuetransfer/address/signaturescheme/bls_test.go b/dapps/valuetransfers/packages/address/signaturescheme/bls_test.go
similarity index 100%
rename from packages/binary/valuetransfer/address/signaturescheme/bls_test.go
rename to dapps/valuetransfers/packages/address/signaturescheme/bls_test.go
diff --git a/packages/binary/valuetransfer/address/signaturescheme/ed25519.go b/dapps/valuetransfers/packages/address/signaturescheme/ed25519.go
similarity index 98%
rename from packages/binary/valuetransfer/address/signaturescheme/ed25519.go
rename to dapps/valuetransfers/packages/address/signaturescheme/ed25519.go
index 54349efe..09597718 100644
--- a/packages/binary/valuetransfer/address/signaturescheme/ed25519.go
+++ b/dapps/valuetransfers/packages/address/signaturescheme/ed25519.go
@@ -6,7 +6,7 @@ import (
 	"github.com/iotaledger/hive.go/crypto/ed25519"
 	"github.com/iotaledger/hive.go/marshalutil"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
 )
 
 // region PUBLIC API ///////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/packages/binary/valuetransfer/address/signaturescheme/signature.go b/dapps/valuetransfers/packages/address/signaturescheme/signature.go
similarity index 84%
rename from packages/binary/valuetransfer/address/signaturescheme/signature.go
rename to dapps/valuetransfers/packages/address/signaturescheme/signature.go
index 1e89e733..006d19c6 100644
--- a/packages/binary/valuetransfer/address/signaturescheme/signature.go
+++ b/dapps/valuetransfers/packages/address/signaturescheme/signature.go
@@ -1,6 +1,6 @@
 package signaturescheme
 
-import "github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
+import "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
 
 // Signature defines an interface for an address signature generated by the corresponding signature scheme.
 type Signature interface {
diff --git a/packages/binary/valuetransfer/address/signaturescheme/signaturescheme.go b/dapps/valuetransfers/packages/address/signaturescheme/signaturescheme.go
similarity index 89%
rename from packages/binary/valuetransfer/address/signaturescheme/signaturescheme.go
rename to dapps/valuetransfers/packages/address/signaturescheme/signaturescheme.go
index 2abe2f6a..046a44f7 100644
--- a/packages/binary/valuetransfer/address/signaturescheme/signaturescheme.go
+++ b/dapps/valuetransfers/packages/address/signaturescheme/signaturescheme.go
@@ -1,7 +1,7 @@
 package signaturescheme
 
 import (
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
 )
 
 // SignatureScheme defines an interface for different signature generation methods (i.e. ED25519, WOTS, and so on ...).
diff --git a/packages/binary/valuetransfer/balance/balance.go b/dapps/valuetransfers/packages/balance/balance.go
similarity index 100%
rename from packages/binary/valuetransfer/balance/balance.go
rename to dapps/valuetransfers/packages/balance/balance.go
diff --git a/packages/binary/valuetransfer/balance/balance_test.go b/dapps/valuetransfers/packages/balance/balance_test.go
similarity index 86%
rename from packages/binary/valuetransfer/balance/balance_test.go
rename to dapps/valuetransfers/packages/balance/balance_test.go
index 98fbd6bb..d807eab7 100644
--- a/packages/binary/valuetransfer/balance/balance_test.go
+++ b/dapps/valuetransfers/packages/balance/balance_test.go
@@ -7,9 +7,9 @@ import (
 )
 
 func TestMarshalUnmarshal(t *testing.T) {
-	balance := New(COLOR_IOTA, 1337)
+	balance := New(ColorIOTA, 1337)
 	assert.Equal(t, int64(1337), balance.Value())
-	assert.Equal(t, COLOR_IOTA, balance.Color())
+	assert.Equal(t, ColorIOTA, balance.Color())
 
 	marshaledBalance := balance.Bytes()
 	assert.Equal(t, Length, len(marshaledBalance))
diff --git a/packages/binary/valuetransfer/balance/color.go b/dapps/valuetransfers/packages/balance/color.go
similarity index 69%
rename from packages/binary/valuetransfer/balance/color.go
rename to dapps/valuetransfers/packages/balance/color.go
index 5f7ef740..33cd961e 100644
--- a/packages/binary/valuetransfer/balance/color.go
+++ b/dapps/valuetransfers/packages/balance/color.go
@@ -26,13 +26,13 @@ func (color Color) Bytes() []byte {
 }
 
 func (color Color) String() string {
-	if color == COLOR_IOTA {
+	if color == ColorIOTA {
 		return "IOTA"
 	}
 
 	return base58.Encode(color[:])
 }
 
-var COLOR_IOTA Color = [32]byte{}
+var ColorIOTA Color = [32]byte{}
 
-var COLOR_NEW = [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
+var ColorNew = [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
diff --git a/dapps/valuetransfers/packages/branchmanager/branch.go b/dapps/valuetransfers/packages/branchmanager/branch.go
new file mode 100644
index 00000000..d513e1ba
--- /dev/null
+++ b/dapps/valuetransfers/packages/branchmanager/branch.go
@@ -0,0 +1,306 @@
+package branchmanager
+
+import (
+	"sync"
+
+	"github.com/iotaledger/hive.go/marshalutil"
+	"github.com/iotaledger/hive.go/objectstorage"
+	"github.com/iotaledger/hive.go/stringify"
+	"github.com/iotaledger/hive.go/types"
+
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+)
+
+type Branch struct {
+	objectstorage.StorableObjectFlags
+
+	id             BranchId
+	parentBranches []BranchId
+	conflicts      map[ConflictId]types.Empty
+	preferred      bool
+	liked          bool
+
+	conflictsMutex sync.RWMutex
+	preferredMutex sync.RWMutex
+	likedMutex     sync.RWMutex
+}
+
+func NewBranch(id BranchId, parentBranches []BranchId, conflictingInputs []transaction.OutputId) *Branch {
+	conflictingInputsMap := make(map[ConflictId]types.Empty)
+	for _, conflictingInput := range conflictingInputs {
+		conflictingInputsMap[conflictingInput] = types.Void
+	}
+
+	return &Branch{
+		id:             id,
+		parentBranches: parentBranches,
+		conflicts:      conflictingInputsMap,
+	}
+}
+
+func BranchFromStorageKey(key []byte, optionalTargetObject ...*Branch) (result *Branch, err error, consumedBytes int) {
+	// determine the target object that will hold the unmarshaled information
+	switch len(optionalTargetObject) {
+	case 0:
+		result = &Branch{}
+	case 1:
+		result = optionalTargetObject[0]
+	default:
+		panic("too many arguments in call to BranchFromStorageKey")
+	}
+
+	// parse information
+	marshalUtil := marshalutil.New(key)
+	result.id, err = ParseBranchId(marshalUtil)
+	if err != nil {
+		return
+	}
+	consumedBytes = marshalUtil.ReadOffset()
+
+	return
+}
+
+func BranchFromBytes(bytes []byte, optionalTargetObject ...*Branch) (result *Branch, err error, consumedBytes int) {
+	marshalUtil := marshalutil.New(bytes)
+	result, err = ParseBranch(marshalUtil, optionalTargetObject...)
+	consumedBytes = marshalUtil.ReadOffset()
+
+	return
+}
+
+func ParseBranch(marshalUtil *marshalutil.MarshalUtil, optionalTargetObject ...*Branch) (result *Branch, err error) {
+	if parsedObject, parseErr := marshalUtil.Parse(func(data []byte) (interface{}, error, int) {
+		return BranchFromStorageKey(data, optionalTargetObject...)
+	}); parseErr != nil {
+		err = parseErr
+
+		return
+	} else {
+		result = parsedObject.(*Branch)
+	}
+
+	if _, err = marshalUtil.Parse(func(data []byte) (parseResult interface{}, parseErr error, parsedBytes int) {
+		parseErr, parsedBytes = result.UnmarshalObjectStorageValue(data)
+
+		return
+	}); err != nil {
+		return
+	}
+
+	return
+}
+
+func (branch *Branch) Id() BranchId {
+	return branch.id
+}
+
+func (branch *Branch) ParentBranches() []BranchId {
+	return branch.parentBranches
+}
+
+func (branch *Branch) IsAggregated() bool {
+	return len(branch.parentBranches) > 1
+}
+
+func (branch *Branch) Conflicts() (conflicts map[ConflictId]types.Empty) {
+	branch.conflictsMutex.RLock()
+	defer branch.conflictsMutex.RUnlock()
+
+	conflicts = make(map[ConflictId]types.Empty, len(branch.conflicts))
+	for conflict := range branch.conflicts {
+		conflicts[conflict] = types.Void
+	}
+
+	return
+}
+
+func (branch *Branch) AddConflict(conflict ConflictId) (added bool) {
+	branch.conflictsMutex.RLock()
+	if _, exists := branch.conflicts[conflict]; exists {
+		branch.conflictsMutex.RUnlock()
+
+		return
+	}
+
+	branch.conflictsMutex.RUnlock()
+	branch.conflictsMutex.Lock()
+	defer branch.conflictsMutex.Unlock()
+
+	if _, exists := branch.conflicts[conflict]; exists {
+		return
+	}
+
+	branch.conflicts[conflict] = types.Void
+	added = true
+
+	return
+}
+
+func (branch *Branch) Preferred() bool {
+	branch.preferredMutex.RLock()
+	defer branch.preferredMutex.RUnlock()
+
+	return branch.preferred
+}
+
+func (branch *Branch) SetPreferred(preferred bool) (modified bool) {
+	branch.preferredMutex.RLock()
+	if branch.preferred == preferred {
+		branch.preferredMutex.RUnlock()
+
+		return
+	}
+
+	branch.preferredMutex.RUnlock()
+	branch.preferredMutex.Lock()
+	defer branch.preferredMutex.Unlock()
+
+	if branch.preferred == preferred {
+		return
+	}
+
+	branch.preferred = preferred
+	modified = true
+
+	return
+}
+
+func (branch *Branch) Liked() bool {
+	branch.likedMutex.RLock()
+	defer branch.likedMutex.RUnlock()
+
+	return branch.liked
+}
+
+func (branch *Branch) SetLiked(liked bool) (modified bool) {
+	branch.likedMutex.RLock()
+	if branch.liked == liked {
+		branch.likedMutex.RUnlock()
+
+		return
+	}
+
+	branch.likedMutex.RUnlock()
+	branch.likedMutex.Lock()
+	defer branch.likedMutex.Unlock()
+
+	if branch.liked == liked {
+		return
+	}
+
+	branch.liked = liked
+	modified = true
+
+	return
+}
+
+func (branch *Branch) Bytes() []byte {
+	return marshalutil.New().
+		WriteBytes(branch.ObjectStorageKey()).
+		WriteBytes(branch.ObjectStorageValue()).
+		Bytes()
+}
+
+func (branch *Branch) String() string {
+	return stringify.Struct("Branch",
+		stringify.StructField("id", branch.Id()),
+	)
+}
+
+func (branch *Branch) Update(other objectstorage.StorableObject) {
+	panic("updates are disabled - please use the setters")
+}
+
+func (branch *Branch) ObjectStorageKey() []byte {
+	return branch.id.Bytes()
+}
+
+func (branch *Branch) ObjectStorageValue() []byte {
+	branch.preferredMutex.RLock()
+	branch.likedMutex.RLock()
+	defer branch.preferredMutex.RUnlock()
+	defer branch.likedMutex.RUnlock()
+
+	parentBranches := branch.ParentBranches()
+	parentBranchCount := len(parentBranches)
+
+	marshalUtil := marshalutil.New(2*marshalutil.BOOL_SIZE + marshalutil.UINT32_SIZE + parentBranchCount*BranchIdLength)
+	marshalUtil.WriteBool(branch.preferred)
+	marshalUtil.WriteBool(branch.liked)
+	marshalUtil.WriteUint32(uint32(parentBranchCount))
+	for _, branchId := range parentBranches {
+		marshalUtil.WriteBytes(branchId.Bytes())
+	}
+
+	return marshalUtil.Bytes()
+}
+
+func (branch *Branch) UnmarshalObjectStorageValue(valueBytes []byte) (err error, consumedBytes int) {
+	marshalUtil := marshalutil.New(valueBytes)
+	branch.preferred, err = marshalUtil.ReadBool()
+	if err != nil {
+		return
+	}
+	branch.liked, err = marshalUtil.ReadBool()
+	if err != nil {
+		return
+	}
+	parentBranchCount, err := marshalUtil.ReadUint32()
+	if err != nil {
+		return
+	}
+	branch.parentBranches = make([]BranchId, parentBranchCount)
+	for i := uint32(0); i < parentBranchCount; i++ {
+		branch.parentBranches[i], err = ParseBranchId(marshalUtil)
+		if err != nil {
+			return
+		}
+	}
+	consumedBytes = marshalUtil.ReadOffset()
+
+	return
+}
+
+type CachedBranch struct {
+	objectstorage.CachedObject
+}
+
+func (cachedBranches *CachedBranch) Retain() *CachedBranch {
+	return &CachedBranch{cachedBranches.CachedObject.Retain()}
+}
+
+func (cachedBranches *CachedBranch) Unwrap() *Branch {
+	if untypedObject := cachedBranches.Get(); untypedObject == nil {
+		return nil
+	} else {
+		if typedObject := untypedObject.(*Branch); typedObject == nil || typedObject.IsDeleted() {
+			return nil
+		} else {
+			return typedObject
+		}
+	}
+}
+
+func (cachedBranches *CachedBranch) Consume(consumer func(branch *Branch), forceRelease ...bool) (consumed bool) {
+	return cachedBranches.CachedObject.Consume(func(object objectstorage.StorableObject) {
+		consumer(object.(*Branch))
+	}, forceRelease...)
+}
+
+type CachedBranches map[BranchId]*CachedBranch
+
+func (cachedBranches CachedBranches) Consume(consumer func(branch *Branch)) (consumed bool) {
+	for _, cachedBranch := range cachedBranches {
+		consumed = cachedBranch.Consume(func(output *Branch) {
+			consumer(output)
+		}) || consumed
+	}
+
+	return
+}
+
+func (cachedBranches CachedBranches) Release(force ...bool) {
+	for _, cachedBranch := range cachedBranches {
+		cachedBranch.Release(force...)
+	}
+}
diff --git a/packages/binary/valuetransfer/tangle/branchid.go b/dapps/valuetransfers/packages/branchmanager/branchid.go
similarity index 83%
rename from packages/binary/valuetransfer/tangle/branchid.go
rename to dapps/valuetransfers/packages/branchmanager/branchid.go
index ba7f6f93..1840c57a 100644
--- a/packages/binary/valuetransfer/tangle/branchid.go
+++ b/dapps/valuetransfers/packages/branchmanager/branchid.go
@@ -1,12 +1,13 @@
-package tangle
+package branchmanager
 
 import (
 	"fmt"
 
 	"github.com/iotaledger/hive.go/marshalutil"
+	"github.com/iotaledger/hive.go/types"
 	"github.com/mr-tron/base58"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 )
 
 type BranchId [BranchIdLength]byte
@@ -82,3 +83,16 @@ func (branchId BranchId) String() string {
 // BranchIdLength encodes the length of a branch identifier - since branches get created by transactions, it has the
 // same length as a transaction Id.
 const BranchIdLength = transaction.IdLength
+
+type BranchIds map[BranchId]types.Empty
+
+func (branchIds BranchIds) ToList() (result []BranchId) {
+	result = make([]BranchId, len(branchIds))
+	i := 0
+	for branchId := range branchIds {
+		result[i] = branchId
+		i++
+	}
+
+	return
+}
diff --git a/dapps/valuetransfers/packages/branchmanager/branchmanager.go b/dapps/valuetransfers/packages/branchmanager/branchmanager.go
new file mode 100644
index 00000000..799568cc
--- /dev/null
+++ b/dapps/valuetransfers/packages/branchmanager/branchmanager.go
@@ -0,0 +1,581 @@
+package branchmanager
+
+import (
+	"container/list"
+	"fmt"
+	"sort"
+
+	"github.com/dgraph-io/badger/v2"
+	"github.com/iotaledger/hive.go/marshalutil"
+	"github.com/iotaledger/hive.go/objectstorage"
+	"github.com/iotaledger/hive.go/types"
+	"golang.org/x/crypto/blake2b"
+
+	"github.com/iotaledger/goshimmer/packages/binary/storageprefix"
+)
+
+type BranchManager struct {
+	branchStorage         *objectstorage.ObjectStorage
+	childBranchStorage    *objectstorage.ObjectStorage
+	conflictStorage       *objectstorage.ObjectStorage
+	conflictMemberStorage *objectstorage.ObjectStorage
+
+	Events *Events
+}
+
+func New(badgerInstance *badger.DB) (result *BranchManager) {
+	osFactory := objectstorage.NewFactory(badgerInstance, storageprefix.ValueTransfers)
+
+	result = &BranchManager{
+		branchStorage: osFactory.New(osBranch, osBranchFactory, osBranchOptions...),
+	}
+	result.init()
+
+	return
+}
+
+func (branchManager *BranchManager) init() {
+	branchManager.branchStorage.StoreIfAbsent(NewBranch(MasterBranchId, []BranchId{}, []ConflictId{}))
+}
+
+func (branchManager *BranchManager) Conflict(conflictId ConflictId) *CachedConflict {
+	return &CachedConflict{CachedObject: branchManager.conflictStorage.Load(conflictId.Bytes())}
+}
+
+func (branchManager *BranchManager) ConflictMembers(conflictId ConflictId) CachedConflictMembers {
+	conflictMembers := make(CachedConflictMembers, 0)
+	branchManager.conflictMemberStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool {
+		conflictMembers = append(conflictMembers, &CachedConflictMember{CachedObject: cachedObject})
+
+		return true
+	}, conflictId.Bytes())
+
+	return conflictMembers
+}
+
+func (branchManager *BranchManager) AddBranch(branch *Branch) *CachedBranch {
+	return &CachedBranch{CachedObject: branchManager.branchStorage.ComputeIfAbsent(branch.Id().Bytes(), func(key []byte) objectstorage.StorableObject {
+		branch.Persist()
+		branch.SetModified()
+
+		return branch
+	})}
+}
+
+func (branchManager *BranchManager) GetBranch(branchId BranchId) *CachedBranch {
+	return &CachedBranch{CachedObject: branchManager.branchStorage.Load(branchId.Bytes())}
+}
+
+func (branchManager *BranchManager) InheritBranches(branches ...BranchId) (cachedAggregatedBranch *CachedBranch, err error) {
+	// return the MasterBranch if we have no branches in the parameters
+	if len(branches) == 0 {
+		cachedAggregatedBranch = branchManager.GetBranch(MasterBranchId)
+
+		return
+	}
+
+	if len(branches) == 1 {
+		cachedAggregatedBranch = branchManager.GetBranch(branches[0])
+
+		return
+	}
+
+	// filter out duplicates and shared ancestor Branches (abort if we faced an error)
+	deepestCommonAncestors, err := branchManager.findDeepestCommonAncestorBranches(branches...)
+	if err != nil {
+		return
+	}
+
+	// if there is only one branch that we found, then we are done
+	if len(deepestCommonAncestors) == 1 {
+		for _, firstBranchInList := range deepestCommonAncestors {
+			cachedAggregatedBranch = firstBranchInList
+		}
+
+		return
+	}
+
+	// if there is more than one parents: aggregate
+	aggregatedBranchId, aggregatedBranchParents, err := branchManager.determineAggregatedBranchDetails(deepestCommonAncestors)
+	if err != nil {
+		return
+	}
+
+	newAggregatedBranchCreated := false
+	cachedAggregatedBranch = &CachedBranch{CachedObject: branchManager.branchStorage.ComputeIfAbsent(aggregatedBranchId.Bytes(), func(key []byte) (object objectstorage.StorableObject) {
+		aggregatedReality := NewBranch(aggregatedBranchId, aggregatedBranchParents, []ConflictId{})
+
+		// TODO: FIX
+		/*
+			for _, parentRealityId := range aggregatedBranchParents {
+				tangle.GetBranch(parentRealityId).Consume(func(branch *Branch) {
+					branch.RegisterSubReality(aggregatedRealityId)
+				})
+			}
+		*/
+
+		aggregatedReality.SetModified()
+
+		newAggregatedBranchCreated = true
+
+		return aggregatedReality
+	})}
+
+	if !newAggregatedBranchCreated {
+		fmt.Println("1")
+		// TODO: FIX
+		/*
+			aggregatedBranch := cachedAggregatedBranch.Unwrap()
+
+			for _, realityId := range aggregatedBranchParents {
+				if aggregatedBranch.AddParentReality(realityId) {
+					tangle.GetBranch(realityId).Consume(func(branch *Branch) {
+						branch.RegisterSubReality(aggregatedRealityId)
+					})
+				}
+			}
+		*/
+	}
+
+	return
+}
+
+func (branchManager *BranchManager) ChildBranches(branchId BranchId) CachedChildBranches {
+	childBranches := make(CachedChildBranches, 0)
+	branchManager.childBranchStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool {
+		childBranches = append(childBranches, &CachedChildBranch{CachedObject: cachedObject})
+
+		return true
+	}, branchId.Bytes())
+
+	return childBranches
+}
+
+func (branchManager *BranchManager) SetBranchPreferred(branchId BranchId, preferred bool) (modified bool, err error) {
+	return branchManager.setBranchPreferred(branchManager.GetBranch(branchId), preferred)
+}
+
+func (branchManager *BranchManager) setBranchPreferred(cachedBranch *CachedBranch, preferred bool) (modified bool, err error) {
+	defer cachedBranch.Release()
+	branch := cachedBranch.Unwrap()
+	if branch == nil {
+		err = fmt.Errorf("failed to unwrap branch")
+
+		return
+	}
+
+	if !preferred {
+		if modified = branch.SetPreferred(false); modified {
+			branchManager.Events.BranchUnpreferred.Trigger(cachedBranch)
+
+			branchManager.propagateDislike(cachedBranch.Retain())
+		}
+
+		return
+	}
+
+	for conflictId := range branch.Conflicts() {
+		branchManager.ConflictMembers(conflictId).Consume(func(conflictMember *ConflictMember) {
+			if conflictMember.BranchId() == branch.Id() {
+				return
+			}
+
+			_, _ = branchManager.setBranchPreferred(branchManager.GetBranch(conflictMember.BranchId()), false)
+		})
+	}
+
+	if modified = branch.SetPreferred(true); !modified {
+		return
+	}
+
+	branchManager.Events.BranchPreferred.Trigger(cachedBranch)
+
+	err = branchManager.propagateLike(cachedBranch.Retain())
+
+	return
+}
+
+func (branchManager *BranchManager) propagateLike(cachedBranch *CachedBranch) (err error) {
+	// unpack CachedBranch and abort of the branch doesn't exist or isn't preferred
+	defer cachedBranch.Release()
+	branch := cachedBranch.Unwrap()
+	if branch == nil || !branch.Preferred() {
+		return
+	}
+
+	// check if parents are liked
+	for _, parentBranchId := range branch.ParentBranches() {
+		// abort, if the parent branch can not be loaded
+		cachedParentBranch := branchManager.GetBranch(parentBranchId)
+		parentBranch := cachedParentBranch.Unwrap()
+		if parentBranch == nil {
+			cachedParentBranch.Release()
+
+			return fmt.Errorf("failed to load parent branch '%s' of branch '%s'", parentBranchId, branch.Id())
+		}
+
+		// abort if the parent branch is not liked
+		if !parentBranch.Liked() {
+			cachedParentBranch.Release()
+
+			return
+		}
+
+		cachedParentBranch.Release()
+	}
+
+	// abort if the branch was liked already
+	if !branch.SetLiked(true) {
+		return
+	}
+
+	// trigger events
+	branchManager.Events.BranchLiked.Trigger(cachedBranch)
+
+	// propagate liked checks to the children
+	for _, cachedChildBranch := range branchManager.ChildBranches(branch.Id()) {
+		childBranch := cachedChildBranch.Unwrap()
+		if childBranch == nil {
+			cachedChildBranch.Release()
+
+			continue
+		}
+
+		if err = branchManager.propagateLike(branchManager.GetBranch(childBranch.Id())); err != nil {
+			cachedChildBranch.Release()
+
+			return
+		}
+
+		cachedChildBranch.Release()
+	}
+
+	return
+}
+
+func (branchManager *BranchManager) propagateDislike(cachedBranch *CachedBranch) {
+	defer cachedBranch.Release()
+	branch := cachedBranch.Unwrap()
+	if branch == nil || !branch.SetLiked(false) {
+		return
+	}
+
+	branchManager.Events.BranchDisliked.Trigger(cachedBranch)
+
+	branchManager.ChildBranches(branch.Id()).Consume(func(childBranch *ChildBranch) {
+		branchManager.propagateDislike(branchManager.GetBranch(childBranch.Id()))
+	})
+}
+
+func (branchManager *BranchManager) determineAggregatedBranchDetails(deepestCommonAncestors CachedBranches) (aggregatedBranchId BranchId, aggregatedBranchParents []BranchId, err error) {
+	aggregatedBranchParents = make([]BranchId, len(deepestCommonAncestors))
+
+	i := 0
+	aggregatedBranchConflictParents := make(CachedBranches)
+	for branchId, cachedBranch := range deepestCommonAncestors {
+		// release all following entries if we have encountered an error
+		if err != nil {
+			cachedBranch.Release()
+
+			continue
+		}
+
+		// store BranchId as parent
+		aggregatedBranchParents[i] = branchId
+		i++
+
+		// abort if we could not unwrap the Branch (should never happen)
+		branch := cachedBranch.Unwrap()
+		if branch == nil {
+			cachedBranch.Release()
+
+			err = fmt.Errorf("failed to unwrap brach '%s'", branchId)
+
+			continue
+		}
+
+		if branch.IsAggregated() {
+			aggregatedBranchConflictParents[branchId] = cachedBranch
+
+			continue
+		}
+
+		err = branchManager.collectClosestConflictAncestors(branch, aggregatedBranchConflictParents)
+
+		cachedBranch.Release()
+	}
+
+	if err != nil {
+		aggregatedBranchConflictParents.Release()
+		aggregatedBranchConflictParents = nil
+
+		return
+	}
+
+	aggregatedBranchId = branchManager.generateAggregatedBranchId(aggregatedBranchConflictParents)
+
+	return
+}
+
+func (branchManager *BranchManager) generateAggregatedBranchId(aggregatedBranches CachedBranches) BranchId {
+	counter := 0
+	branchIds := make([]BranchId, len(aggregatedBranches))
+	for branchId, cachedBranch := range aggregatedBranches {
+		branchIds[counter] = branchId
+
+		counter++
+
+		cachedBranch.Release()
+	}
+
+	sort.Slice(branchIds, func(i, j int) bool {
+		for k := 0; k < len(branchIds[i]); k++ {
+			if branchIds[i][k] < branchIds[j][k] {
+				return true
+			} else if branchIds[i][k] > branchIds[j][k] {
+				return false
+			}
+		}
+
+		return false
+	})
+
+	marshalUtil := marshalutil.New(BranchIdLength * len(branchIds))
+	for _, branchId := range branchIds {
+		marshalUtil.WriteBytes(branchId.Bytes())
+	}
+
+	return blake2b.Sum256(marshalUtil.Bytes())
+}
+
+func (branchManager *BranchManager) collectClosestConflictAncestors(branch *Branch, closestConflictAncestors CachedBranches) (err error) {
+	// initialize stack
+	stack := list.New()
+	for _, parentRealityId := range branch.ParentBranches() {
+		stack.PushBack(parentRealityId)
+	}
+
+	// work through stack
+	processedBranches := make(map[BranchId]types.Empty)
+	for stack.Len() != 0 {
+		// iterate through the parents (in a func so we can used defer)
+		err = func() error {
+			// pop parent branch id from stack
+			firstStackElement := stack.Front()
+			defer stack.Remove(firstStackElement)
+			parentBranchId := stack.Front().Value.(BranchId)
+
+			// abort if the parent has been processed already
+			if _, branchProcessed := processedBranches[parentBranchId]; branchProcessed {
+				return nil
+			}
+			processedBranches[parentBranchId] = types.Void
+
+			// load parent branch from database
+			cachedParentBranch := branchManager.GetBranch(parentBranchId)
+
+			// abort if the parent branch could not be found (should never happen)
+			parentBranch := cachedParentBranch.Unwrap()
+			if parentBranch == nil {
+				cachedParentBranch.Release()
+
+				return fmt.Errorf("failed to load branch '%s'", parentBranchId)
+			}
+
+			// if the parent Branch is not aggregated, then we have found the closest conflict ancestor
+			if !parentBranch.IsAggregated() {
+				closestConflictAncestors[parentBranchId] = cachedParentBranch
+
+				return nil
+			}
+
+			// queue parents for additional check (recursion)
+			for _, parentRealityId := range parentBranch.ParentBranches() {
+				stack.PushBack(parentRealityId)
+			}
+
+			// release the branch (we don't need it anymore)
+			cachedParentBranch.Release()
+
+			return nil
+		}()
+
+		if err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+// findDeepestCommonAncestorBranches takes a number of BranchIds and determines the most specialized Branches (furthest
+// away from the MasterBranch) in that list, that contains all of the named BranchIds.
+//
+// Example: If we hand in "A, B" and B has A as its parent, then the result will contain the Branch B, because B is a
+//          child of A.
+func (branchManager *BranchManager) findDeepestCommonAncestorBranches(branches ...BranchId) (result CachedBranches, err error) {
+	result = make(CachedBranches)
+
+	processedBranches := make(map[BranchId]types.Empty)
+	for _, branchId := range branches {
+		err = func() error {
+			// continue, if we have processed this branch already
+			if _, exists := processedBranches[branchId]; exists {
+				return nil
+			}
+			processedBranches[branchId] = types.Void
+
+			// load branch from objectstorage
+			cachedBranch := branchManager.GetBranch(branchId)
+
+			// abort if we could not load the CachedBranch
+			branch := cachedBranch.Unwrap()
+			if branch == nil {
+				cachedBranch.Release()
+
+				return fmt.Errorf("could not load branch '%s'", branchId)
+			}
+
+			// check branches position relative to already aggregated branches
+			for aggregatedBranchId, cachedAggregatedBranch := range result {
+				// abort if we can not load the branch
+				aggregatedBranch := cachedAggregatedBranch.Unwrap()
+				if aggregatedBranch == nil {
+					return fmt.Errorf("could not load branch '%s'", aggregatedBranchId)
+				}
+
+				// if the current branch is an ancestor of an already aggregated branch, then we have found the more
+				// "specialized" branch already and keep it
+				if isAncestor, ancestorErr := branchManager.branchIsAncestorOfBranch(branch, aggregatedBranch); isAncestor || ancestorErr != nil {
+					return ancestorErr
+				}
+
+				// check if the aggregated Branch is an ancestor of the current Branch and abort if we face an error
+				isAncestor, ancestorErr := branchManager.branchIsAncestorOfBranch(aggregatedBranch, branch)
+				if ancestorErr != nil {
+					return ancestorErr
+				}
+
+				// if the aggregated branch is an ancestor of the current branch, then we have found a more specialized
+				// Branch and replace the old one with this one.
+				if isAncestor {
+					// replace aggregated branch if we have found a more specialized on
+					delete(result, aggregatedBranchId)
+					cachedAggregatedBranch.Release()
+
+					result[branchId] = cachedBranch
+
+					return nil
+				}
+			}
+
+			// store the branch as a new aggregate candidate if it was not found to be in any relation with the already
+			// aggregated ones.
+			result[branchId] = cachedBranch
+
+			return nil
+		}()
+
+		// abort if an error occurred while processing the current branch
+		if err != nil {
+			result.Release()
+			result = nil
+
+			return
+		}
+	}
+
+	return
+}
+
+func (branchManager *BranchManager) branchIsAncestorOfBranch(ancestor *Branch, descendant *Branch) (isAncestor bool, err error) {
+	if ancestor.Id() == descendant.Id() {
+		return true, nil
+	}
+
+	ancestorBranches, err := branchManager.getAncestorBranches(descendant)
+	if err != nil {
+		return
+	}
+
+	ancestorBranches.Consume(func(ancestorOfDescendant *Branch) {
+		if ancestorOfDescendant.Id() == ancestor.Id() {
+			isAncestor = true
+		}
+	})
+
+	return
+}
+
+func (branchManager *BranchManager) getAncestorBranches(branch *Branch) (ancestorBranches CachedBranches, err error) {
+	// initialize result
+	ancestorBranches = make(CachedBranches)
+
+	// initialize stack
+	stack := list.New()
+	for _, parentRealityId := range branch.ParentBranches() {
+		stack.PushBack(parentRealityId)
+	}
+
+	// work through stack
+	for stack.Len() != 0 {
+		// iterate through the parents (in a func so we can used defer)
+		err = func() error {
+			// pop parent branch id from stack
+			firstStackElement := stack.Front()
+			defer stack.Remove(firstStackElement)
+			parentBranchId := stack.Front().Value.(BranchId)
+
+			// abort if the parent has been processed already
+			if _, branchProcessed := ancestorBranches[parentBranchId]; branchProcessed {
+				return nil
+			}
+
+			// load parent branch from database
+			cachedParentBranch := branchManager.GetBranch(parentBranchId)
+
+			// abort if the parent branch could not be founds (should never happen)
+			parentBranch := cachedParentBranch.Unwrap()
+			if parentBranch == nil {
+				cachedParentBranch.Release()
+
+				return fmt.Errorf("failed to unwrap branch '%s'", parentBranchId)
+			}
+
+			// store parent branch in result
+			ancestorBranches[parentBranchId] = cachedParentBranch
+
+			// queue parents for additional check (recursion)
+			for _, parentRealityId := range parentBranch.ParentBranches() {
+				stack.PushBack(parentRealityId)
+			}
+
+			return nil
+		}()
+
+		// abort if an error occurs while trying to process the parents
+		if err != nil {
+			ancestorBranches.Release()
+			ancestorBranches = nil
+
+			return
+		}
+	}
+
+	return
+}
+
+// Prune resets the database and deletes all objects (for testing or "node resets").
+func (branchManager *BranchManager) Prune() (err error) {
+	for _, storage := range []*objectstorage.ObjectStorage{
+		branchManager.branchStorage,
+	} {
+		if err = storage.Prune(); err != nil {
+			return
+		}
+	}
+
+	branchManager.init()
+
+	return
+}
diff --git a/dapps/valuetransfers/packages/branchmanager/child_branch.go b/dapps/valuetransfers/packages/branchmanager/child_branch.go
new file mode 100644
index 00000000..a4342628
--- /dev/null
+++ b/dapps/valuetransfers/packages/branchmanager/child_branch.go
@@ -0,0 +1,147 @@
+package branchmanager
+
+import (
+	"github.com/iotaledger/hive.go/marshalutil"
+	"github.com/iotaledger/hive.go/objectstorage"
+)
+
+type ChildBranch struct {
+	objectstorage.StorableObjectFlags
+
+	parentId BranchId
+	id       BranchId
+}
+
+func NewChildBranch(parentId BranchId, id BranchId) *ChildBranch {
+	return &ChildBranch{
+		parentId: parentId,
+		id:       id,
+	}
+}
+
+func ChildBranchFromBytes(bytes []byte, optionalTargetObject ...*ChildBranch) (result *ChildBranch, err error, consumedBytes int) {
+	marshalUtil := marshalutil.New(bytes)
+	result, err = ParseChildBranch(marshalUtil, optionalTargetObject...)
+	consumedBytes = marshalUtil.ReadOffset()
+
+	return
+}
+
+func ChildBranchFromStorageKey(key []byte, optionalTargetObject ...*ChildBranch) (result *ChildBranch, err error, consumedBytes int) {
+	// determine the target object that will hold the unmarshaled information
+	switch len(optionalTargetObject) {
+	case 0:
+		result = &ChildBranch{}
+	case 1:
+		result = optionalTargetObject[0]
+	default:
+		panic("too many arguments in call to ChildBranchFromStorageKey")
+	}
+
+	// parse the properties that are stored in the key
+	marshalUtil := marshalutil.New(key)
+	if result.parentId, err = ParseBranchId(marshalUtil); err != nil {
+		return
+	}
+	if result.id, err = ParseBranchId(marshalUtil); err != nil {
+		return
+	}
+	consumedBytes = marshalUtil.ReadOffset()
+
+	return
+}
+
+func ParseChildBranch(marshalUtil *marshalutil.MarshalUtil, optionalTargetObject ...*ChildBranch) (result *ChildBranch, err error) {
+	if parsedObject, parseErr := marshalUtil.Parse(func(data []byte) (interface{}, error, int) {
+		return ChildBranchFromStorageKey(data, optionalTargetObject...)
+	}); parseErr != nil {
+		err = parseErr
+
+		return
+	} else {
+		result = parsedObject.(*ChildBranch)
+	}
+
+	if _, err = marshalUtil.Parse(func(data []byte) (parseResult interface{}, parseErr error, parsedBytes int) {
+		parseErr, parsedBytes = result.UnmarshalObjectStorageValue(data)
+
+		return
+	}); err != nil {
+		return
+	}
+
+	return
+}
+
+func (childBranch *ChildBranch) ParentId() BranchId {
+	return childBranch.parentId
+}
+
+func (childBranch *ChildBranch) Id() BranchId {
+	return childBranch.id
+}
+
+func (childBranch ChildBranch) ObjectStorageKey() []byte {
+	return marshalutil.New(ConflictIdLength + BranchIdLength).
+		WriteBytes(childBranch.parentId.Bytes()).
+		WriteBytes(childBranch.id.Bytes()).
+		Bytes()
+}
+
+func (childBranch ChildBranch) ObjectStorageValue() []byte {
+	return nil
+}
+
+func (childBranch ChildBranch) UnmarshalObjectStorageValue([]byte) (err error, consumedBytes int) {
+	return
+}
+
+func (childBranch ChildBranch) Update(other objectstorage.StorableObject) {
+	panic("updates are disabled - use the setters")
+}
+
+var _ objectstorage.StorableObject = &ChildBranch{}
+
+type CachedChildBranch struct {
+	objectstorage.CachedObject
+}
+
+func (cachedChildBranch *CachedChildBranch) Retain() *CachedChildBranch {
+	return &CachedChildBranch{cachedChildBranch.CachedObject.Retain()}
+}
+
+func (cachedChildBranch *CachedChildBranch) Unwrap() *ChildBranch {
+	if untypedObject := cachedChildBranch.Get(); untypedObject == nil {
+		return nil
+	} else {
+		if typedObject := untypedObject.(*ChildBranch); typedObject == nil || typedObject.IsDeleted() {
+			return nil
+		} else {
+			return typedObject
+		}
+	}
+}
+
+func (cachedChildBranch *CachedChildBranch) Consume(consumer func(childBranch *ChildBranch), forceRelease ...bool) (consumed bool) {
+	return cachedChildBranch.CachedObject.Consume(func(object objectstorage.StorableObject) {
+		consumer(object.(*ChildBranch))
+	}, forceRelease...)
+}
+
+type CachedChildBranches []*CachedChildBranch
+
+func (cachedChildBranches CachedChildBranches) Consume(consumer func(childBranch *ChildBranch)) (consumed bool) {
+	for _, cachedChildBranch := range cachedChildBranches {
+		consumed = cachedChildBranch.Consume(func(output *ChildBranch) {
+			consumer(output)
+		}) || consumed
+	}
+
+	return
+}
+
+func (cachedChildBranches CachedChildBranches) Release(force ...bool) {
+	for _, cachedChildBranch := range cachedChildBranches {
+		cachedChildBranch.Release(force...)
+	}
+}
diff --git a/dapps/valuetransfers/packages/branchmanager/conflict.go b/dapps/valuetransfers/packages/branchmanager/conflict.go
new file mode 100644
index 00000000..e7d4f0e5
--- /dev/null
+++ b/dapps/valuetransfers/packages/branchmanager/conflict.go
@@ -0,0 +1,134 @@
+package branchmanager
+
+import (
+	"sync"
+
+	"github.com/iotaledger/hive.go/marshalutil"
+	"github.com/iotaledger/hive.go/objectstorage"
+	"github.com/iotaledger/hive.go/stringify"
+)
+
+// Conflict represents a set of branches that are conflicting with each other (because they i.e. spend the same
+// output). It keeps track of how many members the conflict set has.
+type Conflict struct {
+	objectstorage.StorableObjectFlags
+
+	// id is the identifier of the conflict (the OutputId that is spent by the conflicting transactions)
+	id          ConflictId
+	// memberCount is the number of branches that are part of this conflict set
+	memberCount uint32
+
+	// memberCountMutex guards concurrent access to memberCount
+	memberCountMutex sync.RWMutex
+}
+
+// NewConflict creates a new Conflict with the given id and a member count of zero.
+func NewConflict(id ConflictId) *Conflict {
+	return &Conflict{
+		id: id,
+	}
+}
+
+// Id returns the identifier of this conflict (immutable, so no locking required).
+func (conflict *Conflict) Id() ConflictId {
+	return conflict.id
+}
+
+// MemberCount returns the current number of branches that are part of this conflict set.
+func (conflict *Conflict) MemberCount() int {
+	conflict.memberCountMutex.RLock()
+	// BUG FIX: the deferred call previously re-acquired the read lock (RLock) instead of releasing it, which left
+	// the lock held forever and deadlocked any subsequent writer (Increase-/DecreaseMemberCount).
+	defer conflict.memberCountMutex.RUnlock()
+
+	return int(conflict.memberCount)
+}
+
+// IncreaseMemberCount increases the member count by the optionally given delta (1 if omitted) and returns the new
+// value.
+func (conflict *Conflict) IncreaseMemberCount(optionalDelta ...int) (newMemberCount int) {
+	delta := uint32(1)
+	if len(optionalDelta) >= 1 {
+		delta = uint32(optionalDelta[0])
+	}
+
+	conflict.memberCountMutex.Lock()
+	defer conflict.memberCountMutex.Unlock()
+
+	conflict.memberCount += delta
+
+	return int(conflict.memberCount)
+}
+
+// DecreaseMemberCount decreases the member count by the optionally given delta (1 if omitted) and returns the new
+// value.
+func (conflict *Conflict) DecreaseMemberCount(optionalDelta ...int) (newMemberCount int) {
+	delta := uint32(1)
+	if len(optionalDelta) >= 1 {
+		delta = uint32(optionalDelta[0])
+	}
+
+	conflict.memberCountMutex.Lock()
+	defer conflict.memberCountMutex.Unlock()
+
+	conflict.memberCount -= delta
+
+	return int(conflict.memberCount)
+}
+
+// Bytes returns a marshaled version of this conflict (key followed by value).
+func (conflict *Conflict) Bytes() []byte {
+	return marshalutil.New().
+		WriteBytes(conflict.ObjectStorageKey()).
+		WriteBytes(conflict.ObjectStorageValue()).
+		Bytes()
+}
+
+// String returns a human readable representation of this conflict.
+func (conflict *Conflict) String() string {
+	return stringify.Struct("Conflict",
+		stringify.StructField("id", conflict.id),
+		stringify.StructField("memberCount", conflict.MemberCount()),
+	)
+}
+
+// ObjectStorageKey returns the bytes that are used as the primary key in the object storage (the conflict id).
+func (conflict *Conflict) ObjectStorageKey() []byte {
+	return conflict.id.Bytes()
+}
+
+// ObjectStorageValue returns the value part of the marshaled conflict (the member count as uint32).
+func (conflict *Conflict) ObjectStorageValue() []byte {
+	return marshalutil.New(marshalutil.UINT32_SIZE).
+		WriteUint32(conflict.memberCount).
+		Bytes()
+}
+
+// UnmarshalObjectStorageValue restores the member count from the given value bytes (counterpart of
+// ObjectStorageValue).
+func (conflict *Conflict) UnmarshalObjectStorageValue(valueBytes []byte) (err error, consumedBytes int) {
+	marshalUtil := marshalutil.New(valueBytes)
+	conflict.memberCount, err = marshalUtil.ReadUint32()
+	if err != nil {
+		return
+	}
+	consumedBytes = marshalUtil.ReadOffset()
+
+	return
+}
+
+// Update is disabled for this object model; it panics if called (required by the StorableObject interface).
+func (conflict *Conflict) Update(other objectstorage.StorableObject) {
+	panic("updates are disabled - use the setters")
+}
+
+var _ objectstorage.StorableObject = &Conflict{}
+
+// CachedConflict is a wrapper around a cached Conflict that provides type-safe accessors.
+type CachedConflict struct {
+	objectstorage.CachedObject
+}
+
+// Retain registers an additional consumer of the cached object (it has to be released again later).
+func (cachedConflict *CachedConflict) Retain() *CachedConflict {
+	return &CachedConflict{cachedConflict.CachedObject.Retain()}
+}
+
+// Unwrap returns the underlying Conflict or nil if the object does not exist or was deleted.
+func (cachedConflict *CachedConflict) Unwrap() *Conflict {
+	untypedObject := cachedConflict.Get()
+	if untypedObject == nil {
+		return nil
+	}
+
+	typedObject := untypedObject.(*Conflict)
+	if typedObject == nil || typedObject.IsDeleted() {
+		return nil
+	}
+
+	return typedObject
+}
+
+// Consume unwraps the cached object, passes it to the consumer and releases it afterwards. It returns true if the
+// consumer was called (i.e. the object existed).
+func (cachedConflict *CachedConflict) Consume(consumer func(branch *Conflict), forceRelease ...bool) (consumed bool) {
+	return cachedConflict.CachedObject.Consume(func(object objectstorage.StorableObject) {
+		consumer(object.(*Conflict))
+	}, forceRelease...)
+}
diff --git a/dapps/valuetransfers/packages/branchmanager/conflict_id.go b/dapps/valuetransfers/packages/branchmanager/conflict_id.go
new file mode 100644
index 00000000..afbb3f58
--- /dev/null
+++ b/dapps/valuetransfers/packages/branchmanager/conflict_id.go
@@ -0,0 +1,14 @@
+package branchmanager
+
+import (
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+)
+
+// ConflictId is the type of the identifier of a Conflict. It is an alias of transaction.OutputId because conflicts
+// are identified by the output that the conflicting transactions spend.
+type ConflictId = transaction.OutputId
+
+var (
+	// ParseConflictId unmarshals a ConflictId using a MarshalUtil (alias of transaction.ParseOutputId).
+	ParseConflictId     = transaction.ParseOutputId
+	// ConflictIdFromBytes unmarshals a ConflictId from a sequence of bytes (alias of transaction.OutputIdFromBytes).
+	ConflictIdFromBytes = transaction.OutputIdFromBytes
+)
+
+// ConflictIdLength contains the length of a marshaled ConflictId in bytes.
+const ConflictIdLength = transaction.OutputIdLength
diff --git a/dapps/valuetransfers/packages/branchmanager/conflict_member.go b/dapps/valuetransfers/packages/branchmanager/conflict_member.go
new file mode 100644
index 00000000..61d78c2e
--- /dev/null
+++ b/dapps/valuetransfers/packages/branchmanager/conflict_member.go
@@ -0,0 +1,147 @@
+package branchmanager
+
+import (
+	"github.com/iotaledger/hive.go/marshalutil"
+	"github.com/iotaledger/hive.go/objectstorage"
+)
+
+// ConflictMember models the relationship between a Conflict and one of the branches that belongs to it.
+type ConflictMember struct {
+	objectstorage.StorableObjectFlags
+
+	// conflictId identifies the conflict set this membership belongs to
+	conflictId ConflictId
+	// branchId identifies the branch that is a member of the conflict set
+	branchId   BranchId
+}
+
+// NewConflictMember creates a new ConflictMember that links the given branch to the given conflict.
+func NewConflictMember(conflictId ConflictId, branchId BranchId) *ConflictMember {
+	return &ConflictMember{
+		conflictId: conflictId,
+		branchId:   branchId,
+	}
+}
+
+// ConflictMemberFromBytes unmarshals a ConflictMember from a sequence of bytes and returns the number of bytes that
+// were consumed while doing so.
+func ConflictMemberFromBytes(bytes []byte, optionalTargetObject ...*ConflictMember) (result *ConflictMember, err error, consumedBytes int) {
+	marshalUtil := marshalutil.New(bytes)
+	result, err = ParseConflictMember(marshalUtil, optionalTargetObject...)
+	consumedBytes = marshalUtil.ReadOffset()
+
+	return
+}
+
+// ConflictMemberFromStorageKey restores a ConflictMember from the key it is stored under in the object storage (the
+// key contains all of its information).
+func ConflictMemberFromStorageKey(key []byte, optionalTargetObject ...*ConflictMember) (result *ConflictMember, err error, consumedBytes int) {
+	// determine the target object that will hold the unmarshaled information
+	switch len(optionalTargetObject) {
+	case 0:
+		result = &ConflictMember{}
+	case 1:
+		result = optionalTargetObject[0]
+	default:
+		panic("too many arguments in call to ConflictMemberFromStorageKey")
+	}
+
+	// parse the properties that are stored in the key
+	marshalUtil := marshalutil.New(key)
+	if result.conflictId, err = ParseConflictId(marshalUtil); err != nil {
+		return
+	}
+	if result.branchId, err = ParseBranchId(marshalUtil); err != nil {
+		return
+	}
+	consumedBytes = marshalUtil.ReadOffset()
+
+	return
+}
+
+// ParseConflictMember unmarshals a ConflictMember using the given MarshalUtil (which keeps track of the read offset).
+func ParseConflictMember(marshalUtil *marshalutil.MarshalUtil, optionalTargetObject ...*ConflictMember) (result *ConflictMember, err error) {
+	// parse the key-encoded properties first
+	parsedObject, parseErr := marshalUtil.Parse(func(data []byte) (interface{}, error, int) {
+		return ConflictMemberFromStorageKey(data, optionalTargetObject...)
+	})
+	if parseErr != nil {
+		err = parseErr
+
+		return
+	}
+	result = parsedObject.(*ConflictMember)
+
+	// then parse the value-encoded properties (currently empty, but kept for symmetry with the other models)
+	if _, err = marshalUtil.Parse(func(data []byte) (parseResult interface{}, parseErr error, parsedBytes int) {
+		parseErr, parsedBytes = result.UnmarshalObjectStorageValue(data)
+
+		return
+	}); err != nil {
+		return
+	}
+
+	return
+}
+
+// ConflictId returns the identifier of the conflict set this membership belongs to.
+func (conflictMember *ConflictMember) ConflictId() ConflictId {
+	return conflictMember.conflictId
+}
+
+// BranchId returns the identifier of the branch that is a member of the conflict set.
+func (conflictMember *ConflictMember) BranchId() BranchId {
+	return conflictMember.branchId
+}
+
+// ObjectStorageKey returns the storage key of this entity: the conflict id followed by the branch id, so members of
+// the same conflict can be iterated with a key prefix.
+func (conflictMember ConflictMember) ObjectStorageKey() []byte {
+	return marshalutil.New(ConflictIdLength + BranchIdLength).
+		WriteBytes(conflictMember.conflictId.Bytes()).
+		WriteBytes(conflictMember.branchId.Bytes()).
+		Bytes()
+}
+
+// ObjectStorageValue returns the value part of the marshaled ConflictMember. All information is encoded in the key,
+// so the value is empty.
+func (conflictMember ConflictMember) ObjectStorageValue() []byte {
+	return nil
+}
+
+// UnmarshalObjectStorageValue restores the value part of a ConflictMember. Since there is no value, this is a no-op.
+func (conflictMember ConflictMember) UnmarshalObjectStorageValue([]byte) (err error, consumedBytes int) {
+	return
+}
+
+// Update is disabled for this object model; it panics if called (required by the StorableObject interface).
+func (conflictMember ConflictMember) Update(other objectstorage.StorableObject) {
+	panic("updates are disabled - use the setters")
+}
+
+var _ objectstorage.StorableObject = &ConflictMember{}
+
+// CachedConflictMember is a wrapper around a cached ConflictMember that provides type-safe accessors.
+type CachedConflictMember struct {
+	objectstorage.CachedObject
+}
+
+// Retain registers an additional consumer of the cached object (it has to be released again later).
+func (cachedConflictMember *CachedConflictMember) Retain() *CachedConflictMember {
+	return &CachedConflictMember{cachedConflictMember.CachedObject.Retain()}
+}
+
+// Unwrap returns the underlying ConflictMember or nil if the object does not exist or was deleted.
+func (cachedConflictMember *CachedConflictMember) Unwrap() *ConflictMember {
+	untypedObject := cachedConflictMember.Get()
+	if untypedObject == nil {
+		return nil
+	}
+
+	typedObject := untypedObject.(*ConflictMember)
+	if typedObject == nil || typedObject.IsDeleted() {
+		return nil
+	}
+
+	return typedObject
+}
+
+// Consume unwraps the cached object, passes it to the consumer and releases it afterwards. It returns true if the
+// consumer was called (i.e. the object existed).
+func (cachedConflictMember *CachedConflictMember) Consume(consumer func(conflictMember *ConflictMember), forceRelease ...bool) (consumed bool) {
+	return cachedConflictMember.CachedObject.Consume(func(object objectstorage.StorableObject) {
+		consumer(object.(*ConflictMember))
+	}, forceRelease...)
+}
+
+type CachedConflictMembers []*CachedConflictMember
+
+// Consume invokes the consumer for every existing object in the collection and releases all of them afterwards. It
+// returns true if at least one object was consumed.
+func (cachedConflictMembers CachedConflictMembers) Consume(consumer func(conflictMember *ConflictMember)) (consumed bool) {
+	for _, cachedConflictMember := range cachedConflictMembers {
+		consumed = cachedConflictMember.Consume(consumer) || consumed
+	}
+
+	return
+}
+
+// Release releases every cached object in the collection without consuming it.
+func (cachedConflictMembers CachedConflictMembers) Release(force ...bool) {
+	for _, cachedConflictMember := range cachedConflictMembers {
+		cachedConflictMember.Release(force...)
+	}
+}
diff --git a/dapps/valuetransfers/packages/branchmanager/events.go b/dapps/valuetransfers/packages/branchmanager/events.go
new file mode 100644
index 00000000..b74fd482
--- /dev/null
+++ b/dapps/valuetransfers/packages/branchmanager/events.go
@@ -0,0 +1,12 @@
+package branchmanager
+
+import (
+	"github.com/iotaledger/hive.go/events"
+)
+
+// Events contains the events that the branchmanager can trigger when the opinion on a branch changes.
+type Events struct {
+	// BranchPreferred is triggered when a branch becomes preferred
+	BranchPreferred   *events.Event
+	// BranchUnpreferred is triggered when a branch loses its preferred status
+	BranchUnpreferred *events.Event
+	// BranchLiked is triggered when a branch becomes liked
+	BranchLiked       *events.Event
+	// BranchDisliked is triggered when a branch loses its liked status
+	BranchDisliked    *events.Event
+}
diff --git a/dapps/valuetransfers/packages/branchmanager/objectstorage.go b/dapps/valuetransfers/packages/branchmanager/objectstorage.go
new file mode 100644
index 00000000..af513752
--- /dev/null
+++ b/dapps/valuetransfers/packages/branchmanager/objectstorage.go
@@ -0,0 +1,31 @@
+package branchmanager
+
+import (
+	"time"
+
+	"github.com/iotaledger/hive.go/objectstorage"
+)
+
+const (
+	// the following values are a list of prefixes defined as an enum
+	_ byte = iota
+
+	// prefixes used for the objectstorage
+	osBranch
+)
+
+var (
+	// osLeakDetectionOption configures the (currently disabled) leak detection for cached objects
+	osLeakDetectionOption = objectstorage.LeakDetectionEnabled(false, objectstorage.LeakDetectionOptions{
+		MaxConsumersPerObject: 10,
+		MaxConsumerHoldTime:   10 * time.Second,
+	})
+
+	// osBranchOptions holds the options that are used when creating the branch storage
+	osBranchOptions = []objectstorage.Option{
+		objectstorage.CacheTime(60 * time.Second),
+		osLeakDetectionOption,
+	}
+)
+
+// osBranchFactory restores a Branch from its storage key (used by the object storage to rehydrate objects).
+func osBranchFactory(key []byte) (objectstorage.StorableObject, error, int) {
+	return BranchFromStorageKey(key)
+}
diff --git a/dapps/valuetransfers/packages/ledgerstate/ledgerstate.go b/dapps/valuetransfers/packages/ledgerstate/ledgerstate.go
new file mode 100644
index 00000000..76bc1c0a
--- /dev/null
+++ b/dapps/valuetransfers/packages/ledgerstate/ledgerstate.go
@@ -0,0 +1,15 @@
+package ledgerstate
+
+import (
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/utxodag"
+)
+
+// LedgerState provides a read-facade on top of the UTXODAG that exposes the current ledger state.
+type LedgerState struct {
+	// utxoDAG is the underlying data structure that the ledger state is derived from
+	utxoDAG *utxodag.UTXODAG
+}
+
+// New creates a LedgerState that derives its information from the given UTXODAG.
+func New(utxoDAG *utxodag.UTXODAG) *LedgerState {
+	return &LedgerState{
+		utxoDAG: utxoDAG,
+	}
+}
diff --git a/packages/binary/valuetransfer/payload/id.go b/dapps/valuetransfers/packages/payload/id.go
similarity index 100%
rename from packages/binary/valuetransfer/payload/id.go
rename to dapps/valuetransfers/packages/payload/id.go
diff --git a/packages/binary/valuetransfer/payload/id_test.go b/dapps/valuetransfers/packages/payload/id_test.go
similarity index 100%
rename from packages/binary/valuetransfer/payload/id_test.go
rename to dapps/valuetransfers/packages/payload/id_test.go
diff --git a/packages/binary/valuetransfer/payload/payload.go b/dapps/valuetransfers/packages/payload/payload.go
similarity index 99%
rename from packages/binary/valuetransfer/payload/payload.go
rename to dapps/valuetransfers/packages/payload/payload.go
index 33fdf159..3aadd4af 100644
--- a/packages/binary/valuetransfer/payload/payload.go
+++ b/dapps/valuetransfers/packages/payload/payload.go
@@ -8,8 +8,8 @@ import (
 	"github.com/iotaledger/hive.go/stringify"
 	"golang.org/x/crypto/blake2b"
 
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 	"github.com/iotaledger/goshimmer/packages/binary/messagelayer/payload"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
 )
 
 type Payload struct {
diff --git a/packages/binary/valuetransfer/payload/payload_test.go b/dapps/valuetransfers/packages/payload/payload_test.go
similarity index 90%
rename from packages/binary/valuetransfer/payload/payload_test.go
rename to dapps/valuetransfers/packages/payload/payload_test.go
index f13f8c89..cee322a9 100644
--- a/packages/binary/valuetransfer/payload/payload_test.go
+++ b/dapps/valuetransfers/packages/payload/payload_test.go
@@ -9,11 +9,11 @@ import (
 	"github.com/iotaledger/hive.go/identity"
 	"github.com/stretchr/testify/assert"
 
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address/signaturescheme"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 	"github.com/iotaledger/goshimmer/packages/binary/messagelayer/message"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address/signaturescheme"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/balance"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
 )
 
 func ExamplePayload() {
@@ -28,7 +28,7 @@ func ExamplePayload() {
 		// outputs
 		transaction.NewOutputs(map[address.Address][]*balance.Balance{
 			address.Random(): {
-				balance.New(balance.COLOR_IOTA, 1337),
+				balance.New(balance.ColorIOTA, 1337),
 			},
 		}),
 	)
@@ -85,7 +85,7 @@ func TestPayload(t *testing.T) {
 
 			transaction.NewOutputs(map[address.Address][]*balance.Balance{
 				address.Random(): {
-					balance.New(balance.COLOR_IOTA, 1337),
+					balance.New(balance.ColorIOTA, 1337),
 				},
 			}),
 		).Sign(
diff --git a/packages/binary/valuetransfer/tangle/constants.go b/dapps/valuetransfers/packages/tangle/constants.go
similarity index 100%
rename from packages/binary/valuetransfer/tangle/constants.go
rename to dapps/valuetransfers/packages/tangle/constants.go
diff --git a/packages/binary/valuetransfer/tangle/events.go b/dapps/valuetransfers/packages/tangle/events.go
similarity index 58%
rename from packages/binary/valuetransfer/tangle/events.go
rename to dapps/valuetransfers/packages/tangle/events.go
index f36280cb..1e37eb6c 100644
--- a/packages/binary/valuetransfer/tangle/events.go
+++ b/dapps/valuetransfers/packages/tangle/events.go
@@ -3,8 +3,7 @@ package tangle
 import (
 	"github.com/iotaledger/hive.go/events"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/payload"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
 )
 
 type Events struct {
@@ -14,10 +13,6 @@ type Events struct {
 	MissingPayloadReceived *events.Event
 	PayloadMissing         *events.Event
 	PayloadUnsolidifiable  *events.Event
-	TransactionRemoved     *events.Event
-	OutputMissing          *events.Event
-
-	TransactionSolid *events.Event
 }
 
 func newEvents() *Events {
@@ -27,9 +22,6 @@ func newEvents() *Events {
 		MissingPayloadReceived: events.NewEvent(cachedPayloadEvent),
 		PayloadMissing:         events.NewEvent(payloadIdEvent),
 		PayloadUnsolidifiable:  events.NewEvent(payloadIdEvent),
-		OutputMissing:          events.NewEvent(outputIdEvent),
-
-		TransactionSolid: events.NewEvent(transactionEvent),
 	}
 }
 
@@ -43,14 +35,3 @@ func cachedPayloadEvent(handler interface{}, params ...interface{}) {
 		params[1].(*CachedPayloadMetadata).Retain(),
 	)
 }
-
-func transactionEvent(handler interface{}, params ...interface{}) {
-	handler.(func(*transaction.Transaction, *CachedTransactionMetadata))(
-		params[0].(*transaction.Transaction),
-		params[1].(*CachedTransactionMetadata).Retain(),
-	)
-}
-
-func outputIdEvent(handler interface{}, params ...interface{}) {
-	handler.(func(transaction.OutputId))(params[0].(transaction.OutputId))
-}
diff --git a/packages/binary/valuetransfer/tangle/missingpayload.go b/dapps/valuetransfers/packages/tangle/missingpayload.go
similarity index 98%
rename from packages/binary/valuetransfer/tangle/missingpayload.go
rename to dapps/valuetransfers/packages/tangle/missingpayload.go
index 28a06de7..910d6578 100644
--- a/packages/binary/valuetransfer/tangle/missingpayload.go
+++ b/dapps/valuetransfers/packages/tangle/missingpayload.go
@@ -6,7 +6,7 @@ import (
 	"github.com/iotaledger/hive.go/marshalutil"
 	"github.com/iotaledger/hive.go/objectstorage"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/payload"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
 )
 
 // MissingPayload represents a payload that was referenced through branch or trunk but that is missing in our object
diff --git a/packages/binary/valuetransfer/tangle/objectstorage.go b/dapps/valuetransfers/packages/tangle/objectstorage.go
similarity index 56%
rename from packages/binary/valuetransfer/tangle/objectstorage.go
rename to dapps/valuetransfers/packages/tangle/objectstorage.go
index 421fdb41..7523a67b 100644
--- a/packages/binary/valuetransfer/tangle/objectstorage.go
+++ b/dapps/valuetransfers/packages/tangle/objectstorage.go
@@ -3,7 +3,7 @@ package tangle
 import (
 	"github.com/iotaledger/hive.go/objectstorage"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/payload"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
 )
 
 const (
@@ -15,10 +15,6 @@ const (
 	osPayloadMetadata
 	osMissingPayload
 	osApprover
-	osAttachment
-	osOutput
-	osMissingOutput
-	osConsumer
 )
 
 func osPayloadFactory(key []byte) (objectstorage.StorableObject, error, int) {
@@ -36,19 +32,3 @@ func osMissingPayloadFactory(key []byte) (objectstorage.StorableObject, error, i
 func osPayloadApproverFactory(key []byte) (objectstorage.StorableObject, error, int) {
 	return PayloadApproverFromStorageKey(key)
 }
-
-func osAttachmentFactory(key []byte) (objectstorage.StorableObject, error, int) {
-	return AttachmentFromStorageKey(key)
-}
-
-func osOutputFactory(key []byte) (objectstorage.StorableObject, error, int) {
-	return OutputFromStorageKey(key)
-}
-
-func osMissingOutputFactory(key []byte) (objectstorage.StorableObject, error, int) {
-	return MissingOutputFromStorageKey(key)
-}
-
-func osConsumerFactory(key []byte) (objectstorage.StorableObject, error, int) {
-	return ConsumerFromStorageKey(key)
-}
diff --git a/packages/binary/valuetransfer/tangle/payloadapprover.go b/dapps/valuetransfers/packages/tangle/payloadapprover.go
similarity index 99%
rename from packages/binary/valuetransfer/tangle/payloadapprover.go
rename to dapps/valuetransfers/packages/tangle/payloadapprover.go
index 26aa76f6..ca605a01 100644
--- a/packages/binary/valuetransfer/tangle/payloadapprover.go
+++ b/dapps/valuetransfers/packages/tangle/payloadapprover.go
@@ -4,7 +4,7 @@ import (
 	"github.com/iotaledger/hive.go/marshalutil"
 	"github.com/iotaledger/hive.go/objectstorage"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/payload"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
 )
 
 // PayloadApprover is a database entity, that allows us to keep track of the "tangle structure" by encoding which
diff --git a/packages/binary/valuetransfer/tangle/payloadmetadata.go b/dapps/valuetransfers/packages/tangle/payloadmetadata.go
similarity index 99%
rename from packages/binary/valuetransfer/tangle/payloadmetadata.go
rename to dapps/valuetransfers/packages/tangle/payloadmetadata.go
index 981e64bc..3501c74a 100644
--- a/packages/binary/valuetransfer/tangle/payloadmetadata.go
+++ b/dapps/valuetransfers/packages/tangle/payloadmetadata.go
@@ -8,7 +8,7 @@ import (
 	"github.com/iotaledger/hive.go/objectstorage"
 	"github.com/iotaledger/hive.go/stringify"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/payload"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
 )
 
 // PayloadMetadata is a container for the metadata of a value transfer payload.
diff --git a/packages/binary/valuetransfer/tangle/payloadmetadata_test.go b/dapps/valuetransfers/packages/tangle/payloadmetadata_test.go
similarity index 95%
rename from packages/binary/valuetransfer/tangle/payloadmetadata_test.go
rename to dapps/valuetransfers/packages/tangle/payloadmetadata_test.go
index cd50258b..84cb2e5a 100644
--- a/packages/binary/valuetransfer/tangle/payloadmetadata_test.go
+++ b/dapps/valuetransfers/packages/tangle/payloadmetadata_test.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/stretchr/testify/assert"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/payload"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
 )
 
 func TestMarshalUnmarshal(t *testing.T) {
diff --git a/dapps/valuetransfers/packages/tangle/tangle.go b/dapps/valuetransfers/packages/tangle/tangle.go
new file mode 100644
index 00000000..240f0e9c
--- /dev/null
+++ b/dapps/valuetransfers/packages/tangle/tangle.go
@@ -0,0 +1,250 @@
+package tangle
+
+import (
+	"container/list"
+	"time"
+
+	"github.com/dgraph-io/badger/v2"
+	"github.com/iotaledger/hive.go/async"
+	"github.com/iotaledger/hive.go/objectstorage"
+
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
+	"github.com/iotaledger/goshimmer/packages/binary/storageprefix"
+)
+
+// Tangle represents the value tangle that consists out of value payloads.
+// It is an independent ontology, that lives inside the tangle.
+type Tangle struct {
+	// object storages for the different entity types
+	payloadStorage         *objectstorage.ObjectStorage
+	payloadMetadataStorage *objectstorage.ObjectStorage
+	approverStorage        *objectstorage.ObjectStorage
+	missingPayloadStorage  *objectstorage.ObjectStorage
+
+	// Events contains the events that are triggered during the lifecycle of payloads
+	Events Events
+
+	// worker pools that decouple storing and solidification from the callers
+	storePayloadWorkerPool async.WorkerPool
+	solidifierWorkerPool   async.WorkerPool
+	cleanupWorkerPool      async.WorkerPool
+}
+
+// New creates a value Tangle that persists its objects in the given badger database.
+func New(badgerInstance *badger.DB) (result *Tangle) {
+	osFactory := objectstorage.NewFactory(badgerInstance, storageprefix.ValueTransfers)
+
+	result = &Tangle{
+		payloadStorage:         osFactory.New(osPayload, osPayloadFactory, objectstorage.CacheTime(time.Second)),
+		payloadMetadataStorage: osFactory.New(osPayloadMetadata, osPayloadMetadataFactory, objectstorage.CacheTime(time.Second)),
+		missingPayloadStorage:  osFactory.New(osMissingPayload, osMissingPayloadFactory, objectstorage.CacheTime(time.Second)),
+		// approvers are partitioned by (approved payload, approving payload) so they can be iterated by prefix
+		approverStorage:        osFactory.New(osApprover, osPayloadApproverFactory, objectstorage.CacheTime(time.Second), objectstorage.PartitionKey(payload.IdLength, payload.IdLength), objectstorage.KeysOnly(true)),
+
+		Events: *newEvents(),
+	}
+
+	return
+}
+
+// AttachPayload adds a new payload to the value tangle. The heavy lifting is done asynchronously in a worker pool,
+// so this call returns immediately.
+func (tangle *Tangle) AttachPayload(payload *payload.Payload) {
+	tangle.storePayloadWorkerPool.Submit(func() { tangle.storePayloadWorker(payload) })
+}
+
+// GetPayload retrieves a payload from the object storage (the returned cached object has to be released).
+func (tangle *Tangle) GetPayload(payloadId payload.Id) *payload.CachedPayload {
+	return &payload.CachedPayload{CachedObject: tangle.payloadStorage.Load(payloadId.Bytes())}
+}
+
+// GetPayloadMetadata retrieves the metadata of a value payload from the object storage (the returned cached object
+// has to be released).
+func (tangle *Tangle) GetPayloadMetadata(payloadId payload.Id) *CachedPayloadMetadata {
+	return &CachedPayloadMetadata{CachedObject: tangle.payloadMetadataStorage.Load(payloadId.Bytes())}
+}
+
+// GetApprovers retrieves the approvers of a payload from the object storage by iterating over the keys that share
+// the payload id as prefix.
+func (tangle *Tangle) GetApprovers(payloadId payload.Id) CachedApprovers {
+	approvers := make(CachedApprovers, 0)
+	tangle.approverStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool {
+		approvers = append(approvers, &CachedPayloadApprover{CachedObject: cachedObject})
+
+		return true
+	}, payloadId.Bytes())
+
+	return approvers
+}
+
+// Shutdown stops the worker pools and shuts down the object storage instances. The worker pools are drained
+// gracefully before the storages are closed, so no in-flight work accesses a closed storage.
+func (tangle *Tangle) Shutdown() *Tangle {
+	tangle.storePayloadWorkerPool.ShutdownGracefully()
+	tangle.solidifierWorkerPool.ShutdownGracefully()
+	tangle.cleanupWorkerPool.ShutdownGracefully()
+
+	tangle.payloadStorage.Shutdown()
+	tangle.payloadMetadataStorage.Shutdown()
+	tangle.approverStorage.Shutdown()
+	tangle.missingPayloadStorage.Shutdown()
+
+	return tangle
+}
+
+// Prune resets the database and deletes all objects (for testing or "node resets"). It aborts on the first storage
+// that fails to prune and returns that error.
+func (tangle *Tangle) Prune() error {
+	for _, storage := range []*objectstorage.ObjectStorage{
+		tangle.payloadStorage,
+		tangle.payloadMetadataStorage,
+		tangle.approverStorage,
+		tangle.missingPayloadStorage,
+	} {
+		if err := storage.Prune(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// storePayloadWorker is the worker function that stores the payload and calls the corresponding storage events.
+func (tangle *Tangle) storePayloadWorker(payloadToStore *payload.Payload) {
+	// store the payload and transaction models
+	cachedPayload, cachedPayloadMetadata, payloadStored := tangle.storePayload(payloadToStore)
+	if !payloadStored {
+		// abort if we have seen the payload already
+		return
+	}
+
+	// store the references between the different entities (we do this after the actual entities were stored, so that
+	// all the metadata models exist in the database as soon as the entities are reachable by walks).
+	tangle.storePayloadReferences(payloadToStore)
+
+	// trigger events (MissingPayloadReceived only if the payload was previously registered as missing)
+	if tangle.missingPayloadStorage.DeleteIfPresent(payloadToStore.Id().Bytes()) {
+		tangle.Events.MissingPayloadReceived.Trigger(cachedPayload, cachedPayloadMetadata)
+	}
+	tangle.Events.PayloadAttached.Trigger(cachedPayload, cachedPayloadMetadata)
+
+	// check solidity (asynchronously; the solidifier takes over ownership of the cached objects)
+	tangle.solidifierWorkerPool.Submit(func() {
+		tangle.solidifyPayloadWorker(cachedPayload, cachedPayloadMetadata)
+	})
+}
+
+// storePayload stores the given payload and its freshly created metadata. payloadStored is false if the payload was
+// seen before (in which case the cached objects are nil).
+func (tangle *Tangle) storePayload(payloadToStore *payload.Payload) (cachedPayload *payload.CachedPayload, cachedMetadata *CachedPayloadMetadata, payloadStored bool) {
+	storedPayload, payloadIsNew := tangle.payloadStorage.StoreIfAbsent(payloadToStore)
+	if !payloadIsNew {
+		// abort if we have seen the payload before
+		return
+	}
+
+	cachedPayload = &payload.CachedPayload{CachedObject: storedPayload}
+	cachedMetadata = &CachedPayloadMetadata{CachedObject: tangle.payloadMetadataStorage.Store(NewPayloadMetadata(payloadToStore.Id()))}
+	payloadStored = true
+
+	return
+}
+
+// storePayloadReferences stores the approver references of the given payload, so the tangle structure can later be
+// walked from past to future.
+func (tangle *Tangle) storePayloadReferences(payload *payload.Payload) {
+	// store trunk approver
+	trunkId := payload.TrunkId()
+	tangle.approverStorage.Store(NewPayloadApprover(trunkId, payload.Id())).Release()
+
+	// store branch approver
+	if branchId := payload.BranchId(); branchId != trunkId {
+		// BUG FIX: the approving payload of the branch reference is this payload itself (payload.Id()), not the
+		// trunk - previously the trunk id was stored, which corrupted the approver index for branch references.
+		tangle.approverStorage.Store(NewPayloadApprover(branchId, payload.Id())).Release()
+	}
+}
+
+// popElementsFromSolidificationStack removes the first element from the stack and returns its payload/metadata pair
+// (elements are stored as [2]interface{} tuples).
+func (tangle *Tangle) popElementsFromSolidificationStack(stack *list.List) (*payload.CachedPayload, *CachedPayloadMetadata) {
+	currentSolidificationEntry := stack.Front()
+	currentCachedPayload := currentSolidificationEntry.Value.([2]interface{})[0]
+	currentCachedMetadata := currentSolidificationEntry.Value.([2]interface{})[1]
+	stack.Remove(currentSolidificationEntry)
+
+	return currentCachedPayload.(*payload.CachedPayload), currentCachedMetadata.(*CachedPayloadMetadata)
+}
+
+// solidifyPayloadWorker is the worker function that solidifies the payloads (recursively from past to present).
+func (tangle *Tangle) solidifyPayloadWorker(cachedPayload *payload.CachedPayload, cachedMetadata *CachedPayloadMetadata) {
+	// initialize the stack with the payload that triggered the check
+	solidificationStack := list.New()
+	solidificationStack.PushBack([2]interface{}{cachedPayload, cachedMetadata})
+
+	// process payloads that are supposed to be checked for solidity recursively
+	for solidificationStack.Len() > 0 {
+		// execute logic inside a func, so we can use defer to release the objects
+		func() {
+			// retrieve cached objects
+			currentCachedPayload, currentCachedMetadata := tangle.popElementsFromSolidificationStack(solidificationStack)
+			defer currentCachedPayload.Release()
+			defer currentCachedMetadata.Release()
+
+			// unwrap cached objects
+			currentPayload := currentCachedPayload.Unwrap()
+			currentPayloadMetadata := currentCachedMetadata.Unwrap()
+
+			// abort if any of the retrieved models is nil or payload is not solid or it was set as solid already
+			// (SetSolid returns false if the flag was already set, which prevents re-triggering the event)
+			if currentPayload == nil || currentPayloadMetadata == nil || !tangle.isPayloadSolid(currentPayload, currentPayloadMetadata) || !currentPayloadMetadata.SetSolid(true) {
+				return
+			}
+
+			// ... trigger solid event ...
+			tangle.Events.PayloadSolid.Trigger(currentCachedPayload, currentCachedMetadata)
+
+			// ... and schedule check of approvers
+			tangle.ForeachApprovers(currentPayload.Id(), func(payload *payload.CachedPayload, payloadMetadata *CachedPayloadMetadata) {
+				solidificationStack.PushBack([2]interface{}{payload, payloadMetadata})
+			})
+		}()
+	}
+}
+
+// ForeachApprovers invokes the consumer for every payload that approves the payload with the given id, handing over
+// retained cached objects for the approving payload and its metadata.
+func (tangle *Tangle) ForeachApprovers(payloadId payload.Id, consume func(payload *payload.CachedPayload, payloadMetadata *CachedPayloadMetadata)) {
+	tangle.GetApprovers(payloadId).Consume(func(approver *PayloadApprover) {
+		approvingPayloadId := approver.GetApprovingPayloadId()
+		approvingCachedPayload := tangle.GetPayload(approvingPayloadId)
+
+		approvingCachedPayload.Consume(func(payload *payload.Payload) {
+			consume(approvingCachedPayload, tangle.GetPayloadMetadata(approvingPayloadId))
+		})
+	})
+}
+
+// isPayloadSolid returns true if the given payload is solid. A payload is considered to be solid, if it is either
+// already marked as solid or if its referenced payloads are marked as solid.
+func (tangle *Tangle) isPayloadSolid(payload *payload.Payload, metadata *PayloadMetadata) bool {
+	if payload == nil || payload.IsDeleted() {
+		return false
+	}
+
+	if metadata == nil || metadata.IsDeleted() {
+		return false
+	}
+
+	// short-circuit if the solid flag was set already
+	if metadata.IsSolid() {
+		return true
+	}
+
+	// otherwise both referenced payloads (trunk and branch) have to be solid
+	return tangle.isPayloadMarkedAsSolid(payload.TrunkId()) && tangle.isPayloadMarkedAsSolid(payload.BranchId())
+}
+
+// isPayloadMarkedAsSolid returns true if the payload was marked as solid already (by setting the corresponding flags
+// in its metadata). If the payload is unknown, it is registered as missing and the PayloadMissing event is triggered.
+func (tangle *Tangle) isPayloadMarkedAsSolid(payloadId payload.Id) bool {
+	// the genesis payload is solid by definition
+	if payloadId == payload.GenesisId {
+		return true
+	}
+
+	cachedMetadata := tangle.GetPayloadMetadata(payloadId)
+	defer cachedMetadata.Release()
+
+	metadata := cachedMetadata.Unwrap()
+	if metadata == nil {
+		// if the payload is missing and was not reported as missing, yet
+		if cachedMissingPayload, missingPayloadStored := tangle.missingPayloadStorage.StoreIfAbsent(NewMissingPayload(payloadId)); missingPayloadStored {
+			cachedMissingPayload.Consume(func(object objectstorage.StorableObject) {
+				tangle.Events.PayloadMissing.Trigger(object.(*MissingPayload).GetId())
+			})
+		}
+
+		return false
+	}
+
+	return metadata.IsSolid()
+}
diff --git a/packages/binary/valuetransfer/transaction/id.go b/dapps/valuetransfers/packages/transaction/id.go
similarity index 100%
rename from packages/binary/valuetransfer/transaction/id.go
rename to dapps/valuetransfers/packages/transaction/id.go
diff --git a/packages/binary/valuetransfer/transaction/inputs.go b/dapps/valuetransfers/packages/transaction/inputs.go
similarity index 98%
rename from packages/binary/valuetransfer/transaction/inputs.go
rename to dapps/valuetransfers/packages/transaction/inputs.go
index 255a4850..4c34103f 100644
--- a/packages/binary/valuetransfer/transaction/inputs.go
+++ b/dapps/valuetransfers/packages/transaction/inputs.go
@@ -1,8 +1,8 @@
 package transaction
 
 import (
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
 	"github.com/iotaledger/goshimmer/packages/binary/datastructure/orderedmap"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
 
 	"github.com/iotaledger/hive.go/marshalutil"
 )
diff --git a/packages/binary/valuetransfer/transaction/outputid.go b/dapps/valuetransfers/packages/transaction/outputid.go
similarity index 96%
rename from packages/binary/valuetransfer/transaction/outputid.go
rename to dapps/valuetransfers/packages/transaction/outputid.go
index 6f0640c2..95fbe0c6 100644
--- a/packages/binary/valuetransfer/transaction/outputid.go
+++ b/dapps/valuetransfers/packages/transaction/outputid.go
@@ -5,7 +5,7 @@ import (
 
 	"github.com/iotaledger/hive.go/marshalutil"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
 )
 
 // OutputId is the data type that represents the identifier for a Output.
diff --git a/packages/binary/valuetransfer/transaction/outputs.go b/dapps/valuetransfers/packages/transaction/outputs.go
similarity index 96%
rename from packages/binary/valuetransfer/transaction/outputs.go
rename to dapps/valuetransfers/packages/transaction/outputs.go
index 8e1cea98..dc208a29 100644
--- a/packages/binary/valuetransfer/transaction/outputs.go
+++ b/dapps/valuetransfers/packages/transaction/outputs.go
@@ -1,11 +1,12 @@
 package transaction
 
 import (
-	"github.com/iotaledger/goshimmer/packages/binary/datastructure/orderedmap"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
 	"github.com/iotaledger/hive.go/marshalutil"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/balance"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/packages/binary/datastructure/orderedmap"
+
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
 )
 
 type Outputs struct {
diff --git a/packages/binary/valuetransfer/transaction/signatures.go b/dapps/valuetransfers/packages/transaction/signatures.go
similarity index 97%
rename from packages/binary/valuetransfer/transaction/signatures.go
rename to dapps/valuetransfers/packages/transaction/signatures.go
index fd9a1424..79e2fa83 100644
--- a/packages/binary/valuetransfer/transaction/signatures.go
+++ b/dapps/valuetransfers/packages/transaction/signatures.go
@@ -3,9 +3,9 @@ package transaction
 import (
 	"github.com/iotaledger/hive.go/marshalutil"
 
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address/signaturescheme"
 	"github.com/iotaledger/goshimmer/packages/binary/datastructure/orderedmap"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address/signaturescheme"
 )
 
 // Signatures represents a container for the address signatures of a value transfer.
diff --git a/packages/binary/valuetransfer/transaction/signatures_test.go b/dapps/valuetransfers/packages/transaction/signatures_test.go
similarity index 91%
rename from packages/binary/valuetransfer/transaction/signatures_test.go
rename to dapps/valuetransfers/packages/transaction/signatures_test.go
index a344780c..cc8aecef 100644
--- a/packages/binary/valuetransfer/transaction/signatures_test.go
+++ b/dapps/valuetransfers/packages/transaction/signatures_test.go
@@ -6,8 +6,8 @@ import (
 	"github.com/iotaledger/hive.go/crypto/ed25519"
 	"github.com/stretchr/testify/assert"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address/signaturescheme"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address/signaturescheme"
 )
 
 func TestSignatures(t *testing.T) {
diff --git a/packages/binary/valuetransfer/transaction/transaction.go b/dapps/valuetransfers/packages/transaction/transaction.go
similarity index 98%
rename from packages/binary/valuetransfer/transaction/transaction.go
rename to dapps/valuetransfers/packages/transaction/transaction.go
index b1b4a918..fa82a911 100644
--- a/packages/binary/valuetransfer/transaction/transaction.go
+++ b/dapps/valuetransfers/packages/transaction/transaction.go
@@ -10,8 +10,8 @@ import (
 	"github.com/mr-tron/base58"
 	"golang.org/x/crypto/blake2b"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address/signaturescheme"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address/signaturescheme"
 )
 
 // region IMPLEMENT Transaction ////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/packages/binary/valuetransfer/transaction/transaction_test.go b/dapps/valuetransfers/packages/transaction/transaction_test.go
similarity index 88%
rename from packages/binary/valuetransfer/transaction/transaction_test.go
rename to dapps/valuetransfers/packages/transaction/transaction_test.go
index 7235fffd..51a7b2aa 100644
--- a/packages/binary/valuetransfer/transaction/transaction_test.go
+++ b/dapps/valuetransfers/packages/transaction/transaction_test.go
@@ -5,11 +5,12 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address/signaturescheme"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/balance"
 	"github.com/iotaledger/hive.go/crypto/ed25519"
 	"github.com/stretchr/testify/assert"
+
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address/signaturescheme"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
 )
 
 func TestEmptyDataPayload(t *testing.T) {
@@ -17,7 +18,7 @@ func TestEmptyDataPayload(t *testing.T) {
 	addr := sigScheme.Address()
 	o1 := NewOutputId(addr, RandomId())
 	inputs := NewInputs(o1)
-	bal := balance.New(balance.COLOR_IOTA, 1)
+	bal := balance.New(balance.ColorIOTA, 1)
 	outputs := NewOutputs(map[address.Address][]*balance.Balance{addr: {bal}})
 	tx := New(inputs, outputs)
 	tx.Sign(sigScheme)
@@ -31,7 +32,7 @@ func TestShortDataPayload(t *testing.T) {
 	addr := sigScheme.Address()
 	o1 := NewOutputId(addr, RandomId())
 	inputs := NewInputs(o1)
-	bal := balance.New(balance.COLOR_IOTA, 1)
+	bal := balance.New(balance.ColorIOTA, 1)
 	outputs := NewOutputs(map[address.Address][]*balance.Balance{addr: {bal}})
 	tx := New(inputs, outputs)
 
@@ -63,7 +64,7 @@ func TestTooLongDataPayload(t *testing.T) {
 	addr := sigScheme.Address()
 	o1 := NewOutputId(addr, RandomId())
 	inputs := NewInputs(o1)
-	bal := balance.New(balance.COLOR_IOTA, 1)
+	bal := balance.New(balance.ColorIOTA, 1)
 	outputs := NewOutputs(map[address.Address][]*balance.Balance{addr: {bal}})
 	tx := New(inputs, outputs)
 
@@ -77,7 +78,7 @@ func TestMarshalingEmptyDataPayload(t *testing.T) {
 	addr := sigScheme.Address()
 	o1 := NewOutputId(addr, RandomId())
 	inputs := NewInputs(o1)
-	bal := balance.New(balance.COLOR_IOTA, 1)
+	bal := balance.New(balance.ColorIOTA, 1)
 	outputs := NewOutputs(map[address.Address][]*balance.Balance{addr: {bal}})
 	tx := New(inputs, outputs)
 
@@ -101,7 +102,7 @@ func TestMarshalingDataPayload(t *testing.T) {
 	addr := sigScheme.Address()
 	o1 := NewOutputId(addr, RandomId())
 	inputs := NewInputs(o1)
-	bal := balance.New(balance.COLOR_IOTA, 1)
+	bal := balance.New(balance.ColorIOTA, 1)
 	outputs := NewOutputs(map[address.Address][]*balance.Balance{addr: {bal}})
 	tx := New(inputs, outputs)
 
diff --git a/packages/binary/valuetransfer/tangle/attachment.go b/dapps/valuetransfers/packages/utxodag/attachment.go
similarity index 94%
rename from packages/binary/valuetransfer/tangle/attachment.go
rename to dapps/valuetransfers/packages/utxodag/attachment.go
index ae79f941..54dfa8db 100644
--- a/packages/binary/valuetransfer/tangle/attachment.go
+++ b/dapps/valuetransfers/packages/utxodag/attachment.go
@@ -1,12 +1,12 @@
-package tangle
+package utxodag
 
 import (
 	"github.com/iotaledger/hive.go/marshalutil"
 	"github.com/iotaledger/hive.go/objectstorage"
 	"github.com/iotaledger/hive.go/stringify"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/payload"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 )
 
 // Attachment stores the information which transaction was attached by which payload. We need this to be able to perform
@@ -150,6 +150,11 @@ type CachedAttachment struct {
 	objectstorage.CachedObject
 }
 
+// Retain overrides the underlying method to return a new CachedAttachment instead of a generic CachedObject.
+func (cachedAttachment *CachedAttachment) Retain() *CachedAttachment {
+	return &CachedAttachment{cachedAttachment.CachedObject.Retain()}
+}
+
 func (cachedAttachment *CachedAttachment) Unwrap() *Attachment {
 	if untypedObject := cachedAttachment.Get(); untypedObject == nil {
 		return nil
diff --git a/packages/binary/valuetransfer/tangle/consumer.go b/dapps/valuetransfers/packages/utxodag/consumer.go
similarity index 97%
rename from packages/binary/valuetransfer/tangle/consumer.go
rename to dapps/valuetransfers/packages/utxodag/consumer.go
index b1ff096c..34a0ac29 100644
--- a/packages/binary/valuetransfer/tangle/consumer.go
+++ b/dapps/valuetransfers/packages/utxodag/consumer.go
@@ -1,12 +1,12 @@
-package tangle
+package utxodag
 
 import (
 	"github.com/iotaledger/hive.go/marshalutil"
 	"github.com/iotaledger/hive.go/objectstorage"
 	"github.com/iotaledger/hive.go/stringify"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 )
 
 var ConsumerPartitionKeys = objectstorage.PartitionKey([]int{address.Length, transaction.IdLength, transaction.IdLength}...)
diff --git a/dapps/valuetransfers/packages/utxodag/events.go b/dapps/valuetransfers/packages/utxodag/events.go
new file mode 100644
index 00000000..0ef598b6
--- /dev/null
+++ b/dapps/valuetransfers/packages/utxodag/events.go
@@ -0,0 +1,50 @@
+package utxodag
+
+import (
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+
+	"github.com/iotaledger/hive.go/events"
+)
+
+type Events struct {
+	// TransactionReceived gets called whenever a transaction is received
+	TransactionReceived *events.Event
+	TransactionBooked   *events.Event
+	Fork                *events.Event
+}
+
+func newEvents() *Events {
+	return &Events{
+		TransactionReceived: events.NewEvent(cachedTransactionEvent),
+		TransactionBooked:   events.NewEvent(transactionBookedEvent),
+		Fork:                events.NewEvent(forkEvent),
+	}
+}
+
+func transactionBookedEvent(handler interface{}, params ...interface{}) {
+	handler.(func(*transaction.CachedTransaction, *CachedTransactionMetadata, *branchmanager.CachedBranch, []transaction.OutputId, bool))(
+		params[0].(*transaction.CachedTransaction).Retain(),
+		params[1].(*CachedTransactionMetadata).Retain(),
+		params[2].(*branchmanager.CachedBranch).Retain(),
+		params[3].([]transaction.OutputId),
+		params[4].(bool),
+	)
+}
+
+func forkEvent(handler interface{}, params ...interface{}) {
+	handler.(func(*transaction.CachedTransaction, *CachedTransactionMetadata, *branchmanager.CachedBranch, []transaction.OutputId))(
+		params[0].(*transaction.CachedTransaction).Retain(),
+		params[1].(*CachedTransactionMetadata).Retain(),
+		params[2].(*branchmanager.CachedBranch).Retain(),
+		params[3].([]transaction.OutputId),
+	)
+}
+
+func cachedTransactionEvent(handler interface{}, params ...interface{}) {
+	handler.(func(*transaction.CachedTransaction, *CachedTransactionMetadata, *CachedAttachment))(
+		params[0].(*transaction.CachedTransaction).Retain(),
+		params[1].(*CachedTransactionMetadata).Retain(),
+		params[2].(*CachedAttachment).Retain(),
+	)
+}
diff --git a/packages/binary/valuetransfer/tangle/missingoutput.go b/dapps/valuetransfers/packages/utxodag/missingoutput.go
similarity index 96%
rename from packages/binary/valuetransfer/tangle/missingoutput.go
rename to dapps/valuetransfers/packages/utxodag/missingoutput.go
index 671eec10..976111b9 100644
--- a/packages/binary/valuetransfer/tangle/missingoutput.go
+++ b/dapps/valuetransfers/packages/utxodag/missingoutput.go
@@ -1,4 +1,4 @@
-package tangle
+package utxodag
 
 import (
 	"time"
@@ -6,8 +6,8 @@ import (
 	"github.com/iotaledger/hive.go/marshalutil"
 	"github.com/iotaledger/hive.go/objectstorage"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 )
 
 var MissingOutputKeyPartitions = objectstorage.PartitionKey([]int{address.Length, transaction.IdLength}...)
diff --git a/dapps/valuetransfers/packages/utxodag/objectstorage.go b/dapps/valuetransfers/packages/utxodag/objectstorage.go
new file mode 100644
index 00000000..e3cb65c4
--- /dev/null
+++ b/dapps/valuetransfers/packages/utxodag/objectstorage.go
@@ -0,0 +1,48 @@
+package utxodag
+
+import (
+	"time"
+
+	"github.com/iotaledger/hive.go/objectstorage"
+
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+)
+
+const (
+	// the following values are a list of prefixes defined as an enum
+	_ byte = iota
+
+	// prefixes used for the objectstorage
+	osTransaction
+	osTransactionMetadata
+	osAttachment
+	osOutput
+	osConsumer
+)
+
+var (
+	osLeakDetectionOption = objectstorage.LeakDetectionEnabled(true, objectstorage.LeakDetectionOptions{
+		MaxConsumersPerObject: 10,
+		MaxConsumerHoldTime:   10 * time.Second,
+	})
+)
+
+func osTransactionFactory(key []byte) (objectstorage.StorableObject, error, int) {
+	return transaction.FromStorageKey(key)
+}
+
+func osTransactionMetadataFactory(key []byte) (objectstorage.StorableObject, error, int) {
+	return TransactionMetadataFromStorageKey(key)
+}
+
+func osAttachmentFactory(key []byte) (objectstorage.StorableObject, error, int) {
+	return AttachmentFromStorageKey(key)
+}
+
+func osOutputFactory(key []byte) (objectstorage.StorableObject, error, int) {
+	return OutputFromStorageKey(key)
+}
+
+func osConsumerFactory(key []byte) (objectstorage.StorableObject, error, int) {
+	return ConsumerFromStorageKey(key)
+}
diff --git a/packages/binary/valuetransfer/tangle/output.go b/dapps/valuetransfers/packages/utxodag/output.go
similarity index 87%
rename from packages/binary/valuetransfer/tangle/output.go
rename to dapps/valuetransfers/packages/utxodag/output.go
index 1696031b..b24a1130 100644
--- a/packages/binary/valuetransfer/tangle/output.go
+++ b/dapps/valuetransfers/packages/utxodag/output.go
@@ -1,4 +1,4 @@
-package tangle
+package utxodag
 
 import (
 	"sync"
@@ -8,9 +8,10 @@ import (
 	"github.com/iotaledger/hive.go/objectstorage"
 	"github.com/iotaledger/hive.go/stringify"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/balance"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 )
 
 var OutputKeyPartitions = objectstorage.PartitionKey([]int{address.Length, transaction.IdLength}...)
@@ -19,13 +20,14 @@ var OutputKeyPartitions = objectstorage.PartitionKey([]int{address.Length, trans
 type Output struct {
 	address            address.Address
 	transactionId      transaction.Id
-	branchId           BranchId
+	branchId           branchmanager.BranchId
 	solid              bool
 	solidificationTime time.Time
 	firstConsumer      transaction.Id
 	consumerCount      int
 	balances           []*balance.Balance
 
+	branchIdMutex           sync.RWMutex
 	solidMutex              sync.RWMutex
 	solidificationTimeMutex sync.RWMutex
 	consumerMutex           sync.RWMutex
@@ -35,7 +37,7 @@ type Output struct {
 }
 
 // NewOutput creates an Output that contains the balances and identifiers of a Transaction.
-func NewOutput(address address.Address, transactionId transaction.Id, branchId BranchId, balances []*balance.Balance) *Output {
+func NewOutput(address address.Address, transactionId transaction.Id, branchId branchmanager.BranchId, balances []*balance.Balance) *Output {
 	return &Output{
 		address:            address,
 		transactionId:      transactionId,
@@ -91,7 +93,7 @@ func OutputFromStorageKey(keyBytes []byte, optionalTargetObject ...*Output) (res
 	case 1:
 		result = optionalTargetObject[0]
 	default:
-		panic("too many arguments in call to OutputFromBytes")
+		panic("too many arguments in call to OutputFromStorageKey")
 	}
 
 	// parse information
@@ -125,10 +127,35 @@ func (output *Output) TransactionId() transaction.Id {
 }
 
 // BranchId returns the id of the ledger state branch, that this output was booked in.
-func (output *Output) BranchId() BranchId {
+func (output *Output) BranchId() branchmanager.BranchId {
+	output.branchIdMutex.RLock()
+	defer output.branchIdMutex.RUnlock()
+
 	return output.branchId
 }
 
+func (output *Output) SetBranchId(branchId branchmanager.BranchId) (modified bool) {
+	output.branchIdMutex.RLock()
+	if output.branchId == branchId {
+		output.branchIdMutex.RUnlock()
+
+		return
+	}
+
+	output.branchIdMutex.RUnlock()
+	output.branchIdMutex.Lock()
+	defer output.branchIdMutex.Unlock()
+
+	if output.branchId == branchId {
+		return
+	}
+
+	output.branchId = branchId
+	modified = true
+
+	return
+}
+
 // Solid returns true if the output has been marked as solid.
 func (output *Output) Solid() bool {
 	output.solidMutex.RLock()
@@ -214,7 +241,7 @@ func (output *Output) ObjectStorageValue() []byte {
 	balanceCount := len(output.balances)
 
 	// initialize helper
-	marshalUtil := marshalutil.New(BranchIdLength + marshalutil.BOOL_SIZE + marshalutil.TIME_SIZE + marshalutil.UINT32_SIZE + balanceCount*balance.Length)
+	marshalUtil := marshalutil.New(branchmanager.BranchIdLength + marshalutil.BOOL_SIZE + marshalutil.TIME_SIZE + marshalutil.UINT32_SIZE + balanceCount*balance.Length)
 	marshalUtil.WriteBytes(output.branchId.Bytes())
 	marshalUtil.WriteBool(output.solid)
 	marshalUtil.WriteTime(output.solidificationTime)
@@ -230,7 +257,7 @@ func (output *Output) ObjectStorageValue() []byte {
 // being stored in its key rather than the content of the database to reduce storage requirements.
 func (output *Output) UnmarshalObjectStorageValue(data []byte) (err error, consumedBytes int) {
 	marshalUtil := marshalutil.New(data)
-	if output.branchId, err = ParseBranchId(marshalUtil); err != nil {
+	if output.branchId, err = branchmanager.ParseBranchId(marshalUtil); err != nil {
 		return
 	}
 	if output.solid, err = marshalUtil.ReadBool(); err != nil {
diff --git a/packages/binary/valuetransfer/tangle/transactionmetadata.go b/dapps/valuetransfers/packages/utxodag/transactionmetadata.go
similarity index 60%
rename from packages/binary/valuetransfer/tangle/transactionmetadata.go
rename to dapps/valuetransfers/packages/utxodag/transactionmetadata.go
index 98a36a74..d7f75b49 100644
--- a/packages/binary/valuetransfer/tangle/transactionmetadata.go
+++ b/dapps/valuetransfers/packages/utxodag/transactionmetadata.go
@@ -1,4 +1,4 @@
-package tangle
+package utxodag
 
 import (
 	"sync"
@@ -8,7 +8,8 @@ import (
 	"github.com/iotaledger/hive.go/objectstorage"
 	"github.com/iotaledger/hive.go/stringify"
 
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
 )
 
 // TransactionMetadata contains the information of a Transaction, that are based on our local perception of things (i.e. if it is
@@ -17,10 +18,15 @@ type TransactionMetadata struct {
 	objectstorage.StorableObjectFlags
 
 	id                 transaction.Id
+	branchId           branchmanager.BranchId
 	solid              bool
+	finalized          bool
 	solidificationTime time.Time
+	finalizationTime   time.Time
 
+	branchIdMutex           sync.RWMutex
 	solidMutex              sync.RWMutex
+	finalizedMutex          sync.RWMutex
 	solidificationTimeMutex sync.RWMutex
 }
 
@@ -34,6 +40,17 @@ func NewTransactionMetadata(id transaction.Id) *TransactionMetadata {
 // TransactionMetadataFromBytes unmarshals a TransactionMetadata object from a sequence of bytes.
 // It either creates a new object or fills the optionally provided object with the parsed information.
 func TransactionMetadataFromBytes(bytes []byte, optionalTargetObject ...*TransactionMetadata) (result *TransactionMetadata, err error, consumedBytes int) {
+	marshalUtil := marshalutil.New(bytes)
+	result, err = ParseTransactionMetadata(marshalUtil, optionalTargetObject...)
+	consumedBytes = marshalUtil.ReadOffset()
+
+	return
+}
+
+// TransactionMetadataFromStorageKey gets called when we restore TransactionMetadata from the storage.
+// In contrast to other database models, it unmarshals some information from the key so we simply store the key before
+// it gets handed over to UnmarshalObjectStorageValue (by the ObjectStorage).
+func TransactionMetadataFromStorageKey(keyBytes []byte, optionalTargetObject ...*TransactionMetadata) (result *TransactionMetadata, err error, consumedBytes int) {
 	// determine the target object that will hold the unmarshaled information
 	switch len(optionalTargetObject) {
 	case 0:
@@ -41,18 +58,13 @@ func TransactionMetadataFromBytes(bytes []byte, optionalTargetObject ...*Transac
 	case 1:
 		result = optionalTargetObject[0]
 	default:
-		panic("too many arguments in call to TransactionMetadataFromBytes")
+		panic("too many arguments in call to TransactionMetadataFromStorageKey")
 	}
 
-	// parse the bytes
-	marshalUtil := marshalutil.New(bytes)
-	if result.id, err = transaction.ParseId(marshalUtil); err != nil {
-		return
-	}
-	if result.solidificationTime, err = marshalUtil.ReadTime(); err != nil {
-		return
-	}
-	if result.solid, err = marshalUtil.ReadBool(); err != nil {
+	// parse information
+	marshalUtil := marshalutil.New(keyBytes)
+	result.id, err = transaction.ParseId(marshalUtil)
+	if err != nil {
 		return
 	}
 	consumedBytes = marshalUtil.ReadOffset()
@@ -60,26 +72,27 @@ func TransactionMetadataFromBytes(bytes []byte, optionalTargetObject ...*Transac
 	return
 }
 
-// TransactionMetadataFromStorage is the factory method for TransactionMetadata objects stored in the objectstorage. The bytes and the content
-// will be filled by the objectstorage, by subsequently calling ObjectStorageValue.
-func TransactionMetadataFromStorage(storageKey []byte) objectstorage.StorableObject {
-	result := &TransactionMetadata{}
+// ParseTransactionMetadata is a wrapper for simplified unmarshaling of TransactionMetadata objects from a byte stream using the marshalUtil package.
+func ParseTransactionMetadata(marshalUtil *marshalutil.MarshalUtil, optionalTargetObject ...*TransactionMetadata) (result *TransactionMetadata, err error) {
+	if parsedObject, parseErr := marshalUtil.Parse(func(data []byte) (interface{}, error, int) {
+		return TransactionMetadataFromStorageKey(data, optionalTargetObject...)
+	}); parseErr != nil {
+		err = parseErr
 
-	var err error
-	if result.id, err = transaction.ParseId(marshalutil.New(storageKey)); err != nil {
-		panic(err)
+		return
+	} else {
+		result = parsedObject.(*TransactionMetadata)
 	}
 
-	return result
-}
+	if _, err = marshalUtil.Parse(func(data []byte) (parseResult interface{}, parseErr error, parsedBytes int) {
+		parseErr, parsedBytes = result.UnmarshalObjectStorageValue(data)
 
-// Parse is a wrapper for simplified unmarshaling of TransactionMetadata objects from a byte stream using the marshalUtil package.
-func ParseTransactionMetadata(marshalUtil *marshalutil.MarshalUtil) (*TransactionMetadata, error) {
-	if metadata, err := marshalUtil.Parse(func(data []byte) (interface{}, error, int) { return TransactionMetadataFromBytes(data) }); err != nil {
-		return nil, err
-	} else {
-		return metadata.(*TransactionMetadata), nil
+		return
+	}); err != nil {
+		return
 	}
+
+	return
 }
 
 // Id return the id of the Transaction that this TransactionMetadata is associated to.
@@ -87,6 +100,35 @@ func (transactionMetadata *TransactionMetadata) Id() transaction.Id {
 	return transactionMetadata.id
 }
 
+func (transactionMetadata *TransactionMetadata) BranchId() branchmanager.BranchId {
+	transactionMetadata.branchIdMutex.RLock()
+	defer transactionMetadata.branchIdMutex.RUnlock()
+
+	return transactionMetadata.branchId
+}
+
+func (transactionMetadata *TransactionMetadata) SetBranchId(branchId branchmanager.BranchId) (modified bool) {
+	transactionMetadata.branchIdMutex.RLock()
+	if transactionMetadata.branchId == branchId {
+		transactionMetadata.branchIdMutex.RUnlock()
+
+		return
+	}
+
+	transactionMetadata.branchIdMutex.RUnlock()
+	transactionMetadata.branchIdMutex.Lock()
+	defer transactionMetadata.branchIdMutex.Unlock()
+
+	if transactionMetadata.branchId == branchId {
+		return
+	}
+
+	transactionMetadata.branchId = branchId
+	modified = true
+
+	return
+}
+
 // Solid returns true if the Transaction has been marked as solid.
 func (transactionMetadata *TransactionMetadata) Solid() (result bool) {
 	transactionMetadata.solidMutex.RLock()
@@ -125,6 +167,45 @@ func (transactionMetadata *TransactionMetadata) SetSolid(solid bool) (modified b
 	return
 }
 
+func (transactionMetadata *TransactionMetadata) SetFinalized(finalized bool) (modified bool) {
+	transactionMetadata.finalizedMutex.RLock()
+	if transactionMetadata.finalized == finalized {
+		transactionMetadata.finalizedMutex.RUnlock()
+
+		return
+	}
+
+	transactionMetadata.finalizedMutex.RUnlock()
+	transactionMetadata.finalizedMutex.Lock()
+	defer transactionMetadata.finalizedMutex.Unlock()
+
+	if transactionMetadata.finalized == finalized {
+		return
+	}
+
+	transactionMetadata.finalized = finalized
+	if finalized {
+		transactionMetadata.finalizationTime = time.Now()
+	}
+	modified = true
+
+	return
+}
+
+func (transactionMetadata *TransactionMetadata) Finalized() bool {
+	transactionMetadata.finalizedMutex.RLock()
+	defer transactionMetadata.finalizedMutex.RUnlock()
+
+	return transactionMetadata.finalized
+}
+
+func (transactionMetadata *TransactionMetadata) FinalizationTime() time.Time {
+	transactionMetadata.finalizedMutex.RLock()
+	defer transactionMetadata.finalizedMutex.RUnlock()
+
+	return transactionMetadata.finalizationTime
+}
+
 // SoldificationTime returns the time when the Transaction was marked to be solid.
 func (transactionMetadata *TransactionMetadata) SoldificationTime() time.Time {
 	transactionMetadata.solidificationTimeMutex.RLock()
@@ -135,19 +216,20 @@ func (transactionMetadata *TransactionMetadata) SoldificationTime() time.Time {
 
 // Bytes marshals the TransactionMetadata object into a sequence of bytes.
 func (transactionMetadata *TransactionMetadata) Bytes() []byte {
-	marshalUtil := marshalutil.New()
-
-	marshalUtil.WriteBytes(transactionMetadata.id.Bytes())
-	marshalUtil.WriteTime(transactionMetadata.solidificationTime)
-	marshalUtil.WriteBool(transactionMetadata.solid)
-
-	return marshalUtil.Bytes()
+	return marshalutil.New(branchmanager.BranchIdLength + 2*marshalutil.TIME_SIZE + 2*marshalutil.BOOL_SIZE).
+		WriteBytes(transactionMetadata.BranchId().Bytes()).
+		WriteTime(transactionMetadata.solidificationTime).
+		WriteTime(transactionMetadata.finalizationTime).
+		WriteBool(transactionMetadata.solid).
+		WriteBool(transactionMetadata.finalized).
+		Bytes()
 }
 
 // String creates a human readable version of the metadata (for debug purposes).
 func (transactionMetadata *TransactionMetadata) String() string {
 	return stringify.Struct("transaction.TransactionMetadata",
-		stringify.StructField("payloadId", transactionMetadata.Id()),
+		stringify.StructField("id", transactionMetadata.Id()),
+		stringify.StructField("branchId", transactionMetadata.BranchId()),
 		stringify.StructField("solid", transactionMetadata.Solid()),
 		stringify.StructField("solidificationTime", transactionMetadata.SoldificationTime()),
 	)
@@ -172,7 +254,23 @@ func (transactionMetadata *TransactionMetadata) ObjectStorageValue() []byte {
 // UnmarshalObjectStorageValue restores the values of a TransactionMetadata object from a sequence of bytes and matches the
 // encoding.BinaryUnmarshaler interface.
 func (transactionMetadata *TransactionMetadata) UnmarshalObjectStorageValue(data []byte) (err error, consumedBytes int) {
-	_, err, consumedBytes = TransactionMetadataFromBytes(data, transactionMetadata)
+	marshalUtil := marshalutil.New(data)
+	if transactionMetadata.branchId, err = branchmanager.ParseBranchId(marshalUtil); err != nil {
+		return
+	}
+	if transactionMetadata.solidificationTime, err = marshalUtil.ReadTime(); err != nil {
+		return
+	}
+	if transactionMetadata.finalizationTime, err = marshalUtil.ReadTime(); err != nil {
+		return
+	}
+	if transactionMetadata.solid, err = marshalUtil.ReadBool(); err != nil {
+		return
+	}
+	if transactionMetadata.finalized, err = marshalUtil.ReadBool(); err != nil {
+		return
+	}
+	consumedBytes = marshalUtil.ReadOffset()
 
 	return
 }
diff --git a/dapps/valuetransfers/packages/utxodag/utxodag.go b/dapps/valuetransfers/packages/utxodag/utxodag.go
new file mode 100644
index 00000000..e27c6cfa
--- /dev/null
+++ b/dapps/valuetransfers/packages/utxodag/utxodag.go
@@ -0,0 +1,745 @@
+package utxodag
+
+import (
+	"container/list"
+	"errors"
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/dgraph-io/badger/v2"
+	"github.com/iotaledger/hive.go/async"
+	"github.com/iotaledger/hive.go/events"
+	"github.com/iotaledger/hive.go/objectstorage"
+	"github.com/iotaledger/hive.go/types"
+
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/tangle"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+	"github.com/iotaledger/goshimmer/packages/binary/storageprefix"
+)
+
+type UTXODAG struct {
+	tangle        *tangle.Tangle
+	branchManager *branchmanager.BranchManager
+
+	transactionStorage         *objectstorage.ObjectStorage
+	transactionMetadataStorage *objectstorage.ObjectStorage
+	attachmentStorage          *objectstorage.ObjectStorage
+	outputStorage              *objectstorage.ObjectStorage
+	consumerStorage            *objectstorage.ObjectStorage
+
+	Events *Events
+
+	workerPool async.WorkerPool
+}
+
+func New(badgerInstance *badger.DB, tangle *tangle.Tangle) (result *UTXODAG) {
+	osFactory := objectstorage.NewFactory(badgerInstance, storageprefix.ValueTransfers)
+
+	result = &UTXODAG{
+		tangle:        tangle,
+		branchManager: branchmanager.New(badgerInstance),
+
+		transactionStorage:         osFactory.New(osTransaction, osTransactionFactory, objectstorage.CacheTime(time.Second), osLeakDetectionOption),
+		transactionMetadataStorage: osFactory.New(osTransactionMetadata, osTransactionMetadataFactory, objectstorage.CacheTime(time.Second), osLeakDetectionOption),
+		attachmentStorage:          osFactory.New(osAttachment, osAttachmentFactory, objectstorage.CacheTime(time.Second), osLeakDetectionOption),
+		outputStorage:              osFactory.New(osOutput, osOutputFactory, OutputKeyPartitions, objectstorage.CacheTime(time.Second), osLeakDetectionOption),
+		consumerStorage:            osFactory.New(osConsumer, osConsumerFactory, ConsumerPartitionKeys, objectstorage.CacheTime(time.Second), osLeakDetectionOption),
+
+		Events: newEvents(),
+	}
+
+	tangle.Events.PayloadSolid.Attach(events.NewClosure(result.ProcessSolidPayload))
+
+	return
+}
+
+func (utxoDAG *UTXODAG) BranchManager() *branchmanager.BranchManager {
+	return utxoDAG.branchManager
+}
+
+func (utxoDAG *UTXODAG) ProcessSolidPayload(cachedPayload *payload.CachedPayload, cachedMetadata *tangle.CachedPayloadMetadata) {
+	utxoDAG.workerPool.Submit(func() { utxoDAG.storeTransactionWorker(cachedPayload, cachedMetadata) })
+}
+
+func (utxoDAG *UTXODAG) Transaction(transactionId transaction.Id) *transaction.CachedTransaction {
+	return &transaction.CachedTransaction{CachedObject: utxoDAG.transactionStorage.Load(transactionId.Bytes())}
+}
+
+// TransactionMetadata retrieves the metadata of a transaction from the object storage.
+func (utxoDAG *UTXODAG) TransactionMetadata(transactionId transaction.Id) *CachedTransactionMetadata {
+	return &CachedTransactionMetadata{CachedObject: utxoDAG.transactionMetadataStorage.Load(transactionId.Bytes())}
+}
+
+func (utxoDAG *UTXODAG) GetTransactionOutput(outputId transaction.OutputId) *CachedOutput {
+	return &CachedOutput{CachedObject: utxoDAG.outputStorage.Load(outputId.Bytes())}
+}
+
+// GetConsumers retrieves the consumers of an output from the object storage.
+func (utxoDAG *UTXODAG) GetConsumers(outputId transaction.OutputId) CachedConsumers {
+	consumers := make(CachedConsumers, 0)
+	utxoDAG.consumerStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool {
+		consumers = append(consumers, &CachedConsumer{CachedObject: cachedObject})
+
+		return true
+	}, outputId.Bytes())
+
+	return consumers
+}
+
+// GetAttachments retrieves the attachments of a transaction from the object storage.
+func (utxoDAG *UTXODAG) GetAttachments(transactionId transaction.Id) CachedAttachments {
+	attachments := make(CachedAttachments, 0)
+	utxoDAG.attachmentStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool {
+		attachments = append(attachments, &CachedAttachment{CachedObject: cachedObject})
+
+		return true
+	}, transactionId.Bytes())
+
+	return attachments
+}
+
+// Shutdown stops the worker pools and shuts down the object storage instances.
+func (utxoDAG *UTXODAG) Shutdown() *UTXODAG {
+	utxoDAG.workerPool.ShutdownGracefully()
+
+	utxoDAG.transactionStorage.Shutdown()
+	utxoDAG.transactionMetadataStorage.Shutdown()
+	utxoDAG.outputStorage.Shutdown()
+	utxoDAG.consumerStorage.Shutdown()
+
+	return utxoDAG
+}
+
+// Prune resets the database and deletes all objects (for testing or "node resets").
+func (utxoDAG *UTXODAG) Prune() (err error) {
+	if err = utxoDAG.branchManager.Prune(); err != nil {
+		return
+	}
+
+	for _, storage := range []*objectstorage.ObjectStorage{
+		utxoDAG.transactionStorage,
+		utxoDAG.transactionMetadataStorage,
+		utxoDAG.outputStorage,
+		utxoDAG.consumerStorage,
+	} {
+		if err = storage.Prune(); err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+func (utxoDAG *UTXODAG) storeTransactionWorker(cachedPayload *payload.CachedPayload, cachedPayloadMetadata *tangle.CachedPayloadMetadata) {
+	defer cachedPayload.Release()
+	defer cachedPayloadMetadata.Release()
+
+	// abort if the parameters are empty
+	solidPayload := cachedPayload.Unwrap()
+	if solidPayload == nil || cachedPayloadMetadata.Unwrap() == nil {
+		return
+	}
+
+	// store objects in database
+	cachedTransaction, cachedTransactionMetadata, cachedAttachment, transactionIsNew := utxoDAG.storeTransactionModels(solidPayload)
+
+	// abort if the attachment was previously processed already (nil == was not stored)
+	if cachedAttachment == nil {
+		cachedTransaction.Release()
+		cachedTransactionMetadata.Release()
+
+		return
+	}
+
+	// trigger events for a new transaction
+	if transactionIsNew {
+		utxoDAG.Events.TransactionReceived.Trigger(cachedTransaction, cachedTransactionMetadata, cachedAttachment)
+	}
+
+	// check solidity of transaction and its corresponding attachment
+	utxoDAG.solidifyTransactionWorker(cachedTransaction, cachedTransactionMetadata, cachedAttachment)
+}
+
+func (utxoDAG *UTXODAG) storeTransactionModels(solidPayload *payload.Payload) (cachedTransaction *transaction.CachedTransaction, cachedTransactionMetadata *CachedTransactionMetadata, cachedAttachment *CachedAttachment, transactionIsNew bool) {
+	cachedTransaction = &transaction.CachedTransaction{CachedObject: utxoDAG.transactionStorage.ComputeIfAbsent(solidPayload.Transaction().Id().Bytes(), func(key []byte) objectstorage.StorableObject {
+		transactionIsNew = true
+
+		result := solidPayload.Transaction()
+		result.Persist()
+		result.SetModified()
+
+		return result
+	})}
+
+	if transactionIsNew {
+		cachedTransactionMetadata = &CachedTransactionMetadata{CachedObject: utxoDAG.transactionMetadataStorage.Store(NewTransactionMetadata(solidPayload.Transaction().Id()))}
+
+		// store references to the consumed outputs
+		solidPayload.Transaction().Inputs().ForEach(func(outputId transaction.OutputId) bool {
+			utxoDAG.consumerStorage.Store(NewConsumer(outputId, solidPayload.Transaction().Id())).Release()
+
+			return true
+		})
+	} else {
+		cachedTransactionMetadata = &CachedTransactionMetadata{CachedObject: utxoDAG.transactionMetadataStorage.Load(solidPayload.Transaction().Id().Bytes())}
+	}
+
+	// store a reference from the transaction to the payload that attached it or abort, if we have processed this attachment already
+	attachment, stored := utxoDAG.attachmentStorage.StoreIfAbsent(NewAttachment(solidPayload.Transaction().Id(), solidPayload.Id()))
+	if !stored {
+		return
+	}
+	cachedAttachment = &CachedAttachment{CachedObject: attachment}
+
+	return
+}
+
+func (utxoDAG *UTXODAG) solidifyTransactionWorker(cachedTransaction *transaction.CachedTransaction, cachedTransactionMetdata *CachedTransactionMetadata, attachment *CachedAttachment) {
+	// initialize the stack
+	solidificationStack := list.New()
+	solidificationStack.PushBack([3]interface{}{cachedTransaction, cachedTransactionMetdata, attachment})
+
+	// process payloads that are supposed to be checked for solidity recursively
+	for solidificationStack.Len() > 0 {
+		// execute logic inside a func, so we can use defer to release the objects
+		func() {
+			// retrieve cached objects
+			currentCachedTransaction, currentCachedTransactionMetadata, currentCachedAttachment := utxoDAG.popElementsFromSolidificationStack(solidificationStack)
+			defer currentCachedTransaction.Release()
+			defer currentCachedTransactionMetadata.Release()
+			defer currentCachedAttachment.Release()
+
+			// unwrap cached objects
+			currentTransaction := currentCachedTransaction.Unwrap()
+			currentTransactionMetadata := currentCachedTransactionMetadata.Unwrap()
+			currentAttachment := currentCachedAttachment.Unwrap()
+
+			// abort if any of the retrieved models is nil
+			if currentTransaction == nil || currentTransactionMetadata == nil || currentAttachment == nil {
+				return
+			}
+
+			// abort if the transaction is not solid or invalid
+			if transactionSolid, err := utxoDAG.isTransactionSolid(currentTransaction, currentTransactionMetadata); !transactionSolid || err != nil {
+				if err != nil {
+					// TODO: TRIGGER INVALID TX + REMOVE TXS THAT APPROVE IT
+					fmt.Println(err, currentTransaction)
+				}
+
+				return
+			}
+
+			transactionBecameNewlySolid := currentTransactionMetadata.SetSolid(true)
+			if !transactionBecameNewlySolid {
+				// TODO: book attachment
+
+				return
+			}
+
+			// ... and schedule check of consumers
+			utxoDAG.ForEachConsumers(currentTransaction, func(cachedTransaction *transaction.CachedTransaction, transactionMetadata *CachedTransactionMetadata, cachedAttachment *CachedAttachment) {
+				solidificationStack.PushBack([3]interface{}{cachedTransaction, transactionMetadata, cachedAttachment})
+			})
+
+			// TODO: BOOK TRANSACTION
+			utxoDAG.bookTransaction(cachedTransaction.Retain(), cachedTransactionMetdata.Retain())
+		}()
+	}
+}
+
+func (utxoDAG *UTXODAG) popElementsFromSolidificationStack(stack *list.List) (*transaction.CachedTransaction, *CachedTransactionMetadata, *CachedAttachment) {
+	currentSolidificationEntry := stack.Front()
+	cachedTransaction := currentSolidificationEntry.Value.([3]interface{})[0].(*transaction.CachedTransaction)
+	cachedTransactionMetadata := currentSolidificationEntry.Value.([3]interface{})[1].(*CachedTransactionMetadata)
+	cachedAttachment := currentSolidificationEntry.Value.([3]interface{})[2].(*CachedAttachment)
+	stack.Remove(currentSolidificationEntry)
+
+	return cachedTransaction, cachedTransactionMetadata, cachedAttachment
+}
+
+func (utxoDAG *UTXODAG) isTransactionSolid(tx *transaction.Transaction, metadata *TransactionMetadata) (bool, error) {
+	// abort if any of the models are nil or has been deleted
+	if tx == nil || tx.IsDeleted() || metadata == nil || metadata.IsDeleted() {
+		return false, nil
+	}
+
+	// abort if we have previously determined the solidity status of the transaction already
+	if metadata.Solid() {
+		return true, nil
+	}
+
+	// get outputs that were referenced in the transaction inputs
+	cachedInputs := utxoDAG.getCachedOutputsFromTransactionInputs(tx)
+	defer cachedInputs.Release()
+
+	// check the solidity of the inputs and retrieve the consumed balances
+	inputsSolid, consumedBalances, err := utxoDAG.checkTransactionInputs(cachedInputs)
+
+	// abort if an error occurred or the inputs are not solid, yet
+	if !inputsSolid || err != nil {
+		return false, err
+	}
+
+	if !utxoDAG.checkTransactionOutputs(consumedBalances, tx.Outputs()) {
+		return false, fmt.Errorf("the outputs do not match the inputs in transaction with id '%s'", tx.Id())
+	}
+
+	return true, nil
+}
+
+func (utxoDAG *UTXODAG) getCachedOutputsFromTransactionInputs(tx *transaction.Transaction) (result CachedOutputs) {
+	result = make(CachedOutputs)
+	tx.Inputs().ForEach(func(inputId transaction.OutputId) bool {
+		result[inputId] = utxoDAG.GetTransactionOutput(inputId)
+
+		return true
+	})
+
+	return
+}
+
+func (utxoDAG *UTXODAG) checkTransactionInputs(cachedInputs CachedOutputs) (inputsSolid bool, consumedBalances map[balance.Color]int64, err error) {
+	inputsSolid = true
+	consumedBalances = make(map[balance.Color]int64)
+
+	for _, cachedInput := range cachedInputs {
+		if !cachedInput.Exists() {
+			inputsSolid = false
+
+			continue
+		}
+
+		// should never be nil as we check Exists() before
+		input := cachedInput.Unwrap()
+
+		// update solid status
+		inputsSolid = inputsSolid && input.Solid()
+
+		// calculate the input balances
+		for _, inputBalance := range input.Balances() {
+			var newBalance int64
+			if currentBalance, balanceExists := consumedBalances[inputBalance.Color()]; balanceExists {
+				// check overflows in the numbers
+				if inputBalance.Value() > math.MaxInt64-currentBalance {
+					err = fmt.Errorf("buffer overflow in balances of inputs")
+
+					return
+				}
+
+				newBalance = currentBalance + inputBalance.Value()
+			} else {
+				newBalance = inputBalance.Value()
+			}
+			consumedBalances[inputBalance.Color()] = newBalance
+		}
+	}
+
+	return
+}
+
+// checkTransactionOutputs is a utility function that returns true, if the outputs are consuming all of the given inputs
+// (the sum of all the balance changes is 0). It also accounts for the ability to "recolor" coins during the creation of
+// outputs. If this function returns false, then the outputs that are defined in the transaction are invalid and the
+// transaction should be removed from the ledger state.
+func (utxoDAG *UTXODAG) checkTransactionOutputs(inputBalances map[balance.Color]int64, outputs *transaction.Outputs) bool {
+	// create a variable to keep track of outputs that create a new color
+	var newlyColoredCoins int64
+
+	// iterate through outputs and check them one by one
+	aborted := !outputs.ForEach(func(address address.Address, balances []*balance.Balance) bool {
+		for _, outputBalance := range balances {
+			// abort if the output creates a negative or empty output
+			if outputBalance.Value() <= 0 {
+				return false
+			}
+
+			// sidestep logic if we have a newly colored output (we check the supply later)
+			if outputBalance.Color() == balance.ColorNew {
+				// catch overflows
+				if newlyColoredCoins > math.MaxInt64-outputBalance.Value() {
+					return false
+				}
+
+				newlyColoredCoins += outputBalance.Value()
+
+				continue
+			}
+
+			// check if the used color does not exist in our supply
+			availableBalance, spentColorExists := inputBalances[outputBalance.Color()]
+			if !spentColorExists {
+				return false
+			}
+
+			// abort if we spend more coins of the given color than we have
+			if availableBalance < outputBalance.Value() {
+				return false
+			}
+
+			// subtract the spent coins from the supply of this transaction
+			inputBalances[outputBalance.Color()] -= outputBalance.Value()
+
+			// cleanup the entry in the supply map if we have exhausted all funds
+			if inputBalances[outputBalance.Color()] == 0 {
+				delete(inputBalances, outputBalance.Color())
+			}
+		}
+
+		return true
+	})
+
+	// abort if the previous checks failed
+	if aborted {
+		return false
+	}
+
+	// determine the unspent inputs
+	var unspentCoins int64
+	for _, unspentBalance := range inputBalances {
+		// catch overflows
+		if unspentCoins > math.MaxInt64-unspentBalance {
+			return false
+		}
+
+		unspentCoins += unspentBalance
+	}
+
+	// the outputs are valid if they spend all outputs
+	return unspentCoins == newlyColoredCoins
+}
+
+func (utxoDAG *UTXODAG) ForEachConsumers(currentTransaction *transaction.Transaction, consume func(cachedTransaction *transaction.CachedTransaction, transactionMetadata *CachedTransactionMetadata, cachedAttachment *CachedAttachment)) {
+	seenTransactions := make(map[transaction.Id]types.Empty)
+	currentTransaction.Outputs().ForEach(func(address address.Address, balances []*balance.Balance) bool {
+		utxoDAG.GetConsumers(transaction.NewOutputId(address, currentTransaction.Id())).Consume(func(consumer *Consumer) {
+			if _, transactionSeen := seenTransactions[consumer.TransactionId()]; !transactionSeen {
+				seenTransactions[consumer.TransactionId()] = types.Void
+
+				cachedTransaction := utxoDAG.Transaction(consumer.TransactionId())
+				cachedTransactionMetadata := utxoDAG.TransactionMetadata(consumer.TransactionId())
+				for _, cachedAttachment := range utxoDAG.GetAttachments(consumer.TransactionId()) {
+					consume(cachedTransaction, cachedTransactionMetadata, cachedAttachment)
+				}
+			}
+		})
+
+		return true
+	})
+}
+
+func (utxoDAG *UTXODAG) bookTransaction(cachedTransaction *transaction.CachedTransaction, cachedTransactionMetadata *CachedTransactionMetadata) (err error) {
+	defer cachedTransaction.Release()
+	defer cachedTransactionMetadata.Release()
+
+	transactionToBook := cachedTransaction.Unwrap()
+	if transactionToBook == nil {
+		err = errors.New("failed to unwrap transaction")
+
+		return
+	}
+
+	transactionMetadata := cachedTransactionMetadata.Unwrap()
+	if transactionMetadata == nil {
+		err = errors.New("failed to unwrap transaction metadata")
+
+		return
+	}
+
+	consumedBranches := make(branchmanager.BranchIds)
+	conflictingInputs := make([]transaction.OutputId, 0)
+	conflictingInputsOfConflictingConsumers := make(map[transaction.Id][]transaction.OutputId)
+
+	if !transactionToBook.Inputs().ForEach(func(outputId transaction.OutputId) bool {
+		cachedOutput := utxoDAG.GetTransactionOutput(outputId)
+		defer cachedOutput.Release()
+
+		// abort if the output could not be found
+		output := cachedOutput.Unwrap()
+		if output == nil {
+			err = fmt.Errorf("could not load output '%s'", outputId)
+
+			return false
+		}
+
+		consumedBranches[output.BranchId()] = types.Void
+
+		// continue if we are the first consumer and there is no double spend
+		consumerCount, firstConsumerId := output.RegisterConsumer(transactionToBook.Id())
+		if consumerCount == 0 {
+			return true
+		}
+
+		// keep track of conflicting inputs
+		conflictingInputs = append(conflictingInputs, outputId)
+
+		// also keep track of conflicting inputs of previous consumers
+		if consumerCount == 1 {
+			if _, conflictingInputsExist := conflictingInputsOfConflictingConsumers[firstConsumerId]; !conflictingInputsExist {
+				conflictingInputsOfConflictingConsumers[firstConsumerId] = make([]transaction.OutputId, 0)
+			}
+
+			conflictingInputsOfConflictingConsumers[firstConsumerId] = append(conflictingInputsOfConflictingConsumers[firstConsumerId], outputId)
+		}
+
+		return true
+	}) {
+		return
+	}
+
+	cachedTargetBranch, _ := utxoDAG.branchManager.InheritBranches(consumedBranches.ToList()...)
+	defer cachedTargetBranch.Release()
+
+	targetBranch := cachedTargetBranch.Unwrap()
+	if targetBranch == nil {
+		return errors.New("failed to unwrap target branch")
+	}
+	targetBranch.Persist()
+
+	if len(conflictingInputs) >= 1 {
+		cachedTargetBranch = utxoDAG.branchManager.AddBranch(branchmanager.NewBranch(branchmanager.NewBranchId(transactionToBook.Id()), []branchmanager.BranchId{targetBranch.Id()}, conflictingInputs))
+		defer cachedTargetBranch.Release()
+
+		targetBranch = cachedTargetBranch.Unwrap()
+		if targetBranch == nil {
+			return errors.New("failed to inherit branches")
+		}
+
+		// TODO: CREATE / RETRIEVE CONFLICT SETS + ADD TARGET REALITY TO THEM
+		/*
+			for _, conflictingInput := range conflictingInputs {
+
+			}
+		*/
+	}
+
+	// book transaction into target reality
+	transactionMetadata.SetBranchId(targetBranch.Id())
+
+	// book outputs into the target branch
+	transactionToBook.Outputs().ForEach(func(address address.Address, balances []*balance.Balance) bool {
+		newOutput := NewOutput(address, transactionToBook.Id(), targetBranch.Id(), balances)
+		newOutput.SetSolid(true)
+		utxoDAG.outputStorage.Store(newOutput).Release()
+
+		return true
+	})
+
+	// fork the conflicting transactions into their own branch
+	previousConsumerForked := false
+	for consumerId, conflictingInputs := range conflictingInputsOfConflictingConsumers {
+		consumerForked, forkedErr := utxoDAG.Fork(consumerId, conflictingInputs)
+		if forkedErr != nil {
+			err = forkedErr
+
+			return
+		}
+
+		previousConsumerForked = previousConsumerForked || consumerForked
+	}
+
+	// trigger events
+	utxoDAG.Events.TransactionBooked.Trigger(cachedTransaction, cachedTransactionMetadata, cachedTargetBranch, conflictingInputs, previousConsumerForked)
+
+	// TODO: BOOK ATTACHMENT
+
+	return
+}
+
+func (utxoDAG *UTXODAG) calculateBranchOfTransaction(currentTransaction *transaction.Transaction) (branch *branchmanager.CachedBranch, err error) {
+	consumedBranches := make(branchmanager.BranchIds)
+	if !currentTransaction.Inputs().ForEach(func(outputId transaction.OutputId) bool {
+		cachedTransactionOutput := utxoDAG.GetTransactionOutput(outputId)
+		defer cachedTransactionOutput.Release()
+
+		transactionOutput := cachedTransactionOutput.Unwrap()
+		if transactionOutput == nil {
+			err = fmt.Errorf("failed to load output '%s'", outputId)
+
+			return false
+		}
+
+		consumedBranches[transactionOutput.BranchId()] = types.Void
+
+		return true
+	}) {
+		return
+	}
+
+	branch, err = utxoDAG.branchManager.InheritBranches(consumedBranches.ToList()...)
+
+	return
+}
+
+func (utxoDAG *UTXODAG) moveTransactionToBranch(cachedTransaction *transaction.CachedTransaction, cachedTransactionMetadata *CachedTransactionMetadata, cachedTargetBranch *branchmanager.CachedBranch) (err error) {
+	// push transaction that shall be moved to the stack
+	transactionStack := list.New()
+	branchStack := list.New()
+	branchStack.PushBack([3]interface{}{cachedTransactionMetadata.Unwrap().BranchId(), cachedTargetBranch, transactionStack})
+	transactionStack.PushBack([2]interface{}{cachedTransaction, cachedTransactionMetadata})
+
+	// iterate through all transactions (grouped by their branch)
+	for branchStack.Len() >= 1 {
+		if err = func() error {
+			// retrieve branch details from stack
+			currentSolidificationEntry := branchStack.Front()
+			currentSourceBranch := currentSolidificationEntry.Value.([3]interface{})[0].(branchmanager.BranchId)
+			currentCachedTargetBranch := currentSolidificationEntry.Value.([3]interface{})[1].(*branchmanager.CachedBranch)
+			transactionStack := currentSolidificationEntry.Value.([3]interface{})[2].(*list.List)
+			branchStack.Remove(currentSolidificationEntry)
+			defer currentCachedTargetBranch.Release()
+
+			// unpack target branch
+			targetBranch := currentCachedTargetBranch.Unwrap()
+			if targetBranch == nil {
+				return errors.New("failed to unpack branch")
+			}
+
+			// iterate through transactions
+			for transactionStack.Len() >= 1 {
+				if err = func() error {
+					// retrieve transaction details from stack
+					currentSolidificationEntry := transactionStack.Front()
+					currentCachedTransaction := currentSolidificationEntry.Value.([2]interface{})[0].(*transaction.CachedTransaction)
+					currentCachedTransactionMetadata := currentSolidificationEntry.Value.([2]interface{})[1].(*CachedTransactionMetadata)
+					transactionStack.Remove(currentSolidificationEntry)
+					defer currentCachedTransaction.Release()
+					defer currentCachedTransactionMetadata.Release()
+
+					// unwrap transaction
+					currentTransaction := currentCachedTransaction.Unwrap()
+					if currentTransaction == nil {
+						return errors.New("failed to unwrap transaction")
+					}
+
+					// unwrap transaction metadata
+					currentTransactionMetadata := currentCachedTransactionMetadata.Unwrap()
+					if currentTransactionMetadata == nil {
+						return errors.New("failed to unwrap transaction metadata")
+					}
+
+					// if we arrived at a nested branch
+					if currentTransactionMetadata.BranchId() != currentSourceBranch {
+						// determine the new branch of the transaction
+						newCachedTargetBranch, branchErr := utxoDAG.calculateBranchOfTransaction(currentTransaction)
+						if branchErr != nil {
+							return branchErr
+						}
+						defer newCachedTargetBranch.Release()
+
+						// unwrap the branch
+						newTargetBranch := newCachedTargetBranch.Unwrap()
+						if newTargetBranch == nil {
+							return errors.New("failed to unwrap branch")
+						}
+						newTargetBranch.Persist()
+
+						// add the new branch (with the current transaction as a starting point to the branch stack)
+						newTransactionStack := list.New()
+						newTransactionStack.PushBack([2]interface{}{currentCachedTransaction.Retain(), currentCachedTransactionMetadata.Retain()})
+						branchStack.PushBack([3]interface{}{currentTransactionMetadata.BranchId(), newCachedTargetBranch.Retain(), newTransactionStack})
+
+						return nil
+					}
+
+					// abort if we did not modify the branch of the transaction
+					if !currentTransactionMetadata.SetBranchId(targetBranch.Id()) {
+						return nil
+					}
+
+					// iterate through the outputs of the moved transaction
+					currentTransaction.Outputs().ForEach(func(address address.Address, balances []*balance.Balance) bool {
+						// create reference to the output
+						outputId := transaction.NewOutputId(address, currentTransaction.Id())
+
+						// load output from database
+						cachedOutput := utxoDAG.GetTransactionOutput(outputId)
+						defer cachedOutput.Release()
+
+						// unwrap output
+						output := cachedOutput.Unwrap()
+						if output == nil {
+							err = fmt.Errorf("failed to load output '%s'", outputId)
+
+							return false
+						}
+
+						// abort if the output was moved already
+						if !output.SetBranchId(targetBranch.Id()) {
+							return true
+						}
+
+						// schedule consumers for further checks
+						consumingTransactions := make(map[transaction.Id]types.Empty)
+						utxoDAG.GetConsumers(transaction.NewOutputId(address, currentTransaction.Id())).Consume(func(consumer *Consumer) {
+							consumingTransactions[consumer.TransactionId()] = types.Void
+						})
+						for transactionId := range consumingTransactions {
+							transactionStack.PushBack([2]interface{}{utxoDAG.Transaction(transactionId), utxoDAG.TransactionMetadata(transactionId)})
+						}
+
+						return true
+					})
+
+					return nil
+				}(); err != nil {
+					return err
+				}
+			}
+
+			return nil
+		}(); err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+func (utxoDAG *UTXODAG) Fork(transactionId transaction.Id, conflictingInputs []transaction.OutputId) (forked bool, err error) {
+	cachedTransaction := utxoDAG.Transaction(transactionId)
+	cachedTransactionMetadata := utxoDAG.TransactionMetadata(transactionId)
+	defer cachedTransaction.Release()
+	defer cachedTransactionMetadata.Release()
+
+	tx := cachedTransaction.Unwrap()
+	if tx == nil {
+		err = fmt.Errorf("failed to load transaction '%s'", transactionId)
+
+		return
+	}
+	txMetadata := cachedTransactionMetadata.Unwrap()
+	if txMetadata == nil {
+		err = fmt.Errorf("failed to load metadata of transaction '%s'", transactionId)
+
+		return
+	}
+
+	// abort if this transaction was finalized already
+	if txMetadata.Finalized() {
+		return
+	}
+
+	cachedTargetBranch := utxoDAG.branchManager.AddBranch(branchmanager.NewBranch(branchmanager.NewBranchId(tx.Id()), []branchmanager.BranchId{txMetadata.BranchId()}, conflictingInputs))
+	defer cachedTargetBranch.Release()
+
+	targetBranch := cachedTargetBranch.Unwrap()
+	if targetBranch == nil {
+		err = fmt.Errorf("failed to create branch for transaction '%s'", transactionId)
+
+		return
+	}
+
+	if err = utxoDAG.moveTransactionToBranch(cachedTransaction.Retain(), cachedTransactionMetadata.Retain(), cachedTargetBranch.Retain()); err != nil {
+		return
+	}
+
+	utxoDAG.Events.Fork.Trigger(cachedTransaction, cachedTransactionMetadata, targetBranch, conflictingInputs)
+	forked = true
+
+	return
+}
diff --git a/dapps/valuetransfers/packages/utxodag/utxodag_test.go b/dapps/valuetransfers/packages/utxodag/utxodag_test.go
new file mode 100644
index 00000000..4c7ab856
--- /dev/null
+++ b/dapps/valuetransfers/packages/utxodag/utxodag_test.go
@@ -0,0 +1,142 @@
+package utxodag
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/iotaledger/hive.go/crypto/ed25519"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/tangle"
+	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
+	"github.com/iotaledger/goshimmer/packages/database"
+	"github.com/iotaledger/goshimmer/plugins/config"
+)
+
+func TestNewOutput(t *testing.T) {
+	randomAddress := address.Random()
+	randomTransactionId := transaction.RandomId()
+
+	output := NewOutput(randomAddress, randomTransactionId, branchmanager.MasterBranchId, []*balance.Balance{
+		balance.New(balance.ColorIOTA, 1337),
+	})
+
+	assert.Equal(t, randomAddress, output.Address())
+	assert.Equal(t, randomTransactionId, output.TransactionId())
+	assert.Equal(t, false, output.Solid())
+	assert.Equal(t, time.Time{}, output.SolidificationTime())
+	assert.Equal(t, []*balance.Balance{
+		balance.New(balance.ColorIOTA, 1337),
+	}, output.Balances())
+
+	assert.Equal(t, true, output.SetSolid(true))
+	assert.Equal(t, false, output.SetSolid(true))
+	assert.Equal(t, true, output.Solid())
+	assert.NotEqual(t, time.Time{}, output.SolidificationTime())
+
+	clonedOutput, err, _ := OutputFromBytes(output.Bytes())
+	if err != nil {
+		panic(err)
+	}
+
+	assert.Equal(t, output.Address(), clonedOutput.Address())
+	assert.Equal(t, output.TransactionId(), clonedOutput.TransactionId())
+	assert.Equal(t, output.Solid(), clonedOutput.Solid())
+	assert.Equal(t, output.SolidificationTime().Round(time.Second), clonedOutput.SolidificationTime().Round(time.Second))
+	assert.Equal(t, output.Balances(), clonedOutput.Balances())
+}
+
+func TestAttachment(t *testing.T) {
+	transactionId := transaction.RandomId()
+	payloadId := payload.RandomId()
+
+	attachment := NewAttachment(transactionId, payloadId)
+
+	assert.Equal(t, transactionId, attachment.TransactionId())
+	assert.Equal(t, payloadId, attachment.PayloadId())
+
+	clonedAttachment, err, consumedBytes := AttachmentFromBytes(attachment.Bytes())
+	if err != nil {
+		panic(err)
+	}
+
+	assert.Equal(t, AttachmentLength, consumedBytes)
+	assert.Equal(t, transactionId, clonedAttachment.TransactionId())
+	assert.Equal(t, payloadId, clonedAttachment.PayloadId())
+}
+
+func TestTangle_AttachPayload(t *testing.T) {
+	dir, err := ioutil.TempDir("", t.Name())
+	require.NoError(t, err)
+	defer os.Remove(dir)
+
+	config.Node.Set(database.CFG_DIRECTORY, dir)
+
+	valueTangle := tangle.New(database.GetBadgerInstance())
+	if err := valueTangle.Prune(); err != nil {
+		t.Error(err)
+
+		return
+	}
+
+	utxoDAG := New(database.GetBadgerInstance(), valueTangle)
+
+	addressKeyPair1 := ed25519.GenerateKeyPair()
+	addressKeyPair2 := ed25519.GenerateKeyPair()
+
+	transferId1, _ := transaction.IdFromBase58("8opHzTAnfzRpPEx21XtnrVTX28YQuCpAjcn1PczScKh")
+	transferId2, _ := transaction.IdFromBase58("4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM")
+
+	input1 := NewOutput(address.FromED25519PubKey(addressKeyPair1.PublicKey), transferId1, branchmanager.MasterBranchId, []*balance.Balance{
+		balance.New(balance.ColorIOTA, 337),
+	})
+	input1.SetSolid(true)
+	input2 := NewOutput(address.FromED25519PubKey(addressKeyPair2.PublicKey), transferId2, branchmanager.MasterBranchId, []*balance.Balance{
+		balance.New(balance.ColorIOTA, 1000),
+	})
+	input2.SetSolid(true)
+
+	utxoDAG.outputStorage.Store(input1).Release()
+	utxoDAG.outputStorage.Store(input2).Release()
+
+	outputAddress1 := address.Random()
+	outputAddress2 := address.Random()
+
+	// attach first spend
+	valueTangle.AttachPayload(payload.New(payload.GenesisId, payload.GenesisId, transaction.New(
+		transaction.NewInputs(
+			input1.Id(),
+			input2.Id(),
+		),
+
+		transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			outputAddress1: {
+				balance.New(balance.ColorNew, 1337),
+			},
+		}),
+	)))
+
+	// attach double spend
+	valueTangle.AttachPayload(payload.New(payload.GenesisId, payload.GenesisId, transaction.New(
+		transaction.NewInputs(
+			input1.Id(),
+			input2.Id(),
+		),
+
+		transaction.NewOutputs(map[address.Address][]*balance.Balance{
+			outputAddress2: {
+				balance.New(balance.ColorNew, 1337),
+			},
+		}),
+	)))
+
+	valueTangle.Shutdown()
+	utxoDAG.Shutdown()
+}
diff --git a/go.mod b/go.mod
index 991d1dfd..74d38ecc 100644
--- a/go.mod
+++ b/go.mod
@@ -7,6 +7,7 @@ require (
 	github.com/dgraph-io/badger/v2 v2.0.2
 	github.com/dgrijalva/jwt-go v3.2.0+incompatible
 	github.com/drand/drand v0.5.4
+	github.com/gobuffalo/logger v1.0.1
 	github.com/gobuffalo/packr/v2 v2.7.1
 	github.com/golang/protobuf v1.3.4
 	github.com/googollee/go-engine.io v1.4.3-0.20190924125625-798118fc0dd2
diff --git a/packages/binary/valuetransfer/ledgerstate/ledgerstate.go b/packages/binary/valuetransfer/ledgerstate/ledgerstate.go
deleted file mode 100644
index 0bd374e5..00000000
--- a/packages/binary/valuetransfer/ledgerstate/ledgerstate.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package ledgerstate
-
-type LedgerState struct {
-}
diff --git a/packages/binary/valuetransfer/tangle/branch.go b/packages/binary/valuetransfer/tangle/branch.go
deleted file mode 100644
index ad0af4da..00000000
--- a/packages/binary/valuetransfer/tangle/branch.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package tangle
-
-import (
-	"github.com/iotaledger/hive.go/objectstorage"
-)
-
-type Branch struct {
-	objectstorage.StorableObjectFlags
-
-	id             BranchId
-	parentBranches []BranchId
-}
-
-func (branch *Branch) Update(other objectstorage.StorableObject) {
-	panic("implement me")
-}
-
-func (branch *Branch) ObjectStorageKey() []byte {
-	panic("implement me")
-}
-
-func (branch *Branch) ObjectStorageValue() []byte {
-	panic("implement me")
-}
-
-func (branch *Branch) UnmarshalObjectStorageValue(valueBytes []byte) (err error, consumedBytes int) {
-	panic("implement me")
-}
-
-func NewBranch(id BranchId, parentBranches []BranchId) *Branch {
-	return nil
-}
-
-func (branch *Branch) Id() BranchId {
-	return branch.id
-}
-
-func (branch *Branch) ParentBranches() []BranchId {
-	return branch.parentBranches
-}
-
-func (branch *Branch) IsAggregated() bool {
-	return len(branch.parentBranches) > 1
-}
-
-type CachedBranch struct {
-	objectstorage.CachedObject
-}
-
-func (cachedBranches *CachedBranch) Unwrap() *Branch {
-	if untypedObject := cachedBranches.Get(); untypedObject == nil {
-		return nil
-	} else {
-		if typedObject := untypedObject.(*Branch); typedObject == nil || typedObject.IsDeleted() {
-			return nil
-		} else {
-			return typedObject
-		}
-	}
-}
-
-func (cachedBranches *CachedBranch) Consume(consumer func(branch *Branch), forceRelease ...bool) (consumed bool) {
-	return cachedBranches.CachedObject.Consume(func(object objectstorage.StorableObject) {
-		consumer(object.(*Branch))
-	}, forceRelease...)
-}
-
-type CachedBranches map[BranchId]*CachedBranch
-
-func (cachedBranches CachedBranches) Consume(consumer func(branch *Branch)) (consumed bool) {
-	for _, cachedBranch := range cachedBranches {
-		consumed = cachedBranch.Consume(func(output *Branch) {
-			consumer(output)
-		}) || consumed
-	}
-
-	return
-}
-
-func (cachedBranches CachedBranches) Release(force ...bool) {
-	for _, cachedBranch := range cachedBranches {
-		cachedBranch.Release(force...)
-	}
-}
diff --git a/packages/binary/valuetransfer/tangle/output_test.go b/packages/binary/valuetransfer/tangle/output_test.go
deleted file mode 100644
index 690668f1..00000000
--- a/packages/binary/valuetransfer/tangle/output_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package tangle
-
-import (
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/balance"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
-)
-
-func TestNew(t *testing.T) {
-	randomAddress := address.Random()
-	randomTransactionId := transaction.RandomId()
-
-	output := NewOutput(randomAddress, randomTransactionId, MasterBranchId, []*balance.Balance{
-		balance.New(balance.COLOR_IOTA, 1337),
-	})
-
-	assert.Equal(t, randomAddress, output.Address())
-	assert.Equal(t, randomTransactionId, output.TransactionId())
-	assert.Equal(t, false, output.Solid())
-	assert.Equal(t, time.Time{}, output.SolidificationTime())
-	assert.Equal(t, []*balance.Balance{
-		balance.New(balance.COLOR_IOTA, 1337),
-	}, output.Balances())
-
-	assert.Equal(t, true, output.SetSolid(true))
-	assert.Equal(t, false, output.SetSolid(true))
-	assert.Equal(t, true, output.Solid())
-	assert.NotEqual(t, time.Time{}, output.SolidificationTime())
-
-	clonedOutput, err, _ := OutputFromBytes(output.Bytes())
-	if err != nil {
-		panic(err)
-	}
-
-	assert.Equal(t, output.Address(), clonedOutput.Address())
-	assert.Equal(t, output.TransactionId(), clonedOutput.TransactionId())
-	assert.Equal(t, output.Solid(), clonedOutput.Solid())
-	assert.Equal(t, output.SolidificationTime().Round(time.Second), clonedOutput.SolidificationTime().Round(time.Second))
-	assert.Equal(t, output.Balances(), clonedOutput.Balances())
-}
diff --git a/packages/binary/valuetransfer/tangle/tangle.go b/packages/binary/valuetransfer/tangle/tangle.go
deleted file mode 100644
index a1aef9b3..00000000
--- a/packages/binary/valuetransfer/tangle/tangle.go
+++ /dev/null
@@ -1,984 +0,0 @@
-package tangle
-
-import (
-	"container/list"
-	"fmt"
-	"math"
-	"sort"
-	"time"
-
-	"github.com/dgraph-io/badger/v2"
-	"github.com/iotaledger/hive.go/async"
-	"github.com/iotaledger/hive.go/marshalutil"
-	"github.com/iotaledger/hive.go/objectstorage"
-	"github.com/iotaledger/hive.go/types"
-	"golang.org/x/crypto/blake2b"
-
-	"github.com/iotaledger/goshimmer/packages/binary/storageprefix"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/balance"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/payload"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
-)
-
-// Tangle represents the value tangle that consists out of value payloads.
-// It is an independent ontology, that lives inside the tangle.
-type Tangle struct {
-	payloadStorage         *objectstorage.ObjectStorage
-	payloadMetadataStorage *objectstorage.ObjectStorage
-	approverStorage        *objectstorage.ObjectStorage
-	missingPayloadStorage  *objectstorage.ObjectStorage
-	attachmentStorage      *objectstorage.ObjectStorage
-
-	outputStorage        *objectstorage.ObjectStorage
-	consumerStorage      *objectstorage.ObjectStorage
-	missingOutputStorage *objectstorage.ObjectStorage
-	branchStorage        *objectstorage.ObjectStorage
-
-	Events Events
-
-	storePayloadWorkerPool async.WorkerPool
-	solidifierWorkerPool   async.WorkerPool
-	bookerWorkerPool       async.WorkerPool
-	cleanupWorkerPool      async.WorkerPool
-}
-
-func New(badgerInstance *badger.DB) (result *Tangle) {
-	osFactory := objectstorage.NewFactory(badgerInstance, storageprefix.ValueTransfers)
-
-	result = &Tangle{
-		// payload related storage
-		payloadStorage:         osFactory.New(osPayload, osPayloadFactory, objectstorage.CacheTime(time.Second)),
-		payloadMetadataStorage: osFactory.New(osPayloadMetadata, osPayloadMetadataFactory, objectstorage.CacheTime(time.Second)),
-		missingPayloadStorage:  osFactory.New(osMissingPayload, osMissingPayloadFactory, objectstorage.CacheTime(time.Second)),
-		approverStorage:        osFactory.New(osApprover, osPayloadApproverFactory, objectstorage.CacheTime(time.Second), objectstorage.PartitionKey(payload.IdLength, payload.IdLength), objectstorage.KeysOnly(true)),
-
-		// transaction related storage
-		attachmentStorage:    osFactory.New(osAttachment, osAttachmentFactory, objectstorage.CacheTime(time.Second)),
-		outputStorage:        osFactory.New(osOutput, osOutputFactory, OutputKeyPartitions, objectstorage.CacheTime(time.Second)),
-		missingOutputStorage: osFactory.New(osMissingOutput, osMissingOutputFactory, MissingOutputKeyPartitions, objectstorage.CacheTime(time.Second)),
-		consumerStorage:      osFactory.New(osConsumer, osConsumerFactory, ConsumerPartitionKeys, objectstorage.CacheTime(time.Second)),
-
-		Events: *newEvents(),
-	}
-
-	return
-}
-
-// AttachPayload adds a new payload to the value tangle.
-func (tangle *Tangle) AttachPayload(payload *payload.Payload) {
-	tangle.storePayloadWorkerPool.Submit(func() { tangle.storePayloadWorker(payload) })
-}
-
-// GetPayload retrieves a payload from the object storage.
-func (tangle *Tangle) GetPayload(payloadId payload.Id) *payload.CachedPayload {
-	return &payload.CachedPayload{CachedObject: tangle.payloadStorage.Load(payloadId.Bytes())}
-}
-
-// GetPayloadMetadata retrieves the metadata of a value payload from the object storage.
-func (tangle *Tangle) GetPayloadMetadata(payloadId payload.Id) *CachedPayloadMetadata {
-	return &CachedPayloadMetadata{CachedObject: tangle.payloadMetadataStorage.Load(payloadId.Bytes())}
-}
-
-// GetPayloadMetadata retrieves the metadata of a value payload from the object storage.
-func (tangle *Tangle) GetTransactionMetadata(transactionId transaction.Id) *CachedTransactionMetadata {
-	return &CachedTransactionMetadata{CachedObject: tangle.missingOutputStorage.Load(transactionId.Bytes())}
-}
-
-func (tangle *Tangle) GetTransactionOutput(outputId transaction.OutputId) *CachedOutput {
-	return &CachedOutput{CachedObject: tangle.outputStorage.Load(outputId.Bytes())}
-}
-
-// GetApprovers retrieves the approvers of a payload from the object storage.
-func (tangle *Tangle) GetApprovers(payloadId payload.Id) CachedApprovers {
-	approvers := make(CachedApprovers, 0)
-	tangle.approverStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool {
-		approvers = append(approvers, &CachedPayloadApprover{CachedObject: cachedObject})
-
-		return true
-	}, payloadId.Bytes())
-
-	return approvers
-}
-
-// GetConsumers retrieves the approvers of a payload from the object storage.
-func (tangle *Tangle) GetConsumers(outputId transaction.OutputId) CachedConsumers {
-	consumers := make(CachedConsumers, 0)
-	tangle.consumerStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool {
-		consumers = append(consumers, &CachedConsumer{CachedObject: cachedObject})
-
-		return true
-	}, outputId.Bytes())
-
-	return consumers
-}
-
-// GetApprovers retrieves the approvers of a payload from the object storage.
-func (tangle *Tangle) GetAttachments(transactionId transaction.Id) CachedAttachments {
-	attachments := make(CachedAttachments, 0)
-	tangle.attachmentStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool {
-		attachments = append(attachments, &CachedAttachment{CachedObject: cachedObject})
-
-		return true
-	}, transactionId.Bytes())
-
-	return attachments
-}
-
-// Shutdown stops the worker pools and shuts down the object storage instances.
-func (tangle *Tangle) Shutdown() *Tangle {
-	tangle.storePayloadWorkerPool.ShutdownGracefully()
-	tangle.solidifierWorkerPool.ShutdownGracefully()
-	tangle.cleanupWorkerPool.ShutdownGracefully()
-
-	tangle.payloadStorage.Shutdown()
-	tangle.payloadMetadataStorage.Shutdown()
-	tangle.approverStorage.Shutdown()
-	tangle.missingPayloadStorage.Shutdown()
-
-	return tangle
-}
-
-// Prune resets the database and deletes all objects (for testing or "node resets").
-func (tangle *Tangle) Prune() error {
-	for _, storage := range []*objectstorage.ObjectStorage{
-		tangle.payloadStorage,
-		tangle.payloadMetadataStorage,
-		tangle.approverStorage,
-		tangle.missingPayloadStorage,
-	} {
-		if err := storage.Prune(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// storePayloadWorker is the worker function that stores the payload and calls the corresponding storage events.
-func (tangle *Tangle) storePayloadWorker(payloadToStore *payload.Payload) {
-	// store the payload and transaction models
-	cachedPayload, cachedPayloadMetadata, payloadStored := tangle.storePayload(payloadToStore)
-	if !payloadStored {
-		// abort if we have seen the payload already
-		return
-	}
-	cachedTransactionMetadata, transactionStored := tangle.storeTransaction(payloadToStore.Transaction())
-
-	// store the references between the different entities (we do this after the actual entities were stored, so that
-	// all the metadata models exist in the database as soon as the entities are reachable by walks).
-	tangle.storePayloadReferences(payloadToStore)
-	if transactionStored {
-		tangle.storeTransactionReferences(payloadToStore.Transaction())
-	}
-
-	// trigger events
-	if tangle.missingPayloadStorage.DeleteIfPresent(payloadToStore.Id().Bytes()) {
-		tangle.Events.MissingPayloadReceived.Trigger(cachedPayload, cachedPayloadMetadata)
-	}
-	tangle.Events.PayloadAttached.Trigger(cachedPayload, cachedPayloadMetadata)
-
-	// check solidity
-	tangle.solidifierWorkerPool.Submit(func() {
-		tangle.solidifyTransactionWorker(cachedPayload, cachedPayloadMetadata, cachedTransactionMetadata)
-	})
-}
-
-func (tangle *Tangle) storePayload(payloadToStore *payload.Payload) (cachedPayload *payload.CachedPayload, cachedMetadata *CachedPayloadMetadata, payloadStored bool) {
-	if _tmp, transactionIsNew := tangle.payloadStorage.StoreIfAbsent(payloadToStore); !transactionIsNew {
-		return
-	} else {
-		cachedPayload = &payload.CachedPayload{CachedObject: _tmp}
-		cachedMetadata = &CachedPayloadMetadata{CachedObject: tangle.payloadMetadataStorage.Store(NewPayloadMetadata(payloadToStore.Id()))}
-		payloadStored = true
-
-		return
-	}
-}
-
-func (tangle *Tangle) storeTransaction(tx *transaction.Transaction) (cachedTransactionMetadata *CachedTransactionMetadata, transactionStored bool) {
-	cachedTransactionMetadata = &CachedTransactionMetadata{CachedObject: tangle.payloadMetadataStorage.ComputeIfAbsent(tx.Id().Bytes(), func(key []byte) objectstorage.StorableObject {
-		transactionStored = true
-
-		result := NewTransactionMetadata(tx.Id())
-		result.Persist()
-		result.SetModified()
-
-		return result
-	})}
-
-	return
-}
-
-func (tangle *Tangle) storePayloadReferences(payload *payload.Payload) {
-	// store trunk approver
-	trunkId := payload.TrunkId()
-	tangle.approverStorage.Store(NewPayloadApprover(trunkId, payload.Id())).Release()
-
-	// store branch approver
-	if branchId := payload.BranchId(); branchId != trunkId {
-		tangle.approverStorage.Store(NewPayloadApprover(branchId, trunkId)).Release()
-	}
-
-	// store a reference from the transaction to the payload that attached it
-	tangle.attachmentStorage.Store(NewAttachment(payload.Transaction().Id(), payload.Id()))
-}
-
-func (tangle *Tangle) storeTransactionReferences(tx *transaction.Transaction) {
-	// store references to the consumed outputs
-	tx.Inputs().ForEach(func(outputId transaction.OutputId) bool {
-		tangle.consumerStorage.Store(NewConsumer(outputId, tx.Id()))
-
-		return true
-	})
-}
-
-func (tangle *Tangle) popElementsFromSolidificationStack(stack *list.List) (*payload.CachedPayload, *CachedPayloadMetadata, *CachedTransactionMetadata) {
-	currentSolidificationEntry := stack.Front()
-	currentCachedPayload := currentSolidificationEntry.Value.([3]interface{})[0]
-	currentCachedMetadata := currentSolidificationEntry.Value.([3]interface{})[1]
-	currentCachedTransactionMetadata := currentSolidificationEntry.Value.([3]interface{})[2]
-	stack.Remove(currentSolidificationEntry)
-
-	return currentCachedPayload.(*payload.CachedPayload), currentCachedMetadata.(*CachedPayloadMetadata), currentCachedTransactionMetadata.(*CachedTransactionMetadata)
-}
-
-// solidifyTransactionWorker is the worker function that solidifies the payloads (recursively from past to present).
-func (tangle *Tangle) solidifyTransactionWorker(cachedPayload *payload.CachedPayload, cachedMetadata *CachedPayloadMetadata, cachedTransactionMetadata *CachedTransactionMetadata) {
-	// initialize the stack
-	solidificationStack := list.New()
-	solidificationStack.PushBack([3]interface{}{cachedPayload, cachedMetadata, cachedTransactionMetadata})
-
-	// process payloads that are supposed to be checked for solidity recursively
-	for solidificationStack.Len() > 0 {
-		// execute logic inside a func, so we can use defer to release the objects
-		func() {
-			// retrieve cached objects
-			currentCachedPayload, currentCachedMetadata, currentCachedTransactionMetadata := tangle.popElementsFromSolidificationStack(solidificationStack)
-			defer currentCachedPayload.Release()
-			defer currentCachedMetadata.Release()
-			defer currentCachedTransactionMetadata.Release()
-
-			// unwrap cached objects
-			currentPayload := currentCachedPayload.Unwrap()
-			currentPayloadMetadata := currentCachedMetadata.Unwrap()
-			currentTransactionMetadata := currentCachedTransactionMetadata.Unwrap()
-
-			// abort if any of the retrieved models is nil or payload is not solid
-			if currentPayload == nil || currentPayloadMetadata == nil || currentTransactionMetadata == nil || !tangle.isPayloadSolid(currentPayload, currentPayloadMetadata) {
-				return
-			}
-
-			// abort if the transaction is not solid or invalid
-			if transactionSolid, err := tangle.isTransactionSolid(currentPayload.Transaction(), currentTransactionMetadata); !transactionSolid || err != nil {
-				if err != nil {
-					// TODO: TRIGGER INVALID TX + REMOVE TXS THAT APPROVE IT
-					fmt.Println(err)
-				}
-
-				return
-			}
-
-			// abort if the payload was marked as solid already (if a payload is solid already then the tx is also solid)
-			if !currentPayloadMetadata.SetSolid(true) {
-				return
-			}
-
-			// ... trigger solid event ...
-			tangle.Events.PayloadSolid.Trigger(currentCachedPayload, currentCachedMetadata)
-
-			// ... and schedule check of approvers
-			tangle.ForeachApprovers(currentPayload.Id(), func(payload *payload.CachedPayload, payloadMetadata *CachedPayloadMetadata, cachedTransactionMetadata *CachedTransactionMetadata) {
-				solidificationStack.PushBack([3]interface{}{payload, payloadMetadata, cachedTransactionMetadata})
-			})
-
-			// book the outputs
-			if !currentTransactionMetadata.SetSolid(true) {
-				return
-			}
-
-			tangle.Events.TransactionSolid.Trigger(currentPayload.Transaction(), currentTransactionMetadata)
-
-			tangle.ForEachConsumers(currentPayload.Transaction(), func(payload *payload.CachedPayload, payloadMetadata *CachedPayloadMetadata, cachedTransactionMetadata *CachedTransactionMetadata) {
-				solidificationStack.PushBack([3]interface{}{payload, payloadMetadata, cachedTransactionMetadata})
-			})
-
-			payloadToBook := cachedPayload.Retain()
-			tangle.bookerWorkerPool.Submit(func() {
-				tangle.bookPayloadTransaction(payloadToBook)
-			})
-		}()
-	}
-}
-
-func (tangle *Tangle) bookPayloadTransaction(cachedPayload *payload.CachedPayload) {
-	payloadToBook := cachedPayload.Unwrap()
-	defer cachedPayload.Release()
-
-	if payloadToBook == nil {
-		return
-	}
-	transactionToBook := payloadToBook.Transaction()
-
-	consumedBranches := make(map[BranchId]types.Empty)
-	conflictingConsumersToFork := make(map[transaction.Id]types.Empty)
-	createFork := false
-
-	inputsSuccessfullyProcessed := payloadToBook.Transaction().Inputs().ForEach(func(outputId transaction.OutputId) bool {
-		cachedOutput := tangle.GetTransactionOutput(outputId)
-		defer cachedOutput.Release()
-
-		// abort if the output could not be found
-		output := cachedOutput.Unwrap()
-		if output == nil {
-			return false
-		}
-
-		consumedBranches[output.BranchId()] = types.Void
-
-		// continue if we are the first consumer and there is no double spend
-		consumerCount, firstConsumerId := output.RegisterConsumer(transactionToBook.Id())
-		if consumerCount == 0 {
-			return true
-		}
-
-		// fork into a new branch
-		createFork = true
-
-		// also fork the previous consumer
-		if consumerCount == 1 {
-			conflictingConsumersToFork[firstConsumerId] = types.Void
-		}
-
-		return true
-	})
-
-	if !inputsSuccessfullyProcessed {
-		return
-	}
-
-	transactionToBook.Outputs().ForEach(func(address address.Address, balances []*balance.Balance) bool {
-		newOutput := NewOutput(address, transactionToBook.Id(), MasterBranchId, balances)
-		newOutput.SetSolid(true)
-		tangle.outputStorage.Store(newOutput)
-
-		return true
-	})
-
-	fmt.Println(consumedBranches)
-	fmt.Println(MasterBranchId)
-	fmt.Println(createFork)
-}
-
-func (tangle *Tangle) InheritBranches(branches ...BranchId) (cachedAggregatedBranch *CachedBranch, err error) {
-	// return the MasterBranch if we have no branches in the parameters
-	if len(branches) == 0 {
-		cachedAggregatedBranch = tangle.GetBranch(MasterBranchId)
-
-		return
-	}
-
-	if len(branches) == 1 {
-		cachedAggregatedBranch = tangle.GetBranch(branches[0])
-
-		return
-	}
-
-	// filter out duplicates and shared ancestor Branches (abort if we faced an error)
-	deepestCommonAncestors, err := tangle.findDeepestCommonAncestorBranches(branches...)
-	if err != nil {
-		return
-	}
-
-	// if there is only one branch that we found, then we are done
-	if len(deepestCommonAncestors) == 1 {
-		for _, firstBranchInList := range deepestCommonAncestors {
-			cachedAggregatedBranch = firstBranchInList
-		}
-
-		return
-	}
-
-	// if there is more than one parents: aggregate
-	aggregatedBranchId, aggregatedBranchParents, err := tangle.determineAggregatedBranchDetails(deepestCommonAncestors)
-	if err != nil {
-		return
-	}
-
-	newAggregatedBranchCreated := false
-	cachedAggregatedBranch = &CachedBranch{CachedObject: tangle.branchStorage.ComputeIfAbsent(aggregatedBranchId.Bytes(), func(key []byte) (object objectstorage.StorableObject) {
-		aggregatedReality := NewBranch(aggregatedBranchId, aggregatedBranchParents)
-
-		// TODO: FIX
-		/*
-			for _, parentRealityId := range aggregatedBranchParents {
-				tangle.GetBranch(parentRealityId).Consume(func(branch *Branch) {
-					branch.RegisterSubReality(aggregatedRealityId)
-				})
-			}
-		*/
-
-		aggregatedReality.SetModified()
-
-		newAggregatedBranchCreated = true
-
-		return aggregatedReality
-	})}
-
-	if !newAggregatedBranchCreated {
-		fmt.Println("1")
-		// TODO: FIX
-		/*
-			aggregatedBranch := cachedAggregatedBranch.Unwrap()
-
-			for _, realityId := range aggregatedBranchParents {
-				if aggregatedBranch.AddParentReality(realityId) {
-					tangle.GetBranch(realityId).Consume(func(branch *Branch) {
-						branch.RegisterSubReality(aggregatedRealityId)
-					})
-				}
-			}
-		*/
-	}
-
-	return
-}
-
-func (tangle *Tangle) determineAggregatedBranchDetails(deepestCommonAncestors CachedBranches) (aggregatedBranchId BranchId, aggregatedBranchParents []BranchId, err error) {
-	aggregatedBranchParents = make([]BranchId, len(deepestCommonAncestors))
-
-	i := 0
-	aggregatedBranchConflictParents := make(CachedBranches)
-	for branchId, cachedBranch := range deepestCommonAncestors {
-		// release all following entries if we have encountered an error
-		if err != nil {
-			cachedBranch.Release()
-
-			continue
-		}
-
-		// store BranchId as parent
-		aggregatedBranchParents[i] = branchId
-		i++
-
-		// abort if we could not unwrap the Branch (should never happen)
-		branch := cachedBranch.Unwrap()
-		if branch == nil {
-			cachedBranch.Release()
-
-			err = fmt.Errorf("failed to unwrap brach '%s'", branchId)
-
-			continue
-		}
-
-		if branch.IsAggregated() {
-			aggregatedBranchConflictParents[branchId] = cachedBranch
-
-			continue
-		}
-
-		err = tangle.collectClosestConflictAncestors(branch, aggregatedBranchConflictParents)
-
-		cachedBranch.Release()
-	}
-
-	if err != nil {
-		aggregatedBranchConflictParents.Release()
-		aggregatedBranchConflictParents = nil
-
-		return
-	}
-
-	aggregatedBranchId = tangle.generateAggregatedBranchId(aggregatedBranchConflictParents)
-
-	return
-}
-
-func (tangle *Tangle) generateAggregatedBranchId(aggregatedBranches CachedBranches) BranchId {
-	counter := 0
-	branchIds := make([]BranchId, len(aggregatedBranches))
-	for branchId, cachedBranch := range aggregatedBranches {
-		branchIds[counter] = branchId
-
-		counter++
-
-		cachedBranch.Release()
-	}
-
-	sort.Slice(branchIds, func(i, j int) bool {
-		for k := 0; k < len(branchIds[k]); k++ {
-			if branchIds[i][k] < branchIds[j][k] {
-				return true
-			} else if branchIds[i][k] > branchIds[j][k] {
-				return false
-			}
-		}
-
-		return false
-	})
-
-	marshalUtil := marshalutil.New(BranchIdLength * len(branchIds))
-	for _, branchId := range branchIds {
-		marshalUtil.WriteBytes(branchId.Bytes())
-	}
-
-	return blake2b.Sum256(marshalUtil.Bytes())
-}
-
-func (tangle *Tangle) collectClosestConflictAncestors(branch *Branch, closestConflictAncestors CachedBranches) (err error) {
-	// initialize stack
-	stack := list.New()
-	for _, parentRealityId := range branch.ParentBranches() {
-		stack.PushBack(parentRealityId)
-	}
-
-	// work through stack
-	processedBranches := make(map[BranchId]types.Empty)
-	for stack.Len() != 0 {
-		// iterate through the parents (in a func so we can used defer)
-		err = func() error {
-			// pop parent branch id from stack
-			firstStackElement := stack.Front()
-			defer stack.Remove(firstStackElement)
-			parentBranchId := stack.Front().Value.(BranchId)
-
-			// abort if the parent has been processed already
-			if _, branchProcessed := processedBranches[parentBranchId]; branchProcessed {
-				return nil
-			}
-			processedBranches[parentBranchId] = types.Void
-
-			// load parent branch from database
-			cachedParentBranch := tangle.GetBranch(parentBranchId)
-
-			// abort if the parent branch could not be found (should never happen)
-			parentBranch := cachedParentBranch.Unwrap()
-			if parentBranch == nil {
-				cachedParentBranch.Release()
-
-				return fmt.Errorf("failed to load branch '%s'", parentBranchId)
-			}
-
-			// if the parent Branch is not aggregated, then we have found the closest conflict ancestor
-			if !parentBranch.IsAggregated() {
-				closestConflictAncestors[parentBranchId] = cachedParentBranch
-
-				return nil
-			}
-
-			// queue parents for additional check (recursion)
-			for _, parentRealityId := range parentBranch.ParentBranches() {
-				stack.PushBack(parentRealityId)
-			}
-
-			// release the branch (we don't need it anymore)
-			cachedParentBranch.Release()
-
-			return nil
-		}()
-
-		if err != nil {
-			return
-		}
-	}
-
-	return
-}
-
-// findDeepestCommonAncestorBranches takes a number of BranchIds and determines the most specialized Branches (furthest
-// away from the MasterBranch) in that list, that contains all of the named BranchIds.
-//
-// Example: If we hand in "A, B" and B has A as its parent, then the result will contain the Branch B, because B is a
-//          child of A.
-func (tangle *Tangle) findDeepestCommonAncestorBranches(branches ...BranchId) (result CachedBranches, err error) {
-	result = make(CachedBranches)
-
-	processedBranches := make(map[BranchId]types.Empty)
-	for _, branchId := range branches {
-		err = func() error {
-			// continue, if we have processed this branch already
-			if _, exists := processedBranches[branchId]; exists {
-				return nil
-			}
-			processedBranches[branchId] = types.Void
-
-			// load branch from objectstorage
-			cachedBranch := tangle.GetBranch(branchId)
-
-			// abort if we could not load the CachedBranch
-			branch := cachedBranch.Unwrap()
-			if branch == nil {
-				cachedBranch.Release()
-
-				return fmt.Errorf("could not load branch '%s'", branchId)
-			}
-
-			// check branches position relative to already aggregated branches
-			for aggregatedBranchId, cachedAggregatedBranch := range result {
-				// abort if we can not load the branch
-				aggregatedBranch := cachedAggregatedBranch.Unwrap()
-				if aggregatedBranch == nil {
-					return fmt.Errorf("could not load branch '%s'", aggregatedBranchId)
-				}
-
-				// if the current branch is an ancestor of an already aggregated branch, then we have found the more
-				// "specialized" branch already and keep it
-				if isAncestor, ancestorErr := tangle.branchIsAncestorOfBranch(branch, aggregatedBranch); isAncestor || ancestorErr != nil {
-					return ancestorErr
-				}
-
-				// check if the aggregated Branch is an ancestor of the current Branch and abort if we face an error
-				isAncestor, ancestorErr := tangle.branchIsAncestorOfBranch(aggregatedBranch, branch)
-				if ancestorErr != nil {
-					return ancestorErr
-				}
-
-				// if the aggregated branch is an ancestor of the current branch, then we have found a more specialized
-				// Branch and replace the old one with this one.
-				if isAncestor {
-					// replace aggregated branch if we have found a more specialized on
-					delete(result, aggregatedBranchId)
-					cachedAggregatedBranch.Release()
-
-					result[branchId] = cachedBranch
-
-					return nil
-				}
-			}
-
-			// store the branch as a new aggregate candidate if it was not found to be in any relation with the already
-			// aggregated ones.
-			result[branchId] = cachedBranch
-
-			return nil
-		}()
-
-		// abort if an error occurred while processing the current branch
-		if err != nil {
-			result.Release()
-			result = nil
-
-			return
-		}
-	}
-
-	return
-}
-
-func (tangle *Tangle) branchIsAncestorOfBranch(ancestor *Branch, descendant *Branch) (isAncestor bool, err error) {
-	if ancestor.Id() == descendant.Id() {
-		return true, nil
-	}
-
-	ancestorBranches, err := tangle.getAncestorBranches(descendant)
-	if err != nil {
-		return
-	}
-
-	ancestorBranches.Consume(func(ancestorOfDescendant *Branch) {
-		if ancestorOfDescendant.Id() == ancestor.Id() {
-			isAncestor = true
-		}
-	})
-
-	return
-}
-
-func (tangle *Tangle) getAncestorBranches(branch *Branch) (ancestorBranches CachedBranches, err error) {
-	// initialize result
-	ancestorBranches = make(CachedBranches)
-
-	// initialize stack
-	stack := list.New()
-	for _, parentRealityId := range branch.ParentBranches() {
-		stack.PushBack(parentRealityId)
-	}
-
-	// work through stack
-	for stack.Len() != 0 {
-		// iterate through the parents (in a func so we can used defer)
-		err = func() error {
-			// pop parent branch id from stack
-			firstStackElement := stack.Front()
-			defer stack.Remove(firstStackElement)
-			parentBranchId := stack.Front().Value.(BranchId)
-
-			// abort if the parent has been processed already
-			if _, branchProcessed := ancestorBranches[parentBranchId]; branchProcessed {
-				return nil
-			}
-
-			// load parent branch from database
-			cachedParentBranch := tangle.GetBranch(parentBranchId)
-
-			// abort if the parent branch could not be founds (should never happen)
-			parentBranch := cachedParentBranch.Unwrap()
-			if parentBranch == nil {
-				cachedParentBranch.Release()
-
-				return fmt.Errorf("failed to unwrap branch '%s'", parentBranchId)
-			}
-
-			// store parent branch in result
-			ancestorBranches[parentBranchId] = cachedParentBranch
-
-			// queue parents for additional check (recursion)
-			for _, parentRealityId := range parentBranch.ParentBranches() {
-				stack.PushBack(parentRealityId)
-			}
-
-			return nil
-		}()
-
-		// abort if an error occurs while trying to process the parents
-		if err != nil {
-			ancestorBranches.Release()
-			ancestorBranches = nil
-
-			return
-		}
-	}
-
-	return
-}
-
-func (tangle *Tangle) GetBranch(branchId BranchId) *CachedBranch {
-	// TODO: IMPLEMENT
-	return nil
-}
-
-func (tangle *Tangle) ForeachApprovers(payloadId payload.Id, consume func(payload *payload.CachedPayload, payloadMetadata *CachedPayloadMetadata, cachedTransactionMetadata *CachedTransactionMetadata)) {
-	tangle.GetApprovers(payloadId).Consume(func(approver *PayloadApprover) {
-		approvingPayloadId := approver.GetApprovingPayloadId()
-		approvingCachedPayload := tangle.GetPayload(approvingPayloadId)
-
-		approvingCachedPayload.Consume(func(payload *payload.Payload) {
-			consume(approvingCachedPayload, tangle.GetPayloadMetadata(approvingPayloadId), tangle.GetTransactionMetadata(payload.Transaction().Id()))
-		})
-	})
-}
-
-func (tangle *Tangle) ForEachConsumers(currentTransaction *transaction.Transaction, consume func(payload *payload.CachedPayload, payloadMetadata *CachedPayloadMetadata, cachedTransactionMetadata *CachedTransactionMetadata)) {
-	seenTransactions := make(map[transaction.Id]types.Empty)
-	currentTransaction.Outputs().ForEach(func(address address.Address, balances []*balance.Balance) bool {
-		tangle.GetConsumers(transaction.NewOutputId(address, currentTransaction.Id())).Consume(func(consumer *Consumer) {
-			// keep track of the processed transactions (the same transaction can consume multiple outputs)
-			if _, transactionSeen := seenTransactions[consumer.TransactionId()]; transactionSeen {
-				seenTransactions[consumer.TransactionId()] = types.Void
-
-				transactionMetadata := tangle.GetTransactionMetadata(consumer.TransactionId())
-
-				// retrieve all the payloads that attached the transaction
-				tangle.GetAttachments(consumer.TransactionId()).Consume(func(attachment *Attachment) {
-					consume(tangle.GetPayload(attachment.PayloadId()), tangle.GetPayloadMetadata(attachment.PayloadId()), transactionMetadata)
-				})
-			}
-		})
-
-		return true
-	})
-}
-
-// isPayloadSolid returns true if the given payload is solid. A payload is considered to be solid solid, if it is either
-// already marked as solid or if its referenced payloads are marked as solid.
-func (tangle *Tangle) isPayloadSolid(payload *payload.Payload, metadata *PayloadMetadata) bool {
-	if payload == nil || payload.IsDeleted() {
-		return false
-	}
-
-	if metadata == nil || metadata.IsDeleted() {
-		return false
-	}
-
-	if metadata.IsSolid() {
-		return true
-	}
-
-	return tangle.isPayloadMarkedAsSolid(payload.TrunkId()) && tangle.isPayloadMarkedAsSolid(payload.BranchId())
-}
-
-// isPayloadMarkedAsSolid returns true if the payload was marked as solid already (by setting the corresponding flags
-// in its metadata.
-func (tangle *Tangle) isPayloadMarkedAsSolid(payloadId payload.Id) bool {
-	if payloadId == payload.GenesisId {
-		return true
-	}
-
-	transactionMetadataCached := tangle.GetPayloadMetadata(payloadId)
-	if transactionMetadata := transactionMetadataCached.Unwrap(); transactionMetadata == nil {
-		transactionMetadataCached.Release()
-
-		// if transaction is missing and was not reported as missing, yet
-		if cachedMissingPayload, missingPayloadStored := tangle.missingPayloadStorage.StoreIfAbsent(NewMissingPayload(payloadId)); missingPayloadStored {
-			cachedMissingPayload.Consume(func(object objectstorage.StorableObject) {
-				tangle.Events.PayloadMissing.Trigger(object.(*MissingPayload).GetId())
-			})
-		}
-
-		return false
-	} else if !transactionMetadata.IsSolid() {
-		transactionMetadataCached.Release()
-
-		return false
-	}
-	transactionMetadataCached.Release()
-
-	return true
-}
-
-func (tangle *Tangle) isTransactionSolid(tx *transaction.Transaction, metadata *TransactionMetadata) (bool, error) {
-	// abort if any of the models are nil or has been deleted
-	if tx == nil || tx.IsDeleted() || metadata == nil || metadata.IsDeleted() {
-		return false, nil
-	}
-
-	// abort if we have previously determined the solidity status of the transaction already
-	if metadata.Solid() {
-		return true, nil
-	}
-
-	// get outputs that were referenced in the transaction inputs
-	cachedInputs := tangle.getCachedOutputsFromTransactionInputs(tx)
-	defer cachedInputs.Release()
-
-	// check the solidity of the inputs and retrieve the consumed balances
-	inputsSolid, consumedBalances, err := tangle.checkTransactionInputs(cachedInputs)
-
-	// abort if an error occurred or the inputs are not solid, yet
-	if !inputsSolid || err != nil {
-		return false, err
-	}
-
-	if !tangle.checkTransactionOutputs(consumedBalances, tx.Outputs()) {
-		return false, fmt.Errorf("the outputs do not match the inputs in transaction with id '%s'", tx.Id())
-	}
-
-	return true, nil
-}
-
-func (tangle *Tangle) getCachedOutputsFromTransactionInputs(tx *transaction.Transaction) (result CachedOutputs) {
-	result = make(CachedOutputs)
-	tx.Inputs().ForEach(func(inputId transaction.OutputId) bool {
-		result[inputId] = tangle.GetTransactionOutput(inputId)
-
-		return true
-	})
-
-	return
-}
-
-func (tangle *Tangle) checkTransactionInputs(cachedInputs CachedOutputs) (inputsSolid bool, consumedBalances map[balance.Color]int64, err error) {
-	inputsSolid = true
-	consumedBalances = make(map[balance.Color]int64)
-
-	for inputId, cachedInput := range cachedInputs {
-		if !cachedInput.Exists() {
-			inputsSolid = false
-
-			if cachedMissingOutput, missingOutputStored := tangle.missingOutputStorage.StoreIfAbsent(NewMissingOutput(inputId)); missingOutputStored {
-				cachedMissingOutput.Consume(func(object objectstorage.StorableObject) {
-					tangle.Events.OutputMissing.Trigger(object.(*MissingOutput).Id())
-				})
-			}
-
-			continue
-		}
-
-		// should never be nil as we check Exists() before
-		input := cachedInput.Unwrap()
-
-		// update solid status
-		inputsSolid = inputsSolid && input.Solid()
-
-		// calculate the input balances
-		for _, inputBalance := range input.Balances() {
-			var newBalance int64
-			if currentBalance, balanceExists := consumedBalances[inputBalance.Color()]; balanceExists {
-				// check overflows in the numbers
-				if inputBalance.Value() > math.MaxInt64-currentBalance {
-					err = fmt.Errorf("buffer overflow in balances of inputs")
-
-					return
-				}
-
-				newBalance = currentBalance + inputBalance.Value()
-			} else {
-				newBalance = inputBalance.Value()
-			}
-			consumedBalances[inputBalance.Color()] = newBalance
-		}
-	}
-
-	return
-}
-
// checkTransactionOutputs is a utility function that returns true, if the outputs are consuming all of the given inputs
// (the sum of all the balance changes is 0). It also accounts for the ability to "recolor" coins during the creation of
// outputs. If this function returns false, then the outputs that are defined in the transaction are invalid and the
// transaction should be removed from the ledger state.
//
// NOTE: this method mutates the passed-in inputBalances map (spent colors are
// subtracted and exhausted entries are deleted), so callers must not reuse the
// map after the call.
func (tangle *Tangle) checkTransactionOutputs(inputBalances map[balance.Color]int64, outputs *transaction.Outputs) bool {
	// create a variable to keep track of outputs that create a new color
	var newlyColoredCoins int64

	// iterate through outputs and check them one by one; ForEach returns false
	// (-> aborted) as soon as any callback returns false
	aborted := !outputs.ForEach(func(address address.Address, balances []*balance.Balance) bool {
		for _, outputBalance := range balances {
			// abort if the output creates a negative or empty output
			if outputBalance.Value() <= 0 {
				return false
			}

			// sidestep logic if we have a newly colored output (we check the supply later)
			if outputBalance.Color() == balance.COLOR_NEW {
				// catch int64 overflows when summing newly colored coins
				if newlyColoredCoins > math.MaxInt64-outputBalance.Value() {
					return false
				}

				newlyColoredCoins += outputBalance.Value()

				continue
			}

			// check if the used color does not exist in our supply
			availableBalance, spentColorExists := inputBalances[outputBalance.Color()]
			if !spentColorExists {
				return false
			}

			// abort if we spend more coins of the given color than we have
			if availableBalance < outputBalance.Value() {
				return false
			}

			// subtract the spent coins from the supply of this transaction
			inputBalances[outputBalance.Color()] -= outputBalance.Value()

			// cleanup the entry in the supply map if we have exhausted all funds
			if inputBalances[outputBalance.Color()] == 0 {
				delete(inputBalances, outputBalance.Color())
			}
		}

		return true
	})

	// abort if the previous checks failed
	if aborted {
		return false
	}

	// sum up the inputs that were not spent by a same-colored output
	var unspentCoins int64
	for _, unspentBalance := range inputBalances {
		// catch int64 overflows when summing the leftovers
		if unspentCoins > math.MaxInt64-unspentBalance {
			return false
		}

		unspentCoins += unspentBalance
	}

	// the outputs are valid if they spend all inputs: every leftover input coin
	// must have been recolored into COLOR_NEW, so the two sums must match
	return unspentCoins == newlyColoredCoins
}
diff --git a/packages/binary/valuetransfer/tangle/tangle_test.go b/packages/binary/valuetransfer/tangle/tangle_test.go
deleted file mode 100644
index 9980c5be..00000000
--- a/packages/binary/valuetransfer/tangle/tangle_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package tangle
-
-import (
-	"io/ioutil"
-	"os"
-	"testing"
-	"time"
-
-	"github.com/iotaledger/hive.go/crypto/ed25519"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/address"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/balance"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/payload"
-	"github.com/iotaledger/goshimmer/packages/binary/valuetransfer/transaction"
-	"github.com/iotaledger/goshimmer/packages/database"
-	"github.com/iotaledger/goshimmer/plugins/config"
-)
-
-func TestAttachment(t *testing.T) {
-	transactionId := transaction.RandomId()
-	payloadId := payload.RandomId()
-
-	attachment := NewAttachment(transactionId, payloadId)
-
-	assert.Equal(t, transactionId, attachment.TransactionId())
-	assert.Equal(t, payloadId, attachment.PayloadId())
-
-	clonedAttachment, err, consumedBytes := AttachmentFromBytes(attachment.Bytes())
-	if err != nil {
-		panic(err)
-	}
-
-	assert.Equal(t, AttachmentLength, consumedBytes)
-	assert.Equal(t, transactionId, clonedAttachment.TransactionId())
-	assert.Equal(t, payloadId, clonedAttachment.PayloadId())
-}
-
-func TestTangle_AttachPayload(t *testing.T) {
-	dir, err := ioutil.TempDir("", t.Name())
-	require.NoError(t, err)
-	defer os.Remove(dir)
-
-	config.Node.Set(database.CFG_DIRECTORY, dir)
-
-	tangle := New(database.GetBadgerInstance())
-	if err := tangle.Prune(); err != nil {
-		t.Error(err)
-
-		return
-	}
-
-	addressKeyPair1 := ed25519.GenerateKeyPair()
-	addressKeyPair2 := ed25519.GenerateKeyPair()
-
-	transferId1, _ := transaction.IdFromBase58("8opHzTAnfzRpPEx21XtnrVTX28YQuCpAjcn1PczScKh")
-	transferId2, _ := transaction.IdFromBase58("4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM")
-
-	input1 := NewOutput(address.FromED25519PubKey(addressKeyPair1.PublicKey), transferId1, MasterBranchId, []*balance.Balance{
-		balance.New(balance.COLOR_IOTA, 337),
-	})
-	input1.SetSolid(true)
-	input2 := NewOutput(address.FromED25519PubKey(addressKeyPair2.PublicKey), transferId2, MasterBranchId, []*balance.Balance{
-		balance.New(balance.COLOR_IOTA, 1000),
-	})
-	input2.SetSolid(true)
-
-	tangle.outputStorage.Store(input1)
-	tangle.outputStorage.Store(input2)
-
-	outputAddress := address.Random()
-
-	tx := transaction.New(
-		transaction.NewInputs(
-			input1.Id(),
-			input2.Id(),
-		),
-
-		transaction.NewOutputs(map[address.Address][]*balance.Balance{
-			outputAddress: {
-				balance.New(balance.COLOR_NEW, 1337),
-			},
-		}),
-	)
-
-	tangle.AttachPayload(payload.New(payload.GenesisId, payload.GenesisId, tx))
-
-	time.Sleep(1 * time.Second)
-
-	outputFound := tangle.GetTransactionOutput(transaction.NewOutputId(outputAddress, tx.Id())).Consume(func(output *Output) {
-		assert.Equal(t, true, output.Solid())
-	})
-	assert.Equal(t, true, outputFound)
-
-	tangle.Shutdown()
-}
-- 
GitLab