diff --git a/packages/autopeering/discover/manager.go b/packages/autopeering/discover/manager.go
index e451a3cf6cee248811613f26b517b222e139c690..3bd36f068cb2d8fd90a1d9ea215d273c99f380af 100644
--- a/packages/autopeering/discover/manager.go
+++ b/packages/autopeering/discover/manager.go
@@ -209,6 +209,7 @@ func (m *manager) loadInitialPeers(masters []*peer.Peer) {
 	if db != nil {
 		peers = db.SeedPeers()
 	}
+
 	peers = append(peers, masters...)
 	for _, p := range peers {
 		m.addDiscoveredPeer(p)
diff --git a/packages/autopeering/discover/manager_test.go b/packages/autopeering/discover/manager_test.go
index fb0132a1b4f1b074a53409f8e4bed9bd62f335e4..1ae1494d0d3037a36c93340ec52411169d6b7158 100644
--- a/packages/autopeering/discover/manager_test.go
+++ b/packages/autopeering/discover/manager_test.go
@@ -6,70 +6,26 @@ import (
 	"time"
 
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer"
-	"github.com/iotaledger/goshimmer/packages/autopeering/peer/service"
+	"github.com/iotaledger/goshimmer/packages/autopeering/peer/peertest"
 	"github.com/iotaledger/goshimmer/packages/autopeering/server"
+	"github.com/iotaledger/goshimmer/packages/database/mapdb"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
 )
 
-type NetworkMock struct {
-	mock.Mock
-
-	loc *peer.Local
-}
-
-func (m *NetworkMock) local() *peer.Local {
-	return m.loc
-}
-
-func (m *NetworkMock) Ping(p *peer.Peer) error {
-	args := m.Called(p)
-	return args.Error(0)
-}
-
-func (m *NetworkMock) DiscoveryRequest(p *peer.Peer) ([]*peer.Peer, error) {
-	args := m.Called(p)
-	return args.Get(0).([]*peer.Peer), args.Error(1)
-}
-
-func newNetworkMock() *NetworkMock {
-	services := service.New()
-	services.Update(service.PeeringKey, "mock", "local")
-	local, _ := peer.NewLocal(services, peer.NewMemoryDB(log))
-	return &NetworkMock{
-		// no database needed
-		loc: local,
-	}
-}
-
-func newDummyPeer(name string) *peer.Peer {
-	services := service.New()
-	services.Update(service.PeeringKey, "dummy", name)
-
-	return peer.NewPeer([]byte(name), services)
-}
-
-func newTestManager() (*manager, *NetworkMock, func()) {
-	networkMock := newNetworkMock()
-	mgr := newManager(networkMock, nil, log)
-	teardown := func() {
-		mgr.close()
-	}
-	return mgr, networkMock, teardown
-}
-
 func TestMgrClose(t *testing.T) {
-	_, _, teardown := newTestManager()
+	_, _, teardown := newManagerTest(t)
 	defer teardown()
 
 	time.Sleep(graceTime)
 }
 
 func TestMgrVerifyDiscoveredPeer(t *testing.T) {
-	mgr, m, teardown := newTestManager()
+	mgr, m, teardown := newManagerTest(t)
 	defer teardown()
 
-	p := newDummyPeer("p")
+	p := peertest.NewPeer(testNetwork, "p")
 
 	// expect Ping of peer p
 	m.On("Ping", p).Return(nil).Once()
@@ -86,10 +42,10 @@ func TestMgrVerifyDiscoveredPeer(t *testing.T) {
 }
 
 func TestMgrReverifyPeer(t *testing.T) {
-	mgr, m, teardown := newTestManager()
+	mgr, m, teardown := newManagerTest(t)
 	defer teardown()
 
-	p := newDummyPeer("p")
+	p := peertest.NewPeer(testNetwork, "p")
 
 	// expect Ping of peer p
 	m.On("Ping", p).Return(nil).Once()
@@ -106,11 +62,11 @@ func TestMgrReverifyPeer(t *testing.T) {
 }
 
 func TestMgrRequestDiscoveredPeer(t *testing.T) {
-	mgr, m, teardown := newTestManager()
+	mgr, m, teardown := newManagerTest(t)
 	defer teardown()
 
-	p1 := newDummyPeer("verified")
-	p2 := newDummyPeer("discovered")
+	p1 := peertest.NewPeer(testNetwork, "verified")
+	p2 := peertest.NewPeer(testNetwork, "discovered")
 
 	// expect DiscoveryRequest on the discovered peer
 	m.On("DiscoveryRequest", p1).Return([]*peer.Peer{p2}, nil).Once()
@@ -125,10 +81,10 @@ func TestMgrRequestDiscoveredPeer(t *testing.T) {
 }
 
 func TestMgrAddManyVerifiedPeers(t *testing.T) {
-	mgr, m, teardown := newTestManager()
+	mgr, m, teardown := newManagerTest(t)
 	defer teardown()
 
-	p := newDummyPeer("p")
+	p := peertest.NewPeer(testNetwork, "p")
 
 	// expect Ping of peer p
 	m.On("Ping", p).Return(nil).Once()
@@ -140,7 +96,7 @@ func TestMgrAddManyVerifiedPeers(t *testing.T) {
 
 	mgr.addVerifiedPeer(p)
 	for i := 0; i < maxManaged+maxReplacements; i++ {
-		mgr.addVerifiedPeer(newDummyPeer(fmt.Sprintf("p%d", i)))
+		mgr.addVerifiedPeer(peertest.NewPeer(testNetwork, fmt.Sprintf("p%d", i)))
 	}
 
 	mgr.doReverify(make(chan struct{})) // manually trigger a verify
@@ -153,10 +109,10 @@ func TestMgrAddManyVerifiedPeers(t *testing.T) {
 }
 
 func TestMgrDeleteUnreachablePeer(t *testing.T) {
-	mgr, m, teardown := newTestManager()
+	mgr, m, teardown := newManagerTest(t)
 	defer teardown()
 
-	p := newDummyPeer("p")
+	p := peertest.NewPeer(testNetwork, "p")
 
 	// expect Ping of peer p, but return error
 	m.On("Ping", p).Return(server.ErrTimeout).Times(1)
@@ -168,7 +124,7 @@ func TestMgrDeleteUnreachablePeer(t *testing.T) {
 
 	mgr.addVerifiedPeer(p)
 	for i := 0; i < maxManaged; i++ {
-		mgr.addVerifiedPeer(newDummyPeer(fmt.Sprintf("p%d", i)))
+		mgr.addVerifiedPeer(peertest.NewPeer(testNetwork, fmt.Sprintf("p%d", i)))
 	}
 
 	mgr.doReverify(make(chan struct{})) // manually trigger a verify
@@ -179,3 +135,34 @@ func TestMgrDeleteUnreachablePeer(t *testing.T) {
 
 	m.AssertExpectations(t)
 }
+
+type NetworkMock struct {
+	mock.Mock
+
+	loc *peer.Local
+}
+
+func newManagerTest(t require.TestingT) (*manager, *NetworkMock, func()) {
+	db, err := peer.NewDB(mapdb.NewMapDB())
+	require.NoError(t, err)
+	local := peertest.NewLocal(testNetwork, testAddress, db)
+	networkMock := &NetworkMock{
+		loc: local,
+	}
+	mgr := newManager(networkMock, nil, log)
+	return mgr, networkMock, mgr.close
+}
+
+func (m *NetworkMock) local() *peer.Local {
+	return m.loc
+}
+
+func (m *NetworkMock) Ping(p *peer.Peer) error {
+	args := m.Called(p)
+	return args.Error(0)
+}
+
+func (m *NetworkMock) DiscoveryRequest(p *peer.Peer) ([]*peer.Peer, error) {
+	args := m.Called(p)
+	return args.Get(0).([]*peer.Peer), args.Error(1)
+}
diff --git a/packages/autopeering/discover/mpeer_test.go b/packages/autopeering/discover/mpeer_test.go
index 97a8f22d263509b8679f91d27de6774b00a160d0..f917382de684b4b2c7e74ee6748b742b5d39e1c5 100644
--- a/packages/autopeering/discover/mpeer_test.go
+++ b/packages/autopeering/discover/mpeer_test.go
@@ -5,22 +5,15 @@ import (
 	"testing"
 
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer"
-	"github.com/iotaledger/goshimmer/packages/autopeering/peer/service"
+	"github.com/iotaledger/goshimmer/packages/autopeering/peer/peertest"
 	"github.com/stretchr/testify/assert"
 )
 
-func newTestPeer(name string) *peer.Peer {
-	services := service.New()
-	services.Update(service.PeeringKey, "test", name)
-
-	return peer.NewPeer([]byte(name), services)
-}
-
 func TestUnwrapPeers(t *testing.T) {
 	m := make([]*mpeer, 5)
 	p := make([]*peer.Peer, 5)
 	for i := range m {
-		p[i] = newTestPeer(fmt.Sprintf("%d", i))
+		p[i] = peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))
 		m[i] = &mpeer{Peer: *p[i]}
 	}
 
@@ -31,9 +24,9 @@ func TestUnwrapPeers(t *testing.T) {
 func TestContainsPeer(t *testing.T) {
 	m := make([]*mpeer, 5)
 	p := make([]*peer.Peer, 5)
-	k := newTestPeer("k")
+	k := peertest.NewPeer(testNetwork, "k")
 	for i := range m {
-		p[i] = newTestPeer(fmt.Sprintf("%d", i))
+		p[i] = peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))
 		m[i] = &mpeer{Peer: *p[i]}
 	}
 
@@ -46,7 +39,7 @@ func TestContainsPeer(t *testing.T) {
 func TestUnshiftPeer(t *testing.T) {
 	m := make([]*mpeer, 5)
 	for i := range m {
-		m[i] = &mpeer{Peer: *newTestPeer(fmt.Sprintf("%d", i))}
+		m[i] = &mpeer{Peer: *peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))}
 	}
 
 	type testCase struct {
@@ -87,7 +80,7 @@ func TestUnshiftPeer(t *testing.T) {
 func TestDeletePeer(t *testing.T) {
 	m := make([]*mpeer, 5)
 	for i := range m {
-		m[i] = &mpeer{Peer: *newTestPeer(fmt.Sprintf("%d", i))}
+		m[i] = &mpeer{Peer: *peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))}
 	}
 
 	type testCase struct {
@@ -124,7 +117,7 @@ func TestDeletePeerByID(t *testing.T) {
 	m := make([]*mpeer, 5)
 	p := make([]*peer.Peer, 5)
 	for i := range m {
-		p[i] = newTestPeer(fmt.Sprintf("%d", i))
+		p[i] = peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))
 		m[i] = &mpeer{Peer: *p[i]}
 	}
 
@@ -162,7 +155,7 @@ func TestPushPeer(t *testing.T) {
 	m := make([]*mpeer, 5)
 	max := len(m) - 1
 	for i := range m {
-		m[i] = &mpeer{Peer: *newTestPeer(fmt.Sprintf("%d", i))}
+		m[i] = &mpeer{Peer: *peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))}
 	}
 
 	type testCase struct {
diff --git a/packages/autopeering/discover/protocol.go b/packages/autopeering/discover/protocol.go
index 07790b9467f1e7ee1406bb867910a9c267b043fa..4b4ccb2e8f9ebf020274898908ac1b7a12b6a8ad 100644
--- a/packages/autopeering/discover/protocol.go
+++ b/packages/autopeering/discover/protocol.go
@@ -89,17 +89,12 @@ func (p *Protocol) EnsureVerified(peer *peer.Peer) error {
 
 // GetVerifiedPeer returns the verified peer with the given ID, or nil if no such peer exists.
 func (p *Protocol) GetVerifiedPeer(id peer.ID, addr string) *peer.Peer {
-	if !p.IsVerified(id, addr) {
-		return nil
-	}
-	peer := p.loc.Database().Peer(id)
-	if peer == nil {
-		return nil
-	}
-	if peer.Address() != addr {
-		return nil
+	for _, verified := range p.mgr.getVerifiedPeers() {
+		if verified.ID() == id && verified.Address() == addr {
+			return unwrapPeer(verified)
+		}
 	}
-	return peer
+	return nil
 }
 
 // GetVerifiedPeers returns all the currently managed peers that have been verified at least once.
diff --git a/packages/autopeering/discover/protocol_test.go b/packages/autopeering/discover/protocol_test.go
index 92a5740dd64f69e2301bf070ab6c2a1e11251102..2a5842806b8ab37726a90a4cd22cd4c59df61326 100644
--- a/packages/autopeering/discover/protocol_test.go
+++ b/packages/autopeering/discover/protocol_test.go
@@ -5,16 +5,22 @@ import (
 	"time"
 
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer"
+	"github.com/iotaledger/goshimmer/packages/autopeering/peer/peertest"
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer/service"
 	"github.com/iotaledger/goshimmer/packages/autopeering/server"
 	"github.com/iotaledger/goshimmer/packages/autopeering/transport"
+	"github.com/iotaledger/goshimmer/packages/database/mapdb"
 	"github.com/iotaledger/hive.go/logger"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"
 )
 
-const graceTime = 100 * time.Millisecond
+const (
+	testNetwork = "test"
+	testAddress = "test"
+	graceTime   = 100 * time.Millisecond
+)
 
 var log = logger.NewExampleLogger("discover")
 
@@ -28,46 +34,17 @@ func init() {
 	})
 }
 
-// newTest creates a new discovery server and also returns the teardown.
-func newTest(t require.TestingT, trans transport.Transport, logger *logger.Logger, masters ...*peer.Peer) (*server.Server, *Protocol, func()) {
-	l := logger.Named(trans.LocalAddr().String())
-
-	services := service.New()
-	services.Update(service.PeeringKey, trans.LocalAddr().Network(), trans.LocalAddr().String())
-	db := peer.NewMemoryDB(l.Named("db"))
-	local, err := peer.NewLocal(services, db)
-	require.NoError(t, err)
-
-	cfg := Config{
-		Log:         l,
-		MasterPeers: masters,
-	}
-	prot := New(local, cfg)
-	srv := server.Serve(local, trans, l.Named("srv"), prot)
-	prot.Start(srv)
-
-	teardown := func() {
-		srv.Close()
-		prot.Close()
-		db.Close()
-	}
-	return srv, prot, teardown
-}
-
-func getPeer(s *server.Server) *peer.Peer {
-	return &s.Local().Peer
-}
-
 func TestProtVerifyMaster(t *testing.T) {
 	p2p := transport.P2P()
 	defer p2p.Close()
 
-	srvA, _, closeA := newTest(t, p2p.A, log)
+	protA, closeA := newTestProtocol(p2p.A, log)
 	defer closeA()
-	peerA := getPeer(srvA)
+
+	peerA := getPeer(protA)
 
 	// use peerA as masters peer
-	_, protB, closeB := newTest(t, p2p.B, log, peerA)
+	protB, closeB := newTestProtocol(p2p.B, log, peerA)
 
 	time.Sleep(graceTime) // wait for the packages to ripple through the network
 	closeB()              // close srvB to avoid race conditions, when asserting
@@ -82,13 +59,13 @@ func TestProtPingPong(t *testing.T) {
 	p2p := transport.P2P()
 	defer p2p.Close()
 
-	srvA, protA, closeA := newTest(t, p2p.A, log)
+	protA, closeA := newTestProtocol(p2p.A, log)
 	defer closeA()
-	srvB, protB, closeB := newTest(t, p2p.B, log)
+	protB, closeB := newTestProtocol(p2p.B, log)
 	defer closeB()
 
-	peerA := getPeer(srvA)
-	peerB := getPeer(srvB)
+	peerA := getPeer(protA)
+	peerB := getPeer(protB)
 
 	// send a Ping from node A to B
 	t.Run("A->B", func(t *testing.T) { assert.NoError(t, protA.Ping(peerB)) })
@@ -103,15 +80,13 @@ func TestProtPingTimeout(t *testing.T) {
 	p2p := transport.P2P()
 	defer p2p.Close()
 
-	_, protA, closeA := newTest(t, p2p.A, log)
+	protA, closeA := newTestProtocol(p2p.A, log)
 	defer closeA()
-	srvB, _, closeB := newTest(t, p2p.B, log)
+	protB, closeB := newTestProtocol(p2p.B, log)
 	closeB() // close the connection right away to prevent any replies
 
-	peerB := getPeer(srvB)
-
 	// send a Ping from node A to B
-	err := protA.Ping(peerB)
+	err := protA.Ping(getPeer(protB))
 	assert.EqualError(t, err, server.ErrTimeout.Error())
 }
 
@@ -119,12 +94,12 @@ func TestProtVerifiedPeers(t *testing.T) {
 	p2p := transport.P2P()
 	defer p2p.Close()
 
-	_, protA, closeA := newTest(t, p2p.A, log)
+	protA, closeA := newTestProtocol(p2p.A, log)
 	defer closeA()
-	srvB, _, closeB := newTest(t, p2p.B, log)
+	protB, closeB := newTestProtocol(p2p.B, log)
 	defer closeB()
 
-	peerB := getPeer(srvB)
+	peerB := getPeer(protB)
 
 	// send a Ping from node A to B
 	assert.NoError(t, protA.Ping(peerB))
@@ -141,13 +116,13 @@ func TestProtVerifiedPeer(t *testing.T) {
 	p2p := transport.P2P()
 	defer p2p.Close()
 
-	srvA, protA, closeA := newTest(t, p2p.A, log)
+	protA, closeA := newTestProtocol(p2p.A, log)
 	defer closeA()
-	srvB, _, closeB := newTest(t, p2p.B, log)
+	protB, closeB := newTestProtocol(p2p.B, log)
 	defer closeB()
 
-	peerA := getPeer(srvA)
-	peerB := getPeer(srvB)
+	peerA := getPeer(protA)
+	peerB := getPeer(protB)
 
 	// send a Ping from node A to B
 	assert.NoError(t, protA.Ping(peerB))
@@ -155,7 +130,7 @@ func TestProtVerifiedPeer(t *testing.T) {
 
 	// we should have peerB as a verified peer
 	assert.Equal(t, peerB, protA.GetVerifiedPeer(peerB.ID(), peerB.Address()))
-	// we should not have ourself as a verified peer
+	// we should not have ourselves as a verified peer
 	assert.Nil(t, protA.GetVerifiedPeer(peerA.ID(), peerA.Address()))
 	// the address of peerB should match
 	assert.Nil(t, protA.GetVerifiedPeer(peerB.ID(), ""))
@@ -165,13 +140,13 @@ func TestProtDiscoveryRequest(t *testing.T) {
 	p2p := transport.P2P()
 	defer p2p.Close()
 
-	srvA, protA, closeA := newTest(t, p2p.A, log)
+	protA, closeA := newTestProtocol(p2p.A, log)
 	defer closeA()
-	srvB, protB, closeB := newTest(t, p2p.B, log)
+	protB, closeB := newTestProtocol(p2p.B, log)
 	defer closeB()
 
-	peerA := getPeer(srvA)
-	peerB := getPeer(srvB)
+	peerA := getPeer(protA)
+	peerB := getPeer(protB)
 
 	// request peers from node A
 	t.Run("A->B", func(t *testing.T) {
@@ -191,20 +166,23 @@ func TestProtServices(t *testing.T) {
 	p2p := transport.P2P()
 	defer p2p.Close()
 
-	srvA, _, closeA := newTest(t, p2p.A, log)
+	protA, closeA := newTestProtocol(p2p.A, log)
 	defer closeA()
-	err := srvA.Local().UpdateService(service.FPCKey, "fpc", p2p.A.LocalAddr().String())
+
+	err := protA.local().UpdateService(service.FPCKey, "fpc", p2p.A.LocalAddr().String())
 	require.NoError(t, err)
 
+	peerA := getPeer(protA)
+
 	// use peerA as masters peer
-	_, protB, closeB := newTest(t, p2p.B, log, getPeer(srvA))
+	protB, closeB := newTestProtocol(p2p.B, log, peerA)
 	defer closeB()
 
 	time.Sleep(graceTime) // wait for the packages to ripple through the network
 	ps := protB.GetVerifiedPeers()
 
-	if assert.ElementsMatch(t, []*peer.Peer{getPeer(srvA)}, ps) {
-		assert.Equal(t, srvA.Local().Services(), ps[0].Services())
+	if assert.ElementsMatch(t, []*peer.Peer{peerA}, ps) {
+		assert.Equal(t, protA.local().Services(), ps[0].Services())
 	}
 }
 
@@ -212,25 +190,25 @@ func TestProtDiscovery(t *testing.T) {
 	net := transport.NewNetwork("M", "A", "B", "C")
 	defer net.Close()
 
-	srvM, protM, closeM := newTest(t, net.GetTransport("M"), log)
+	protM, closeM := newTestProtocol(net.GetTransport("M"), log)
 	defer closeM()
 	time.Sleep(graceTime) // wait for the master to initialize
 
-	srvA, protA, closeA := newTest(t, net.GetTransport("A"), log, getPeer(srvM))
+	protA, closeA := newTestProtocol(net.GetTransport("A"), log, getPeer(protM))
 	defer closeA()
-	srvB, protB, closeB := newTest(t, net.GetTransport("B"), log, getPeer(srvM))
+	protB, closeB := newTestProtocol(net.GetTransport("B"), log, getPeer(protM))
 	defer closeB()
-	srvC, protC, closeC := newTest(t, net.GetTransport("C"), log, getPeer(srvM))
+	protC, closeC := newTestProtocol(net.GetTransport("C"), log, getPeer(protM))
 	defer closeC()
 
 	time.Sleep(queryInterval + graceTime)    // wait for the next discovery cycle
 	time.Sleep(reverifyInterval + graceTime) // wait for the next verification cycle
 
 	// now the full network should be discovered
-	assert.ElementsMatch(t, []*peer.Peer{getPeer(srvA), getPeer(srvB), getPeer(srvC)}, protM.GetVerifiedPeers())
-	assert.ElementsMatch(t, []*peer.Peer{getPeer(srvM), getPeer(srvB), getPeer(srvC)}, protA.GetVerifiedPeers())
-	assert.ElementsMatch(t, []*peer.Peer{getPeer(srvM), getPeer(srvA), getPeer(srvC)}, protB.GetVerifiedPeers())
-	assert.ElementsMatch(t, []*peer.Peer{getPeer(srvM), getPeer(srvA), getPeer(srvB)}, protC.GetVerifiedPeers())
+	assert.ElementsMatch(t, []*peer.Peer{getPeer(protA), getPeer(protB), getPeer(protC)}, protM.GetVerifiedPeers())
+	assert.ElementsMatch(t, []*peer.Peer{getPeer(protM), getPeer(protB), getPeer(protC)}, protA.GetVerifiedPeers())
+	assert.ElementsMatch(t, []*peer.Peer{getPeer(protM), getPeer(protA), getPeer(protC)}, protB.GetVerifiedPeers())
+	assert.ElementsMatch(t, []*peer.Peer{getPeer(protM), getPeer(protA), getPeer(protB)}, protC.GetVerifiedPeers())
 }
 
 func BenchmarkPingPong(b *testing.B) {
@@ -242,12 +220,12 @@ func BenchmarkPingPong(b *testing.B) {
 	reverifyInterval = time.Hour
 	queryInterval = time.Hour
 
-	_, protA, closeA := newTest(b, p2p.A, log)
+	protA, closeA := newTestProtocol(p2p.A, log)
 	defer closeA()
-	srvB, _, closeB := newTest(b, p2p.B, log)
+	protB, closeB := newTestProtocol(p2p.B, log)
 	defer closeB()
 
-	peerB := getPeer(srvB)
+	peerB := getPeer(protB)
 
 	// send initial Ping to ensure that every peer is verified
 	err := protA.Ping(peerB)
@@ -272,12 +250,12 @@ func BenchmarkDiscoveryRequest(b *testing.B) {
 	reverifyInterval = time.Hour
 	queryInterval = time.Hour
 
-	_, protA, closeA := newTest(b, p2p.A, log)
+	protA, closeA := newTestProtocol(p2p.A, log)
 	defer closeA()
-	srvB, _, closeB := newTest(b, p2p.B, log)
+	protB, closeB := newTestProtocol(p2p.B, log)
 	defer closeB()
 
-	peerB := getPeer(srvB)
+	peerB := getPeer(protB)
 
 	// send initial DiscoveryRequest to ensure that every peer is verified
 	_, err := protA.DiscoveryRequest(peerB)
@@ -291,3 +269,25 @@ func BenchmarkDiscoveryRequest(b *testing.B) {
 
 	b.StopTimer()
 }
+
+// newTestProtocol creates a new discovery protocol with a running server and also returns the teardown.
+func newTestProtocol(trans transport.Transport, logger *logger.Logger, masters ...*peer.Peer) (*Protocol, func()) {
+	db, _ := peer.NewDB(mapdb.NewMapDB())
+	local := peertest.NewLocal(trans.LocalAddr().Network(), trans.LocalAddr().String(), db)
+	log := logger.Named(trans.LocalAddr().String())
+
+	prot := New(local, Config{Log: log, MasterPeers: masters})
+
+	srv := server.Serve(local, trans, log, prot)
+	prot.Start(srv)
+
+	teardown := func() {
+		srv.Close()
+		prot.Close()
+	}
+	return prot, teardown
+}
+
+func getPeer(p *Protocol) *peer.Peer {
+	return &p.local().Peer
+}
diff --git a/packages/autopeering/peer/local.go b/packages/autopeering/peer/local.go
index 989d0ff83402f4983bd0a7c33015f43a2aab5d40..e4a8861e7fe4a68ea2e23bce3a54b01ec527fa07 100644
--- a/packages/autopeering/peer/local.go
+++ b/packages/autopeering/peer/local.go
@@ -13,7 +13,7 @@ import (
 type Local struct {
 	Peer
 	key PrivateKey
-	db  DB
+	db  *DB
 
 	// everything below is protected by a lock
 	mu            sync.RWMutex
@@ -32,7 +32,7 @@ func (priv PrivateKey) Public() PublicKey {
 }
 
 // newLocal creates a new local peer.
-func newLocal(key PrivateKey, serviceRecord *service.Record, db DB) *Local {
+func newLocal(key PrivateKey, serviceRecord *service.Record, db *DB) *Local {
 	return &Local{
 		Peer:          *NewPeer(key.Public(), serviceRecord),
 		key:           key,
@@ -44,12 +44,14 @@ func newLocal(key PrivateKey, serviceRecord *service.Record, db DB) *Local {
 // NewLocal creates a new local peer linked to the provided db.
 // If an optional seed is provided, the seed is used to generate the private key. Without a seed,
 // the provided key is loaded from the provided database and generated if not stored there.
-func NewLocal(serviceRecord *service.Record, db DB, seed ...[]byte) (*Local, error) {
+func NewLocal(serviceRecord *service.Record, db *DB, seed ...[]byte) (*Local, error) {
 	var key PrivateKey
 	if len(seed) > 0 {
 		key = PrivateKey(ed25519.NewKeyFromSeed(seed[0]))
-		if err := db.UpdateLocalPrivateKey(key); err != nil {
-			return nil, err
+		if db != nil {
+			if err := db.UpdateLocalPrivateKey(key); err != nil {
+				return nil, err
+			}
 		}
 	} else {
 		var err error
@@ -66,7 +68,7 @@ func NewLocal(serviceRecord *service.Record, db DB, seed ...[]byte) (*Local, err
 }
 
 // Database returns the node database associated with the local peer.
-func (l *Local) Database() DB {
+func (l *Local) Database() *DB {
 	return l.db
 }
 
diff --git a/packages/autopeering/peer/local_test.go b/packages/autopeering/peer/local_test.go
index 469ef80f7c7a6f9a7b596cef7a4fcc6debb9af5f..5d61bfd4b326895d6921909fb96ff7373c178fa4 100644
--- a/packages/autopeering/peer/local_test.go
+++ b/packages/autopeering/peer/local_test.go
@@ -56,7 +56,7 @@ func TestPublicSalt(t *testing.T) {
 	assert.Equal(t, s, got, "Public salt")
 }
 
-func newTestLocal(t require.TestingT, db DB) *Local {
+func newTestLocal(t require.TestingT, db *DB) *Local {
 	var priv PrivateKey
 	var err error
 	if db == nil {
diff --git a/packages/autopeering/peer/mapdb.go b/packages/autopeering/peer/mapdb.go
deleted file mode 100644
index a3ebbb1a1aa45c420565fe353ddc5a95722d2fba..0000000000000000000000000000000000000000
--- a/packages/autopeering/peer/mapdb.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package peer
-
-import (
-	"sync"
-	"time"
-
-	"github.com/iotaledger/hive.go/logger"
-)
-
-// mapDB is a simple implementation of DB using a map.
-type mapDB struct {
-	mutex sync.RWMutex
-	m     map[string]peerEntry
-	key   PrivateKey
-
-	log *logger.Logger
-
-	wg        sync.WaitGroup
-	closeOnce sync.Once
-	closing   chan struct{}
-}
-
-type peerEntry struct {
-	data       []byte
-	properties map[string]peerPropEntry
-}
-
-type peerPropEntry struct {
-	lastPing, lastPong int64
-}
-
-// NewMemoryDB creates a new DB that uses a GO map.
-func NewMemoryDB(log *logger.Logger) DB {
-	db := &mapDB{
-		m:       make(map[string]peerEntry),
-		log:     log,
-		closing: make(chan struct{}),
-	}
-
-	// start the expirer routine
-	db.wg.Add(1)
-	go db.expirer()
-
-	return db
-}
-
-// Close closes the DB.
-func (db *mapDB) Close() {
-	db.closeOnce.Do(func() {
-		db.log.Debugf("closing")
-		close(db.closing)
-		db.wg.Wait()
-	})
-}
-
-// LocalPrivateKey returns the private key stored in the database or creates a new one.
-func (db *mapDB) LocalPrivateKey() (PrivateKey, error) {
-	db.mutex.Lock()
-	defer db.mutex.Unlock()
-
-	if db.key == nil {
-		key, err := generatePrivateKey()
-		db.key = key
-		return key, err
-	}
-
-	return db.key, nil
-}
-
-// UpdateLocalPrivateKey stores the provided key in the database.
-func (db *mapDB) UpdateLocalPrivateKey(key PrivateKey) error {
-	db.mutex.Lock()
-	defer db.mutex.Unlock()
-
-	db.key = key
-	return nil
-}
-
-// LastPing returns that property for the given peer ID and address.
-func (db *mapDB) LastPing(id ID, address string) time.Time {
-	db.mutex.RLock()
-	defer db.mutex.RUnlock()
-
-	peerEntry := db.m[string(id.Bytes())]
-	return time.Unix(peerEntry.properties[address].lastPing, 0)
-}
-
-// UpdateLastPing updates that property for the given peer ID and address.
-func (db *mapDB) UpdateLastPing(id ID, address string, t time.Time) error {
-	key := string(id.Bytes())
-
-	db.mutex.Lock()
-	defer db.mutex.Unlock()
-
-	peerEntry := db.m[key]
-	if peerEntry.properties == nil {
-		peerEntry.properties = make(map[string]peerPropEntry)
-	}
-	entry := peerEntry.properties[address]
-	entry.lastPing = t.Unix()
-	peerEntry.properties[address] = entry
-	db.m[key] = peerEntry
-
-	return nil
-}
-
-// LastPong returns that property for the given peer ID and address.
-func (db *mapDB) LastPong(id ID, address string) time.Time {
-	db.mutex.RLock()
-	defer db.mutex.RUnlock()
-
-	peerEntry := db.m[string(id.Bytes())]
-	return time.Unix(peerEntry.properties[address].lastPong, 0)
-}
-
-// UpdateLastPong updates that property for the given peer ID and address.
-func (db *mapDB) UpdateLastPong(id ID, address string, t time.Time) error {
-	key := string(id.Bytes())
-
-	db.mutex.Lock()
-	defer db.mutex.Unlock()
-
-	peerEntry := db.m[key]
-	if peerEntry.properties == nil {
-		peerEntry.properties = make(map[string]peerPropEntry)
-	}
-	entry := peerEntry.properties[address]
-	entry.lastPong = t.Unix()
-	peerEntry.properties[address] = entry
-	db.m[key] = peerEntry
-
-	return nil
-}
-
-// UpdatePeer updates a peer in the database.
-func (db *mapDB) UpdatePeer(p *Peer) error {
-	data, err := p.Marshal()
-	if err != nil {
-		return err
-	}
-	key := string(p.ID().Bytes())
-
-	db.mutex.Lock()
-	defer db.mutex.Unlock()
-
-	peerEntry := db.m[key]
-	peerEntry.data = data
-	db.m[key] = peerEntry
-
-	return nil
-}
-
-// Peer retrieves a peer from the database.
-func (db *mapDB) Peer(id ID) *Peer {
-	db.mutex.RLock()
-	peerEntry := db.m[string(id.Bytes())]
-	db.mutex.RUnlock()
-
-	if peerEntry.data == nil {
-		return nil
-	}
-	return parsePeer(peerEntry.data)
-}
-
-// SeedPeers retrieves random nodes to be used as potential bootstrap peers.
-func (db *mapDB) SeedPeers() []*Peer {
-	peers := make([]*Peer, 0)
-	now := time.Now()
-
-	db.mutex.RLock()
-	for id, peerEntry := range db.m {
-		p := parsePeer(peerEntry.data)
-		if p == nil || id != string(p.ID().Bytes()) {
-			continue
-		}
-		if now.Sub(db.LastPong(p.ID(), p.Address())) > seedExpiration {
-			continue
-		}
-
-		peers = append(peers, p)
-	}
-	db.mutex.RUnlock()
-
-	return randomSubset(peers, seedCount)
-}
-
-func (db *mapDB) expirer() {
-	defer db.wg.Done()
-
-	// the expiring isn't triggert right away, to give the bootstrapping the chance to use older nodes
-	tick := time.NewTicker(cleanupInterval)
-	defer tick.Stop()
-
-	for {
-		select {
-		case <-tick.C:
-			db.expirePeers()
-		case <-db.closing:
-			return
-		}
-	}
-}
-
-func (db *mapDB) expirePeers() {
-	var (
-		threshold = time.Now().Add(-peerExpiration).Unix()
-		count     int
-	)
-
-	db.mutex.Lock()
-	for id, peerEntry := range db.m {
-		for address, peerPropEntry := range peerEntry.properties {
-			if peerPropEntry.lastPong <= threshold {
-				delete(peerEntry.properties, address)
-			}
-		}
-		if len(peerEntry.properties) == 0 {
-			delete(db.m, id)
-			count++
-		}
-	}
-	db.mutex.Unlock()
-
-	db.log.Debugw("expired peers", "count", count)
-}
diff --git a/packages/autopeering/peer/mapdb_test.go b/packages/autopeering/peer/mapdb_test.go
deleted file mode 100644
index 494f669181661461f3636b79add9e715cba463f6..0000000000000000000000000000000000000000
--- a/packages/autopeering/peer/mapdb_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package peer
-
-import (
-	"crypto/ed25519"
-	"testing"
-	"time"
-
-	"github.com/iotaledger/hive.go/logger"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-var log = logger.NewExampleLogger("peer")
-
-func TestMapDBPing(t *testing.T) {
-	p := newTestPeer()
-	db := NewMemoryDB(log)
-
-	now := time.Now()
-	err := db.UpdateLastPing(p.ID(), p.Address(), now)
-	require.NoError(t, err)
-
-	assert.Equal(t, now.Unix(), db.LastPing(p.ID(), p.Address()).Unix())
-}
-
-func TestMapDBPong(t *testing.T) {
-	p := newTestPeer()
-	db := NewMemoryDB(log)
-
-	now := time.Now()
-	err := db.UpdateLastPong(p.ID(), p.Address(), now)
-	require.NoError(t, err)
-
-	assert.Equal(t, now.Unix(), db.LastPong(p.ID(), p.Address()).Unix())
-}
-
-func TestMapDBPeer(t *testing.T) {
-	p := newTestPeer()
-	db := NewMemoryDB(log)
-
-	err := db.UpdatePeer(p)
-	require.NoError(t, err)
-
-	assert.Equal(t, p, db.Peer(p.ID()))
-}
-
-func TestMapDBSeedPeers(t *testing.T) {
-	p := newTestPeer()
-	db := NewMemoryDB(log)
-
-	require.NoError(t, db.UpdatePeer(p))
-	require.NoError(t, db.UpdateLastPong(p.ID(), p.Address(), time.Now()))
-
-	peers := db.SeedPeers()
-	assert.ElementsMatch(t, []*Peer{p}, peers)
-}
-
-func TestMapDBLocal(t *testing.T) {
-	db := NewMemoryDB(log)
-
-	l1 := newTestLocal(t, db)
-	assert.Equal(t, len(l1.PublicKey()), ed25519.PublicKeySize)
-
-	l2 := newTestLocal(t, db)
-	assert.Equal(t, l1, l2)
-}
diff --git a/packages/autopeering/peer/peer.go b/packages/autopeering/peer/peer.go
index 768511caf2cd39b9a241284b1e098b762ea010fc..cea3443371a8bb7f9e4f12a9d98d90e7b9122fe1 100644
--- a/packages/autopeering/peer/peer.go
+++ b/packages/autopeering/peer/peer.go
@@ -12,6 +12,7 @@ import (
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer/service"
 )
 
+// Errors in the peer package.
 var (
 	ErrNeedsPeeringService = errors.New("needs peering service")
 	ErrInvalidSignature    = errors.New("invalid signature")
diff --git a/packages/autopeering/peer/peer_test.go b/packages/autopeering/peer/peer_test.go
index 77d59125bb74f89c1f40b2f1b4f82722543f1846..a3375684c0344e859d3c2afe302ffad5875d0cf7 100644
--- a/packages/autopeering/peer/peer_test.go
+++ b/packages/autopeering/peer/peer_test.go
@@ -22,8 +22,9 @@ func newTestServiceRecord() *service.Record {
 	return services
 }
 
-func newTestPeer() *Peer {
+func newTestPeer(name string) *Peer {
 	key := make([]byte, ed25519.PublicKeySize)
+	copy(key, name)
 	return NewPeer(key, newTestServiceRecord())
 }
 
@@ -47,7 +48,7 @@ func TestInvalidServicePeer(t *testing.T) {
 }
 
 func TestMarshalUnmarshal(t *testing.T) {
-	p := newTestPeer()
+	p := newTestPeer("test")
 	data, err := p.Marshal()
 	require.NoError(t, err)
 
diff --git a/packages/autopeering/peer/peerdb.go b/packages/autopeering/peer/peerdb.go
index b20fcd052b304c5dc9b4fac5b09a3ca31dce8fa5..d0972c77484131e60909c29397d8e562f33795c3 100644
--- a/packages/autopeering/peer/peerdb.go
+++ b/packages/autopeering/peer/peerdb.go
@@ -4,11 +4,9 @@ import (
 	"bytes"
 	"encoding/binary"
 	"math/rand"
-	"sync"
 	"time"
 
 	"github.com/iotaledger/goshimmer/packages/database"
-	"github.com/iotaledger/hive.go/logger"
 )
 
 const (
@@ -23,40 +21,16 @@ const (
 	seedExpiration = 5 * 24 * time.Hour
 )
 
-// DB is the peer database, storing previously seen peers and any collected
-// properties of them.
-type DB interface {
-	// LocalPrivateKey returns the private key stored in the database or creates a new one.
-	LocalPrivateKey() (PrivateKey, error)
-	// UpdateLocalPrivateKey stores the provided key in the database.
-	UpdateLocalPrivateKey(key PrivateKey) error
-
-	// Peer retrieves a peer from the database.
-	Peer(id ID) *Peer
-	// UpdatePeer updates a peer in the database.
-	UpdatePeer(p *Peer) error
-	// SeedPeers retrieves random nodes to be used as potential bootstrap peers.
-	SeedPeers() []*Peer
-
-	// LastPing returns that property for the given peer ID and address.
-	LastPing(id ID, address string) time.Time
-	// UpdateLastPing updates that property for the given peer ID and address.
-	UpdateLastPing(id ID, address string, t time.Time) error
-
-	// LastPong returns that property for the given peer ID and address.
-	LastPong(id ID, address string) time.Time
-	// UpdateLastPong updates that property for the given peer ID and address.
-	UpdateLastPong(id ID, address string, t time.Time) error
-
-	// Close closes the DB.
-	Close()
+// Database defines the database functionality required by DB.
+type Database interface {
+	Get(database.Key) (database.Entry, error)
+	Set(database.Entry) error
+	ForEachPrefix(database.KeyPrefix, func(database.Entry) bool) error
 }
 
-type persistentDB struct {
-	db  database.Database
-	log *logger.Logger
-
-	closeOnce sync.Once
+// DB is the peer database, storing previously seen peers and any collected properties of them.
+type DB struct {
+	db Database
 }
 
 // Keys in the node database.
@@ -72,56 +46,31 @@ const (
 	dbLocalKey = "key"
 )
 
-// NewPersistentDB creates a new persistent DB.
-func NewPersistentDB(log *logger.Logger) DB {
-	db, err := database.Get(database.DBPrefixAutoPeering, database.GetBadgerInstance())
-	if err != nil {
-		panic(err)
+// NewDB creates a new peer database.
+func NewDB(db Database) (*DB, error) {
+	pDB := &DB{
+		db: db,
 	}
-
-	pDB := &persistentDB{
-		db:  db,
-		log: log,
+	err := pDB.init()
+	if err != nil {
+		return nil, err
 	}
-	pDB.start()
-
-	return pDB
-}
-
-// Close closes the DB.
-func (db *persistentDB) Close() {
-	db.closeOnce.Do(func() {
-		db.persistSeeds()
-	})
+	return pDB, nil
 }
 
-func (db *persistentDB) start() {
+func (db *DB) init() error {
 	// get all peers in the DB
 	peers := db.getPeers(0)
 
 	for _, p := range peers {
 		// if they dont have an associated pong, give them a grace period
 		if db.LastPong(p.ID(), p.Address()).Unix() == 0 {
-			err := db.setPeerWithTTL(p, cleanupInterval)
-			if err != nil {
-				db.log.Warnw("database error", "err", err)
+			if err := db.setPeerWithTTL(p, cleanupInterval); err != nil {
+				return err
 			}
 		}
 	}
-}
-
-// persistSeeds assures that potential bootstrap peers are not garbage collected.
-func (db *persistentDB) persistSeeds() {
-	// randomly select potential bootstrap peers
-	peers := randomSubset(db.getPeers(peerExpiration), seedCount)
-
-	for _, p := range peers {
-		err := db.setPeerWithTTL(p, seedExpiration)
-		if err != nil {
-			db.log.Warnw("database error", "err", err)
-		}
-	}
-	db.log.Infof("%d bootstrap peers remain in DB", len(peers))
+	return nil
 }
 
 // nodeKey returns the database key for a node record.
@@ -129,15 +78,6 @@ func nodeKey(id ID) []byte {
 	return append([]byte(dbNodePrefix), id.Bytes()...)
 }
 
-func splitNodeKey(key []byte) (id ID, rest []byte) {
-	if !bytes.HasPrefix(key, []byte(dbNodePrefix)) {
-		return ID{}, nil
-	}
-	item := key[len(dbNodePrefix):]
-	copy(id[:], item[:len(id)])
-	return id, item[len(id):]
-}
-
 // nodeFieldKey returns the database key for a node metadata field.
 func nodeFieldKey(id ID, address string, field string) []byte {
 	return bytes.Join([][]byte{nodeKey(id), []byte(address), []byte(field)}, []byte{':'})
@@ -156,7 +96,7 @@ func parseInt64(blob []byte) int64 {
 }
 
 // getInt64 retrieves an integer associated with a particular key.
-func (db *persistentDB) getInt64(key []byte) int64 {
+func (db *DB) getInt64(key []byte) int64 {
 	entry, err := db.db.Get(key)
 	if err != nil {
 		return 0
@@ -165,14 +105,14 @@ func (db *persistentDB) getInt64(key []byte) int64 {
 }
 
 // setInt64 stores an integer in the given key.
-func (db *persistentDB) setInt64(key []byte, n int64) error {
+func (db *DB) setInt64(key []byte, n int64) error {
 	blob := make([]byte, binary.MaxVarintLen64)
 	blob = blob[:binary.PutVarint(blob, n)]
 	return db.db.Set(database.Entry{Key: key, Value: blob, TTL: peerExpiration})
 }
 
 // LocalPrivateKey returns the private key stored in the database or creates a new one.
-func (db *persistentDB) LocalPrivateKey() (PrivateKey, error) {
+func (db *DB) LocalPrivateKey() (PrivateKey, error) {
 	entry, err := db.db.Get(localFieldKey(dbLocalKey))
 	if err == database.ErrKeyNotFound {
 		key, genErr := generatePrivateKey()
@@ -188,31 +128,31 @@ func (db *persistentDB) LocalPrivateKey() (PrivateKey, error) {
 }
 
 // UpdateLocalPrivateKey stores the provided key in the database.
-func (db *persistentDB) UpdateLocalPrivateKey(key PrivateKey) error {
+func (db *DB) UpdateLocalPrivateKey(key PrivateKey) error {
 	return db.db.Set(database.Entry{Key: localFieldKey(dbLocalKey), Value: []byte(key)})
 }
 
 // LastPing returns that property for the given peer ID and address.
-func (db *persistentDB) LastPing(id ID, address string) time.Time {
+func (db *DB) LastPing(id ID, address string) time.Time {
 	return time.Unix(db.getInt64(nodeFieldKey(id, address, dbNodePing)), 0)
 }
 
 // UpdateLastPing updates that property for the given peer ID and address.
-func (db *persistentDB) UpdateLastPing(id ID, address string, t time.Time) error {
+func (db *DB) UpdateLastPing(id ID, address string, t time.Time) error {
 	return db.setInt64(nodeFieldKey(id, address, dbNodePing), t.Unix())
 }
 
-// LastPing returns that property for the given peer ID and address.
-func (db *persistentDB) LastPong(id ID, address string) time.Time {
+// LastPong returns that property for the given peer ID and address.
+func (db *DB) LastPong(id ID, address string) time.Time {
 	return time.Unix(db.getInt64(nodeFieldKey(id, address, dbNodePong)), 0)
 }
 
-// UpdateLastPing updates that property for the given peer ID and address.
-func (db *persistentDB) UpdateLastPong(id ID, address string, t time.Time) error {
+// UpdateLastPong updates that property for the given peer ID and address.
+func (db *DB) UpdateLastPong(id ID, address string, t time.Time) error {
 	return db.setInt64(nodeFieldKey(id, address, dbNodePong), t.Unix())
 }
 
-func (db *persistentDB) setPeerWithTTL(p *Peer, ttl time.Duration) error {
+func (db *DB) setPeerWithTTL(p *Peer, ttl time.Duration) error {
 	data, err := p.Marshal()
 	if err != nil {
 		return err
@@ -220,7 +160,8 @@ func (db *persistentDB) setPeerWithTTL(p *Peer, ttl time.Duration) error {
 	return db.db.Set(database.Entry{Key: nodeKey(p.ID()), Value: data, TTL: ttl})
 }
 
-func (db *persistentDB) UpdatePeer(p *Peer) error {
+// UpdatePeer updates a peer in the database.
+func (db *DB) UpdatePeer(p *Peer) error {
 	return db.setPeerWithTTL(p, peerExpiration)
 }
 
@@ -232,7 +173,8 @@ func parsePeer(data []byte) *Peer {
 	return p
 }
 
-func (db *persistentDB) Peer(id ID) *Peer {
+// Peer retrieves a peer from the database.
+func (db *DB) Peer(id ID) *Peer {
 	data, err := db.db.Get(nodeKey(id))
 	if err != nil {
 		return nil
@@ -254,40 +196,51 @@ func randomSubset(peers []*Peer, m int) []*Peer {
 	return result
 }
 
-func (db *persistentDB) getPeers(maxAge time.Duration) []*Peer {
-	peers := make([]*Peer, 0)
+func (db *DB) getPeers(maxAge time.Duration) (peers []*Peer) {
 	now := time.Now()
-
-	err := db.db.StreamForEachPrefix([]byte(dbNodePrefix), func(entry database.Entry) error {
-		id, rest := splitNodeKey(entry.Key)
-		if len(rest) > 0 {
-			return nil
+	err := db.db.ForEachPrefix([]byte(dbNodePrefix), func(entry database.Entry) bool {
+		var id ID
+		if len(entry.Key) != len(id) {
+			return false
 		}
+		copy(id[:], entry.Key)
 
 		p := parsePeer(entry.Value)
 		if p == nil || p.ID() != id {
-			return nil
+			return false
 		}
 		if maxAge > 0 && now.Sub(db.LastPong(p.ID(), p.Address())) > maxAge {
-			return nil
+			return false
 		}
 
 		peers = append(peers, p)
-		return nil
+		return false
 	})
 	if err != nil {
-		return []*Peer{}
+		return nil
 	}
 	return peers
 }
 
 // SeedPeers retrieves random nodes to be used as potential bootstrap peers.
-func (db *persistentDB) SeedPeers() []*Peer {
+func (db *DB) SeedPeers() []*Peer {
 	// get all stored peers and select subset
 	peers := db.getPeers(0)
 
-	seeds := randomSubset(peers, seedCount)
-	db.log.Infof("%d potential bootstrap peers restored form DB", len(seeds))
+	return randomSubset(peers, seedCount)
+}
 
-	return seeds
+// PersistSeeds assures that potential bootstrap peers are not garbage collected.
+// It returns the number of peers that have been persisted.
+func (db *DB) PersistSeeds() int {
+	// randomly select potential bootstrap peers
+	peers := randomSubset(db.getPeers(peerExpiration), seedCount)
+
+	for i, p := range peers {
+		err := db.setPeerWithTTL(p, seedExpiration)
+		if err != nil {
+			return i
+		}
+	}
+	return len(peers)
 }
diff --git a/packages/autopeering/peer/peerdb_test.go b/packages/autopeering/peer/peerdb_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f52f45c94aaada125443995843df72d69554421e
--- /dev/null
+++ b/packages/autopeering/peer/peerdb_test.go
@@ -0,0 +1,90 @@
+package peer
+
+import (
+	"crypto/ed25519"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/iotaledger/goshimmer/packages/database/mapdb"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestPeerDB(t *testing.T) {
+	db, err := NewDB(mapdb.NewMapDB())
+	require.NoError(t, err)
+	p := newTestPeer("test")
+
+	t.Run("LocalPrivateKey", func(t *testing.T) {
+		key, err := db.LocalPrivateKey()
+		require.NoError(t, err)
+
+		assert.EqualValues(t, ed25519.PrivateKeySize, len(key))
+		assert.EqualValues(t, ed25519.PublicKeySize, len(key.Public()))
+
+		err = db.UpdateLocalPrivateKey(key)
+		require.NoError(t, err)
+
+		key2, err := db.LocalPrivateKey()
+		require.NoError(t, err)
+
+		assert.Equal(t, key, key2)
+	})
+
+	t.Run("Peer", func(t *testing.T) {
+		err := db.UpdatePeer(p)
+		require.NoError(t, err)
+
+		assert.Equal(t, p, db.Peer(p.ID()))
+	})
+
+	t.Run("LastPing", func(t *testing.T) {
+		now := time.Now()
+		err := db.UpdateLastPing(p.ID(), p.Address(), now)
+		require.NoError(t, err)
+
+		assert.Equal(t, now.Unix(), db.LastPing(p.ID(), p.Address()).Unix())
+	})
+
+	t.Run("LastPong", func(t *testing.T) {
+		now := time.Now()
+		err := db.UpdateLastPong(p.ID(), p.Address(), now)
+		require.NoError(t, err)
+
+		assert.Equal(t, now.Unix(), db.LastPong(p.ID(), p.Address()).Unix())
+	})
+
+	t.Run("getPeers", func(t *testing.T) {
+		time.Sleep(time.Second) // let the old peers expire
+
+		newPeer := newTestPeer("new")
+		assert.NoError(t, db.UpdatePeer(newPeer))
+		assert.NoError(t, db.UpdateLastPong(newPeer.ID(), newPeer.Address(), time.Now()))
+
+		peers := db.getPeers(time.Second)
+		assert.ElementsMatch(t, []*Peer{newPeer}, peers)
+	})
+
+	t.Run("SeedPeers", func(t *testing.T) {
+		for i := 0; i < seedCount+1; i++ {
+			p := newTestPeer(fmt.Sprintf("SeedPeers%0d", i))
+			assert.NoError(t, db.UpdatePeer(p))
+			assert.NoError(t, db.UpdateLastPong(p.ID(), p.Address(), time.Now()))
+		}
+
+		peers := db.SeedPeers()
+		assert.EqualValues(t, seedCount, len(peers))
+	})
+
+	t.Run("PersistSeeds", func(t *testing.T) {
+		for i := 0; i < seedCount+1; i++ {
+			p := newTestPeer(fmt.Sprintf("PersistSeeds%0d", i))
+			assert.NoError(t, db.UpdatePeer(p))
+			assert.NoError(t, db.UpdateLastPong(p.ID(), p.Address(), time.Now()))
+		}
+
+		count := db.PersistSeeds()
+		assert.EqualValues(t, seedCount, count)
+	})
+}
diff --git a/packages/autopeering/peer/peertest/peertest.go b/packages/autopeering/peer/peertest/peertest.go
new file mode 100644
index 0000000000000000000000000000000000000000..e1c0fb8f7618f64990babff41eac156effb52b82
--- /dev/null
+++ b/packages/autopeering/peer/peertest/peertest.go
@@ -0,0 +1,35 @@
+// Package peertest provides utilities for writing tests with the peer package.
+package peertest
+
+import (
+	"crypto/ed25519"
+	"log"
+	"math/rand"
+
+	"github.com/iotaledger/goshimmer/packages/autopeering/peer"
+	"github.com/iotaledger/goshimmer/packages/autopeering/peer/service"
+)
+
+func NewPeer(network, address string) *peer.Peer {
+	services := service.New()
+	services.Update(service.PeeringKey, network, address)
+	key := make([]byte, ed25519.PublicKeySize)
+	copy(key, address)
+	return peer.NewPeer(key, services)
+}
+
+func NewLocal(network, address string, db *peer.DB) *peer.Local {
+	services := service.New()
+	services.Update(service.PeeringKey, network, address)
+	local, err := peer.NewLocal(services, db, randomSeed())
+	if err != nil {
+		log.Panic(err)
+	}
+	return local
+}
+
+func randomSeed() []byte {
+	seed := make([]byte, ed25519.SeedSize)
+	rand.Read(seed)
+	return seed
+}
diff --git a/packages/autopeering/selection/manager_test.go b/packages/autopeering/selection/manager_test.go
index cf5a961f77999fff984a77f75a483d6867b0bf22..5b9f746f7a8a130b3d5038433f04b5d4e08b4b6c 100644
--- a/packages/autopeering/selection/manager_test.go
+++ b/packages/autopeering/selection/manager_test.go
@@ -7,10 +7,10 @@ import (
 	"time"
 
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer"
-	"github.com/iotaledger/goshimmer/packages/autopeering/peer/service"
+	"github.com/iotaledger/goshimmer/packages/autopeering/peer/peertest"
 	"github.com/iotaledger/goshimmer/packages/autopeering/salt"
+	"github.com/iotaledger/goshimmer/packages/database/mapdb"
 	"github.com/iotaledger/hive.go/events"
-	"github.com/iotaledger/hive.go/logger"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -186,16 +186,6 @@ type networkMock struct {
 	mgr map[peer.ID]*manager
 }
 
-func newNetworkMock(name string, mgrMap map[peer.ID]*manager, log *logger.Logger) *networkMock {
-	services := service.New()
-	services.Update(service.PeeringKey, "mock", name)
-	local, _ := peer.NewLocal(services, peer.NewMemoryDB(log))
-	return &networkMock{
-		loc: local,
-		mgr: mgrMap,
-	}
-}
-
 func (n *networkMock) local() *peer.Local {
 	return n.loc
 }
@@ -217,9 +207,10 @@ func (n *networkMock) GetKnownPeers() []*peer.Peer {
 }
 
 func newTestManager(name string, mgrMap map[peer.ID]*manager) *manager {
-	l := log.Named(name)
-	net := newNetworkMock(name, mgrMap, l)
-	m := newManager(net, net.GetKnownPeers, l, Config{})
+	db, _ := peer.NewDB(mapdb.NewMapDB())
+	local := peertest.NewLocal("mock", name, db)
+	networkMock := &networkMock{loc: local, mgr: mgrMap}
+	m := newManager(networkMock, networkMock.GetKnownPeers, log.Named(name), Config{})
 	mgrMap[m.getID()] = m
 	return m
 }
diff --git a/packages/autopeering/selection/neighborhood_test.go b/packages/autopeering/selection/neighborhood_test.go
index 19dc5211694b1429bf6ef6fb1f896d3be27be6fd..04891d91ada0747dc411e3de2f1da22a793497dc 100644
--- a/packages/autopeering/selection/neighborhood_test.go
+++ b/packages/autopeering/selection/neighborhood_test.go
@@ -1,11 +1,13 @@
 package selection
 
 import (
+	"fmt"
 	"testing"
 	"time"
 
 	"github.com/iotaledger/goshimmer/packages/autopeering/distance"
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer"
+	"github.com/iotaledger/goshimmer/packages/autopeering/peer/peertest"
 	"github.com/iotaledger/goshimmer/packages/autopeering/salt"
 	"github.com/stretchr/testify/assert"
 )
@@ -13,7 +15,7 @@ import (
 func TestGetFurthest(t *testing.T) {
 	d := make([]peer.PeerDistance, 5)
 	for i := range d {
-		d[i].Remote = newTestPeer()
+		d[i].Remote = peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))
 		d[i].Distance = uint32(i + 1)
 	}
 
@@ -59,7 +61,7 @@ func TestGetFurthest(t *testing.T) {
 func TestGetPeerIndex(t *testing.T) {
 	d := make([]peer.PeerDistance, 5)
 	for i := range d {
-		d[i].Remote = newTestPeer()
+		d[i].Remote = peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))
 		d[i].Distance = uint32(i + 1)
 	}
 
@@ -102,7 +104,7 @@ func TestGetPeerIndex(t *testing.T) {
 func TestRemove(t *testing.T) {
 	d := make([]peer.PeerDistance, 10)
 	for i := range d {
-		d[i].Remote = newTestPeer()
+		d[i].Remote = peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))
 		d[i].Distance = uint32(i + 1)
 	}
 
@@ -145,7 +147,7 @@ func TestRemove(t *testing.T) {
 func TestAdd(t *testing.T) {
 	d := make([]peer.PeerDistance, 10)
 	for i := range d {
-		d[i].Remote = newTestPeer()
+		d[i].Remote = peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))
 		d[i].Distance = uint32(i + 1)
 	}
 
@@ -188,7 +190,7 @@ func TestAdd(t *testing.T) {
 func TestUpdateDistance(t *testing.T) {
 	d := make([]peer.PeerDistance, 5)
 	for i := range d {
-		d[i].Remote = newTestPeer()
+		d[i].Remote = peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))
 		d[i].Distance = uint32(i + 1)
 	}
 
@@ -214,7 +216,7 @@ func TestGetPeers(t *testing.T) {
 	peers := make([]*peer.Peer, 4)
 
 	for i := range d {
-		d[i].Remote = newTestPeer()
+		d[i].Remote = peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))
 		d[i].Distance = uint32(i + 1)
 		peers[i] = d[i].Remote
 	}
diff --git a/packages/autopeering/selection/protocol_test.go b/packages/autopeering/selection/protocol_test.go
index c2e99035602946d2bfa9b60dfdbe49dd46714881..4df4b46aee42868df889d5c8a88d7e9a1fefd2e1 100644
--- a/packages/autopeering/selection/protocol_test.go
+++ b/packages/autopeering/selection/protocol_test.go
@@ -6,57 +6,25 @@ import (
 
 	"github.com/iotaledger/goshimmer/packages/autopeering/discover"
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer"
-	"github.com/iotaledger/goshimmer/packages/autopeering/peer/service"
+	"github.com/iotaledger/goshimmer/packages/autopeering/peer/peertest"
 	"github.com/iotaledger/goshimmer/packages/autopeering/salt"
 	"github.com/iotaledger/goshimmer/packages/autopeering/server"
 	"github.com/iotaledger/goshimmer/packages/autopeering/transport"
+	"github.com/iotaledger/goshimmer/packages/database/mapdb"
 	"github.com/iotaledger/hive.go/logger"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
-const graceTime = 100 * time.Millisecond
-
-var log = logger.NewExampleLogger("selection")
-
-var peerMap = make(map[peer.ID]*peer.Peer)
-
-// dummyDiscovery is a dummy implementation of DiscoveryProtocol never returning any verified peers.
-type dummyDiscovery struct{}
-
-func (d dummyDiscovery) IsVerified(peer.ID, string) bool                 { return true }
-func (d dummyDiscovery) EnsureVerified(*peer.Peer) error                 { return nil }
-func (d dummyDiscovery) GetVerifiedPeer(id peer.ID, _ string) *peer.Peer { return peerMap[id] }
-func (d dummyDiscovery) GetVerifiedPeers() []*peer.Peer                  { return []*peer.Peer{} }
-
-// newTest creates a new neighborhood server and also returns the teardown.
-func newTest(t require.TestingT, trans transport.Transport) (*server.Server, *Protocol, func()) {
-	l := log.Named(trans.LocalAddr().String())
-
-	services := service.New()
-	services.Update(service.PeeringKey, trans.LocalAddr().Network(), trans.LocalAddr().String())
-	db := peer.NewMemoryDB(l.Named("db"))
-	local, err := peer.NewLocal(services, db)
-	require.NoError(t, err)
-
-	// add the new peer to the global map for dummyDiscovery
-	peerMap[local.ID()] = &local.Peer
-
-	prot := New(local, dummyDiscovery{}, Config{Log: l})
-	srv := server.Serve(local, trans, l.Named("srv"), prot)
-	prot.Start(srv)
-
-	teardown := func() {
-		srv.Close()
-		prot.Close()
-		db.Close()
-	}
-	return srv, prot, teardown
-}
+const (
+	testNetwork = "udp"
+	graceTime   = 100 * time.Millisecond
+)
 
-func getPeer(s *server.Server) *peer.Peer {
-	return &s.Local().Peer
-}
+var (
+	log     = logger.NewExampleLogger("selection")
+	peerMap = make(map[peer.ID]*peer.Peer)
+)
 
 func TestProtocol(t *testing.T) {
 	// assure that the default test parameters are used for all protocol tests
@@ -69,14 +37,14 @@ func TestProtocol(t *testing.T) {
 		p2p := transport.P2P()
 		defer p2p.Close()
 
-		srvA, protA, closeA := newTest(t, p2p.A)
+		protA, closeA := newTestProtocol(p2p.A)
 		defer closeA()
-		srvB, protB, closeB := newTest(t, p2p.B)
+		protB, closeB := newTestProtocol(p2p.B)
 		defer closeB()
 
-		peerA := getPeer(srvA)
+		peerA := getPeer(protA)
 		saltA, _ := salt.NewSalt(100 * time.Second)
-		peerB := getPeer(srvB)
+		peerB := getPeer(protB)
 		saltB, _ := salt.NewSalt(100 * time.Second)
 
 		// request peering to peer B
@@ -97,13 +65,13 @@ func TestProtocol(t *testing.T) {
 		p2p := transport.P2P()
 		defer p2p.Close()
 
-		_, protA, closeA := newTest(t, p2p.A)
+		protA, closeA := newTestProtocol(p2p.A)
 		defer closeA()
-		srvB, _, closeB := newTest(t, p2p.B)
+		protB, closeB := newTestProtocol(p2p.B)
 		defer closeB()
 
 		saltA, _ := salt.NewSalt(-1 * time.Second)
-		peerB := getPeer(srvB)
+		peerB := getPeer(protB)
 
 		// request peering to peer B
 		_, err := protA.PeeringRequest(peerB, saltA)
@@ -114,19 +82,19 @@ func TestProtocol(t *testing.T) {
 		p2p := transport.P2P()
 		defer p2p.Close()
 
-		srvA, protA, closeA := newTest(t, p2p.A)
+		protA, closeA := newTestProtocol(p2p.A)
 		defer closeA()
-		srvB, protB, closeB := newTest(t, p2p.B)
+		protB, closeB := newTestProtocol(p2p.B)
 		defer closeB()
 
-		peerA := getPeer(srvA)
+		peerA := getPeer(protA)
 		saltA, _ := salt.NewSalt(100 * time.Second)
-		peerB := getPeer(srvB)
+		peerB := getPeer(protB)
 
 		// request peering to peer B
-		services, err := protA.PeeringRequest(peerB, saltA)
+		status, err := protA.PeeringRequest(peerB, saltA)
 		require.NoError(t, err)
-		assert.NotEmpty(t, services)
+		assert.True(t, status)
 
 		require.Contains(t, protB.GetNeighbors(), peerA)
 
@@ -140,31 +108,56 @@ func TestProtocol(t *testing.T) {
 		p2p := transport.P2P()
 		defer p2p.Close()
 
-		srvA, protA, closeA := newFullTest(t, p2p.A)
+		protA, closeA := newFullTestProtocol(p2p.A)
 		defer closeA()
 
 		time.Sleep(graceTime) // wait for the master to initialize
 
-		srvB, protB, closeB := newFullTest(t, p2p.B, getPeer(srvA))
+		protB, closeB := newFullTestProtocol(p2p.B, getPeer(protA))
 		defer closeB()
 
 		time.Sleep(outboundUpdateInterval + graceTime) // wait for the next outbound cycle
 
 		// the two peers should be peered
-		assert.ElementsMatch(t, []*peer.Peer{getPeer(srvB)}, protA.GetNeighbors())
-		assert.ElementsMatch(t, []*peer.Peer{getPeer(srvA)}, protB.GetNeighbors())
+		assert.ElementsMatch(t, []*peer.Peer{getPeer(protB)}, protA.GetNeighbors())
+		assert.ElementsMatch(t, []*peer.Peer{getPeer(protA)}, protB.GetNeighbors())
 	})
 }
 
-// newTest creates a new server handling discover as well as neighborhood and also returns the teardown.
-func newFullTest(t require.TestingT, trans transport.Transport, masterPeers ...*peer.Peer) (*server.Server, *Protocol, func()) {
+// dummyDiscovery is a dummy implementation of DiscoveryProtocol never returning any verified peers.
+type dummyDiscovery struct{}
+
+func (d dummyDiscovery) IsVerified(peer.ID, string) bool                 { return true }
+func (d dummyDiscovery) EnsureVerified(*peer.Peer) error                 { return nil }
+func (d dummyDiscovery) GetVerifiedPeer(id peer.ID, _ string) *peer.Peer { return peerMap[id] }
+func (d dummyDiscovery) GetVerifiedPeers() []*peer.Peer                  { return []*peer.Peer{} }
+
+// newTestProtocol creates a new neighborhood server and also returns the teardown.
+func newTestProtocol(trans transport.Transport) (*Protocol, func()) {
+	db, _ := peer.NewDB(mapdb.NewMapDB())
+	local := peertest.NewLocal(trans.LocalAddr().Network(), trans.LocalAddr().String(), db)
+	// add the new peer to the global map for dummyDiscovery
+	peerMap[local.ID()] = &local.Peer
 	l := log.Named(trans.LocalAddr().String())
 
-	services := service.New()
-	services.Update(service.PeeringKey, trans.LocalAddr().Network(), trans.LocalAddr().String())
-	db := peer.NewMemoryDB(l.Named("db"))
-	local, err := peer.NewLocal(services, db)
-	require.NoError(t, err)
+	prot := New(local, dummyDiscovery{}, Config{Log: l.Named("sel")})
+	srv := server.Serve(local, trans, l.Named("srv"), prot)
+	prot.Start(srv)
+
+	teardown := func() {
+		srv.Close()
+		prot.Close()
+	}
+	return prot, teardown
+}
+
+// newFullTestProtocol creates a new server handling discover as well as neighborhood and also returns the teardown.
+func newFullTestProtocol(trans transport.Transport, masterPeers ...*peer.Peer) (*Protocol, func()) {
+	db, _ := peer.NewDB(mapdb.NewMapDB())
+	local := peertest.NewLocal(trans.LocalAddr().Network(), trans.LocalAddr().String(), db)
+	// add the new peer to the global map for dummyDiscovery
+	peerMap[local.ID()] = &local.Peer
+	l := log.Named(trans.LocalAddr().String())
 
 	discovery := discover.New(local, discover.Config{
 		Log:         l.Named("disc"),
@@ -183,7 +176,10 @@ func newFullTest(t require.TestingT, trans transport.Transport, masterPeers ...*
 		srv.Close()
 		selection.Close()
 		discovery.Close()
-		db.Close()
 	}
-	return srv, selection, teardown
+	return selection, teardown
+}
+
+func getPeer(p *Protocol) *peer.Peer {
+	return &p.local().Peer
 }
diff --git a/packages/autopeering/selection/selection_test.go b/packages/autopeering/selection/selection_test.go
index cc2235e812dd329534963ec1e4ca475eac57ae8e..49d22c8df577dc20cbd05d38984eb2ac45ea84af 100644
--- a/packages/autopeering/selection/selection_test.go
+++ b/packages/autopeering/selection/selection_test.go
@@ -1,35 +1,18 @@
 package selection
 
 import (
-	"crypto/ed25519"
+	"fmt"
 	"testing"
 
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer"
-	"github.com/iotaledger/goshimmer/packages/autopeering/peer/service"
+	"github.com/iotaledger/goshimmer/packages/autopeering/peer/peertest"
 	"github.com/stretchr/testify/assert"
 )
 
-const (
-	testNetwork = "udp"
-	testAddress = "127.0.0.1:8000"
-)
-
-func newTestServiceRecord() *service.Record {
-	services := service.New()
-	services.Update(service.PeeringKey, testNetwork, testAddress)
-
-	return services
-}
-
-func newTestPeer() *peer.Peer {
-	key, _, _ := ed25519.GenerateKey(nil)
-	return peer.NewPeer(peer.PublicKey(key), newTestServiceRecord())
-}
-
 func TestFilterAddPeers(t *testing.T) {
 	p := make([]*peer.Peer, 5)
 	for i := range p {
-		p[i] = newTestPeer()
+		p[i] = peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))
 	}
 
 	type testCase struct {
@@ -64,7 +47,7 @@ func TestFilterAddPeers(t *testing.T) {
 func TestFilterRemovePeers(t *testing.T) {
 	p := make([]*peer.Peer, 5)
 	for i := range p {
-		p[i] = newTestPeer()
+		p[i] = peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))
 	}
 
 	type testCase struct {
@@ -100,7 +83,7 @@ func TestFilterRemovePeers(t *testing.T) {
 func TestFilterApply(t *testing.T) {
 	d := make([]peer.PeerDistance, 5)
 	for i := range d {
-		d[i].Remote = newTestPeer()
+		d[i].Remote = peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))
 		d[i].Distance = uint32(i + 1)
 	}
 
@@ -134,7 +117,7 @@ func TestFilterApply(t *testing.T) {
 func TestSelection(t *testing.T) {
 	d := make([]peer.PeerDistance, 10)
 	for i := range d {
-		d[i].Remote = newTestPeer()
+		d[i].Remote = peertest.NewPeer(testNetwork, fmt.Sprintf("%d", i))
 		d[i].Distance = uint32(i + 1)
 	}
 
diff --git a/packages/autopeering/server/server_test.go b/packages/autopeering/server/server_test.go
index e6725fc0facfcc568ecc20b5277d09fdd089cacb..8fc6022f5d59fa2e7d71d6b85886421f11689b5c 100644
--- a/packages/autopeering/server/server_test.go
+++ b/packages/autopeering/server/server_test.go
@@ -8,6 +8,7 @@ import (
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer/service"
 	"github.com/iotaledger/goshimmer/packages/autopeering/salt"
 	"github.com/iotaledger/goshimmer/packages/autopeering/transport"
+	"github.com/iotaledger/goshimmer/packages/database/mapdb"
 	"github.com/iotaledger/hive.go/logger"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
@@ -96,10 +97,16 @@ func unmarshal(data []byte) (Message, error) {
 	return nil, ErrInvalidMessage
 }
 
+func newTestDB(t require.TestingT) *peer.DB {
+	db, err := peer.NewDB(mapdb.NewMapDB())
+	require.NoError(t, err)
+	return db
+}
+
 func TestSrvEncodeDecodePing(t *testing.T) {
 	services := service.New()
 	services.Update(service.PeeringKey, "dummy", "local")
-	local, err := peer.NewLocal(services, peer.NewMemoryDB(log))
+	local, err := peer.NewLocal(services, newTestDB(t))
 	require.NoError(t, err)
 	s := &Server{local: local}
 
@@ -119,8 +126,7 @@ func newTestServer(t require.TestingT, name string, trans transport.Transport) (
 
 	services := service.New()
 	services.Update(service.PeeringKey, trans.LocalAddr().Network(), trans.LocalAddr().String())
-	db := peer.NewMemoryDB(l.Named("db"))
-	local, err := peer.NewLocal(services, db)
+	local, err := peer.NewLocal(services, newTestDB(t))
 	require.NoError(t, err)
 
 	s, _ := salt.NewSalt(100 * time.Second)
@@ -130,11 +136,7 @@ func newTestServer(t require.TestingT, name string, trans transport.Transport) (
 
 	srv := Serve(local, trans, l, HandlerFunc(handle))
 
-	teardown := func() {
-		srv.Close()
-		db.Close()
-	}
-	return srv, teardown
+	return srv, srv.Close
 }
 
 func sendPing(s *Server, p *peer.Peer) error {
diff --git a/packages/database/database.go b/packages/database/database.go
index 4da5818cd93cde477656d97f9d069267d0f2ccba..793b394489253e1f044c0101c39634096bbc13f8 100644
--- a/packages/database/database.go
+++ b/packages/database/database.go
@@ -23,6 +23,7 @@ type (
 	Entry        = database.Entry
 	KeyOnlyEntry = database.KeyOnlyEntry
 	KeyPrefix    = database.KeyPrefix
+	Key          = database.Key
 	Value        = database.Value
 )
 
diff --git a/packages/database/mapdb/mapdb.go b/packages/database/mapdb/mapdb.go
new file mode 100644
index 0000000000000000000000000000000000000000..e9627130049cc2b2a7bcba0787c2c30036d420ab
--- /dev/null
+++ b/packages/database/mapdb/mapdb.go
@@ -0,0 +1,220 @@
+// Package mapdb provides a map implementation of a key value database.
+// It offers a lightweight drop-in replacement of hive.go/database for tests or in simulations
+// where more than one instance is required.
+package mapdb
+
+import (
+	"strings"
+	"sync"
+
+	"github.com/iotaledger/hive.go/database"
+	"github.com/iotaledger/hive.go/typeutils"
+)
+
+// MapDB is a simple implementation of DB using a map.
+type MapDB struct {
+	mu sync.RWMutex
+	m  map[string]mapEntry
+}
+
+type mapEntry struct {
+	value []byte
+	meta  byte
+}
+
+// NewMapDB creates a database.Database implementation purely based on a go map.
+// MapDB does not support TTL.
+func NewMapDB() *MapDB {
+	return &MapDB{
+		m: make(map[string]mapEntry),
+	}
+}
+
+func (db *MapDB) Contains(key database.Key) (contains bool, err error) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	_, contains = db.m[typeutils.BytesToString(key)]
+	return
+}
+
+func (db *MapDB) Get(key database.Key) (entry database.Entry, err error) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	ent, contains := db.m[typeutils.BytesToString(key)]
+	if !contains {
+		err = database.ErrKeyNotFound
+		return
+	}
+	entry.Key = key
+	entry.Value = append([]byte{}, ent.value...)
+	entry.Meta = ent.meta
+	return
+}
+
+func (db *MapDB) GetKeyOnly(key database.Key) (entry database.KeyOnlyEntry, err error) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	ent, contains := db.m[typeutils.BytesToString(key)]
+	if !contains {
+		err = database.ErrKeyNotFound
+		return
+	}
+	entry.Key = key
+	entry.Meta = ent.meta
+	return
+}
+
+func (db *MapDB) Set(entry database.Entry) error {
+	db.mu.Lock()
+	defer db.mu.Unlock()
+
+	db.m[typeutils.BytesToString(entry.Key)] = mapEntry{
+		value: append([]byte{}, entry.Value...),
+		meta:  entry.Meta,
+	}
+	return nil
+}
+
+func (db *MapDB) Delete(key database.Key) error {
+	db.mu.Lock()
+	defer db.mu.Unlock()
+
+	delete(db.m, typeutils.BytesToString(key))
+	return nil
+}
+
+func (db *MapDB) DeletePrefix(keyPrefix database.KeyPrefix) error {
+	db.mu.Lock()
+	defer db.mu.Unlock()
+
+	prefix := typeutils.BytesToString(keyPrefix)
+	for key := range db.m {
+		if strings.HasPrefix(key, prefix) {
+			delete(db.m, key)
+		}
+	}
+	return nil
+}
+
+func (db *MapDB) ForEach(consume func(entry database.Entry) bool) error {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	for key, ent := range db.m {
+		entry := database.Entry{
+			Key:   []byte(key),
+			Value: append([]byte{}, ent.value...),
+			Meta:  ent.meta,
+		}
+		if consume(entry) {
+			break
+		}
+	}
+	return nil
+}
+
+func (db *MapDB) ForEachKeyOnly(consume func(entry database.KeyOnlyEntry) bool) error {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	for key, ent := range db.m {
+		entry := database.KeyOnlyEntry{
+			Key:  []byte(key),
+			Meta: ent.meta,
+		}
+		if consume(entry) {
+			break
+		}
+	}
+	return nil
+}
+
+func (db *MapDB) ForEachPrefix(keyPrefix database.KeyPrefix, consume func(entry database.Entry) (stop bool)) error {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	prefix := typeutils.BytesToString(keyPrefix)
+	for key, ent := range db.m {
+		if strings.HasPrefix(key, prefix) {
+			entry := database.Entry{
+				Key:   []byte(strings.TrimPrefix(key, prefix)),
+				Value: append([]byte{}, ent.value...),
+				Meta:  ent.meta,
+			}
+			if consume(entry) {
+				break
+			}
+		}
+	}
+	return nil
+}
+
+func (db *MapDB) ForEachPrefixKeyOnly(keyPrefix database.KeyPrefix, consume func(entry database.KeyOnlyEntry) (stop bool)) error {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	prefix := typeutils.BytesToString(keyPrefix)
+	for key, ent := range db.m {
+		if strings.HasPrefix(key, prefix) {
+			entry := database.KeyOnlyEntry{
+				Key:  []byte(strings.TrimPrefix(key, prefix)),
+				Meta: ent.meta,
+			}
+			if consume(entry) {
+				break
+			}
+		}
+	}
+	return nil
+}
+
+func (db *MapDB) StreamForEach(consume func(entry database.Entry) error) (err error) {
+	_ = db.ForEach(func(entry database.Entry) bool {
+		err = consume(entry)
+		return err != nil
+	})
+	return
+}
+
+func (db *MapDB) StreamForEachKeyOnly(consume func(entry database.KeyOnlyEntry) error) (err error) {
+	_ = db.ForEachKeyOnly(func(entry database.KeyOnlyEntry) bool {
+		err = consume(entry)
+		return err != nil
+	})
+	return
+}
+
+func (db *MapDB) StreamForEachPrefix(keyPrefix database.KeyPrefix, consume func(entry database.Entry) error) (err error) {
+	_ = db.ForEachPrefix(keyPrefix, func(entry database.Entry) bool {
+		err = consume(entry)
+		return err != nil
+	})
+	return
+}
+
+func (db *MapDB) StreamForEachPrefixKeyOnly(keyPrefix database.KeyPrefix, consume func(database.KeyOnlyEntry) error) (err error) {
+	_ = db.ForEachPrefixKeyOnly(keyPrefix, func(entry database.KeyOnlyEntry) bool {
+		err = consume(entry)
+		return err != nil
+	})
+	return
+}
+
+func (db *MapDB) Apply(set []database.Entry, del []database.Key) error {
+	db.mu.Lock()
+	defer db.mu.Unlock()
+
+	for _, entry := range set {
+		db.m[typeutils.BytesToString(entry.Key)] = mapEntry{
+			value: append([]byte{}, entry.Value...),
+			meta:  entry.Meta,
+		}
+	}
+	for _, key := range del {
+		delete(db.m, typeutils.BytesToString(key))
+	}
+	return nil
+}
diff --git a/packages/gossip/manager_test.go b/packages/gossip/manager_test.go
index ea19d48097cd9c1ec16ccdecef5e4a66ab6f249c..f4b25a2943ea8bb32c14c2923347c234895733cb 100644
--- a/packages/gossip/manager_test.go
+++ b/packages/gossip/manager_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/golang/protobuf/proto"
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer"
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer/service"
+	"github.com/iotaledger/goshimmer/packages/database/mapdb"
 	pb "github.com/iotaledger/goshimmer/packages/gossip/proto"
 	"github.com/iotaledger/goshimmer/packages/gossip/server"
 	"github.com/iotaledger/hive.go/events"
@@ -369,13 +370,18 @@ func TestTxRequest(t *testing.T) {
 	e.AssertExpectations(t)
 }
 
+func newTestDB(t require.TestingT) *peer.DB { // in-memory peer DB for tests; aborts the test on creation failure
+	db, err := peer.NewDB(mapdb.NewMapDB())
+	require.NoError(t, err)
+	return db
+}
+
 func newTestManager(t require.TestingT, name string) (*Manager, func(), *peer.Peer) {
 	l := log.Named(name)
 
 	services := service.New()
 	services.Update(service.PeeringKey, "peering", name)
-	db := peer.NewMemoryDB(l.Named("db"))
-	local, err := peer.NewLocal(services, db)
+	local, err := peer.NewLocal(services, newTestDB(t))
 	require.NoError(t, err)
 
 	laddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
@@ -396,7 +402,6 @@ func newTestManager(t require.TestingT, name string) (*Manager, func(), *peer.Pe
 		mgr.Close()
 		srv.Close()
 		_ = lis.Close()
-		db.Close()
 	}
 	return mgr, detach, &local.Peer
 }
diff --git a/packages/gossip/server/server_test.go b/packages/gossip/server/server_test.go
index 5f2778a10e2aad2b20061ff0473c46f847403a2a..841552972abf0cc50cd95612ac9179736511bfbd 100644
--- a/packages/gossip/server/server_test.go
+++ b/packages/gossip/server/server_test.go
@@ -8,6 +8,7 @@ import (
 
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer"
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer/service"
+	"github.com/iotaledger/goshimmer/packages/database/mapdb"
 	"github.com/iotaledger/hive.go/logger"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -164,13 +165,18 @@ func TestWrongConnect(t *testing.T) {
 	wg.Wait()
 }
 
+func newTestDB(t require.TestingT) *peer.DB { // in-memory peer DB for tests; aborts the test on creation failure
+	db, err := peer.NewDB(mapdb.NewMapDB())
+	require.NoError(t, err)
+	return db
+}
+
 func newTestServer(t require.TestingT, name string) (*TCP, func()) {
 	l := log.Named(name)
 
 	services := service.New()
 	services.Update(service.PeeringKey, "peering", name)
-	db := peer.NewMemoryDB(l.Named("db"))
-	local, err := peer.NewLocal(services, db)
+	local, err := peer.NewLocal(services, newTestDB(t))
 	require.NoError(t, err)
 
 	laddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
@@ -186,7 +192,6 @@ func newTestServer(t require.TestingT, name string) (*TCP, func()) {
 	teardown := func() {
 		srv.Close()
 		_ = lis.Close()
-		db.Close()
 	}
 	return srv, teardown
 }
diff --git a/packages/shutdown/order.go b/packages/shutdown/order.go
index 51ed31ec8d11251d947628842d80542e7f05d7dd..715bbb038818a8aae85f76c105ea3d8942e69b97 100644
--- a/packages/shutdown/order.go
+++ b/packages/shutdown/order.go
@@ -6,10 +6,10 @@ const (
 	ShutdownPriorityBundleProcessor
 	ShutdownPriorityAnalysis
 	ShutdownPriorityMetrics
-	ShutdownPriorityWebAPI
+	ShutdownPriorityAutopeering
 	ShutdownPriorityGossip
+	ShutdownPriorityWebAPI
 	ShutdownPriorityZMQ
-	ShutdownPriorityAutopeering
 	ShutdownPriorityGraph
 	ShutdownPriorityUI
 	ShutdownPriorityDashboard
diff --git a/plugins/autopeering/autopeering.go b/plugins/autopeering/autopeering.go
index 2d578fc9fbbe41bf8ed96bc74fd24ba0a76febe4..ca3b3de98cd8f5496a151458d1b99977a0205b71 100644
--- a/plugins/autopeering/autopeering.go
+++ b/plugins/autopeering/autopeering.go
@@ -125,11 +125,14 @@ func start(shutdownSignal <-chan struct{}) {
 		defer Selection.Close()
 	}
 
-	log.Infof(name+" started: Address=%s/%s", peeringAddr.String(), peeringAddr.Network())
-	log.Infof(name+" started: ID=%s PublicKey=%s", lPeer.ID(), base64.StdEncoding.EncodeToString(lPeer.PublicKey()))
+	log.Infof("%s started: ID=%s Address=%s/%s", name, lPeer.ID(), peeringAddr.String(), peeringAddr.Network())
 
 	<-shutdownSignal
-	log.Info("Stopping " + name + " ...")
+
+	log.Infof("Stopping %s ...", name)
+
+	count := lPeer.Database().PersistSeeds()
+	log.Infof("%d peers persisted as seeds", count)
 }
 
 func parseEntryNodes() (result []*peer.Peer, err error) {
diff --git a/plugins/autopeering/local/local.go b/plugins/autopeering/local/local.go
index 6b694b72c524d1bb17387d79313ed2510495623c..65c713311ad4fb8827652f5d53d12c86b5a2290a 100644
--- a/plugins/autopeering/local/local.go
+++ b/plugins/autopeering/local/local.go
@@ -10,6 +10,7 @@ import (
 
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer"
 	"github.com/iotaledger/goshimmer/packages/autopeering/peer/service"
+	"github.com/iotaledger/goshimmer/packages/database"
 	"github.com/iotaledger/goshimmer/packages/netutil"
 	"github.com/iotaledger/goshimmer/packages/parameter"
 	"github.com/iotaledger/hive.go/logger"
@@ -64,8 +65,16 @@ func configureLocal() *peer.Local {
 		}
 		seed = append(seed, bytes)
 	}
+	badgerDB, err := database.Get(database.DBPrefixAutoPeering, database.GetBadgerInstance())
+	if err != nil {
+		log.Fatalf("Error loading DB: %s", err)
+	}
+	peerDB, err := peer.NewDB(badgerDB)
+	if err != nil {
+		log.Fatalf("Error creating peer DB: %s", err)
+	}
 
-	local, err := peer.NewLocal(services, peer.NewPersistentDB(log), seed...)
+	local, err := peer.NewLocal(services, peerDB, seed...)
 	if err != nil {
 		log.Fatalf("Error creating local: %s", err)
 	}