Unverified commit 1134f3fd authored by Luca Moser, committed by GitHub

Adds changes for v0.1.2 (#266)


* New binary tangle using atomic transactions (#239)

* Feat: started porting the new binary stuff

* Refactor: removed unnecessary folder

* Refactor: cleaned up go.mod files

* Fix: removed objectsdb files

* Feat: added transactionrequester package

* Adds the transactionParser as a filter and pre-processing mechanism for the tangle. (#242)

* Feat: started porting the new binary stuff

* Refactor: removed unnecessary folder

* Refactor: cleaned up go.mod files

* Fix: removed objectsdb files

* Feat: added transactionrequester package

* Feat: added new transactionparser as the filter for the tangle

* Feat: Use hive.go autopeering (#250)

* use autopeering from hive.go
* update hive.go

* update hive.go

* Adds the TransactionRequester and some refactors (#256)

* Feat: started porting the new binary stuff

* Refactor: removed unnecessary folder

* Refactor: cleaned up go.mod files

* Fix: removed objectsdb files

* Feat: added transactionrequester package

* Feat: added new transactionparser as the filter for the tangle

* Refactor: removed duplicate code

* Fix: Log dropping packets every 1000 drops (#255)

* Fix: Log dropping packets every 1000 drops

* use an atomic counter for dropped messages because Write() could be called concurrently

* removes redundant zero

* Add external check in test

Co-authored-by: Luca Moser <moser.luca@gmail.com>

* Fix: Update pprof port (#244)

Since both Hornet and Goshimmer might be running on the same machine, update the port so that we can get debug information for both.

* Remove docker specific config (#245)

* :arrow_up: upgrades hive.go

* :bug: checks meta_tx size

* :heavy_minus_sign: removes dependencies

* :sparkles: enables local testing

* :sparkles: adds config plugin

* Extend remote log message (#260)

* Bump up Logstash and ElasticSearch memory #251

* Add version and, if available, GIT information to remote log message #251

* :bug: fixes flag parsing before loading config file

* Feat: Add --version to cli (#264)

* Adds changelog entry for v0.1.2 and bumps version number (#265)

* updates changelog for v0.1.2 release

* bumps version number to v0.1.2

Co-authored-by: Hans Moog <hm@mkjc.net>
Co-authored-by: Wolfgang Welz <welzwo@gmail.com>
Co-authored-by: jkrvivian <jkrvivian@gmail.com>
Co-authored-by: Dave <44786846+centercirclesolutions@users.noreply.github.com>
Co-authored-by: Angelo Capossele <angelocapossele@gmail.com>
Co-authored-by: Jonas Theis <mail@jonastheis.de>
parent 5d92569b
Showing 0 additions and 2532 deletions
package salt
import (
"crypto/rand"
"fmt"
"sync"
"time"
"github.com/golang/protobuf/proto"
pb "github.com/iotaledger/goshimmer/packages/autopeering/salt/proto"
)
// SaltByteSize specifies the number of bytes used for the salt.
const SaltByteSize = 20
// Salt encapsulates high level functions around salt management.
type Salt struct {
bytes []byte // value of the salt
expirationTime time.Time // expiration time of the salt
mutex sync.RWMutex
}
// NewSalt generates a new salt given a lifetime duration
func NewSalt(lifetime time.Duration) (salt *Salt, err error) {
salt = &Salt{
bytes: make([]byte, SaltByteSize),
expirationTime: time.Now().Add(lifetime),
}
if _, err = rand.Read(salt.bytes); err != nil {
return nil, err
}
return salt, err
}
// GetBytes returns a copy of the salt value.
func (s *Salt) GetBytes() []byte {
s.mutex.RLock()
defer s.mutex.RUnlock()
return append([]byte{}, s.bytes...)
}
// GetExpiration returns the expiration time of the salt.
func (s *Salt) GetExpiration() time.Time {
s.mutex.RLock()
defer s.mutex.RUnlock()
return s.expirationTime
}
// Expired returns true if the given salt expired
func (s *Salt) Expired() bool {
return time.Now().After(s.GetExpiration())
}
// ToProto encodes the Salt into a proto buffer Salt message
func (s *Salt) ToProto() *pb.Salt {
return &pb.Salt{
Bytes: s.bytes,
ExpTime: uint64(s.expirationTime.Unix()),
}
}
// FromProto decodes a given proto buffer Salt message (in) and returns the corresponding Salt.
func FromProto(in *pb.Salt) (*Salt, error) {
if l := len(in.GetBytes()); l != SaltByteSize {
return nil, fmt.Errorf("invalid salt length: %d, need %d", l, SaltByteSize)
}
out := &Salt{
bytes: in.GetBytes(),
expirationTime: time.Unix(int64(in.GetExpTime()), 0),
}
return out, nil
}
// Marshal serializes a given salt (s) into a slice of bytes (data)
func (s *Salt) Marshal() ([]byte, error) {
return proto.Marshal(s.ToProto())
}
// Unmarshal de-serializes a given slice of bytes (data) into a Salt.
func Unmarshal(data []byte) (*Salt, error) {
s := &pb.Salt{}
if err := proto.Unmarshal(data, s); err != nil {
return nil, err
}
return FromProto(s)
}
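For orientation, a minimal usage sketch of the salt API above; the one-hour lifetime and the variable names are illustrative and not part of the commit. A salt is created with a lifetime, serialized with Marshal, and restored with Unmarshal.

package main

import (
    "fmt"
    "time"

    "github.com/iotaledger/goshimmer/packages/autopeering/salt"
)

func main() {
    // Create a salt that is valid for one hour (illustrative lifetime).
    s, err := salt.NewSalt(time.Hour)
    if err != nil {
        panic(err)
    }

    // Serialize it, e.g. to persist the salt across restarts.
    data, err := s.Marshal()
    if err != nil {
        panic(err)
    }

    // Restore it; the bytes and the expiration time (to second precision) survive the round trip.
    restored, err := salt.Unmarshal(data)
    if err != nil {
        panic(err)
    }
    fmt.Println(restored.Expired(), restored.GetExpiration())
}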
package salt
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewSalt(t *testing.T) {
type testCase struct {
input time.Duration
want error
}
tests := []testCase{
{input: 0, want: nil},
{input: 10, want: nil},
{input: -1, want: nil},
}
for _, test := range tests {
_, err := NewSalt(test.input)
assert.Equal(t, test.want, err, test)
}
}
func TestSaltExpired(t *testing.T) {
type testCase struct {
input time.Duration
want bool
}
tests := []testCase{
{input: 0, want: true},
{input: time.Second * 10, want: false},
{input: -1, want: true},
}
for _, test := range tests {
salt, _ := NewSalt(test.input)
got := salt.Expired()
assert.Equal(t, test.want, got, test)
}
}
func TestMarshalUnmarshal(t *testing.T) {
type testCase struct {
input time.Duration
}
tests := []testCase{
{input: 0},
{input: time.Second * 10},
{input: -1},
}
for _, test := range tests {
salt, _ := NewSalt(test.input)
data, err := salt.Marshal()
require.Equal(t, nil, err, "NoErrorCheck")
got, err := Unmarshal(data)
require.Equal(t, nil, err, "NoErrorCheck")
assert.Equal(t, salt.GetBytes(), got.GetBytes(), "Salt")
assert.Equal(t, salt.GetExpiration().Unix(), got.GetExpiration().Unix(), "SameSaltExpirationTime")
}
}
package selection
import (
"time"
"github.com/iotaledger/goshimmer/packages/autopeering/peer"
"github.com/iotaledger/hive.go/logger"
)
// Default values for the global parameters
const (
DefaultInboundNeighborSize = 4
DefaultOutboundNeighborSize = 4
DefaultSaltLifetime = 30 * time.Minute
DefaultOutboundUpdateInterval = 1 * time.Second
DefaultFullOutboundUpdateInterval = 1 * time.Minute
)
var (
inboundNeighborSize = DefaultInboundNeighborSize // number of inbound neighbors
outboundNeighborSize = DefaultOutboundNeighborSize // number of outbound neighbors
saltLifetime = DefaultSaltLifetime // lifetime of the private and public local salt
outboundUpdateInterval = DefaultOutboundUpdateInterval // time after which out neighbors are updated
fullOutboundUpdateInterval = DefaultFullOutboundUpdateInterval // time after which full out neighbors are updated
)
// Config holds settings for the peer selection.
type Config struct {
// Logger
Log *logger.Logger
// These settings are optional:
DropOnUpdate bool // set true to drop all neighbors when the salt is updated
NeighborValidator Validator // potential neighbor validator
}
// A Validator checks whether a peer is a valid neighbor
type Validator interface {
IsValid(*peer.Peer) bool
}
// The ValidatorFunc type is an adapter to allow the use of ordinary functions as neighbor validators.
// If f is a function with the appropriate signature, ValidatorFunc(f) is a Validator that calls f.
type ValidatorFunc func(p *peer.Peer) bool
// IsValid calls f(p).
func (f ValidatorFunc) IsValid(p *peer.Peer) bool { return f(p) }
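A sketch of how the adapter is intended to be used; the blocklist and the buildConfig helper are hypothetical, and any func(*peer.Peer) bool works. An ordinary function is wrapped with ValidatorFunc and handed to Config as the NeighborValidator.

package peering

import (
    "github.com/iotaledger/goshimmer/packages/autopeering/peer"
    "github.com/iotaledger/goshimmer/packages/autopeering/selection"
    "github.com/iotaledger/hive.go/logger"
)

// buildConfig wraps an ordinary function as a Validator via ValidatorFunc.
// The blocklist is purely illustrative; the manager only ever calls IsValid.
func buildConfig(log *logger.Logger, blocklist map[peer.ID]bool) selection.Config {
    return selection.Config{
        Log: log,
        NeighborValidator: selection.ValidatorFunc(func(p *peer.Peer) bool {
            return !blocklist[p.ID()]
        }),
    }
}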
// Parameters holds the parameters that can be configured.
type Parameters struct {
InboundNeighborSize int // number of inbound neighbors
OutboundNeighborSize int // number of outbound neighbors
SaltLifetime time.Duration // lifetime of the private and public local salt
OutboundUpdateInterval time.Duration // time interval after which the outbound neighbors are checked
FullOutboundUpdateInterval time.Duration // time after which the full outbound neighbors are updated
}
// SetParameters sets the global parameters for this package.
// This function cannot be used concurrently.
func SetParameters(param Parameters) {
if param.InboundNeighborSize > 0 {
inboundNeighborSize = param.InboundNeighborSize
} else {
inboundNeighborSize = DefaultInboundNeighborSize
}
if param.OutboundNeighborSize > 0 {
outboundNeighborSize = param.OutboundNeighborSize
} else {
outboundNeighborSize = DefaultOutboundNeighborSize
}
if param.SaltLifetime > 0 {
saltLifetime = param.SaltLifetime
} else {
saltLifetime = DefaultSaltLifetime
}
if param.OutboundUpdateInterval > 0 {
outboundUpdateInterval = param.OutboundUpdateInterval
} else {
outboundUpdateInterval = DefaultOutboundUpdateInterval
}
if param.FullOutboundUpdateInterval > 0 {
fullOutboundUpdateInterval = param.FullOutboundUpdateInterval
} else {
fullOutboundUpdateInterval = DefaultFullOutboundUpdateInterval
}
}
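A minimal sketch of overriding these defaults at start-up, assuming the package is imported as selection; the values are illustrative, and any field left at zero falls back to its default.

package peering

import (
    "time"

    "github.com/iotaledger/goshimmer/packages/autopeering/selection"
)

func configureSelection() {
    // Must be called before the selection is started; SetParameters is not safe for concurrent use.
    selection.SetParameters(selection.Parameters{
        InboundNeighborSize:    8,                // illustrative values, not the shipped defaults
        OutboundNeighborSize:   8,
        SaltLifetime:           2 * time.Hour,
        OutboundUpdateInterval: 10 * time.Second,
        // FullOutboundUpdateInterval omitted -> DefaultFullOutboundUpdateInterval
    })
}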
package selection
import (
"github.com/iotaledger/goshimmer/packages/autopeering/peer"
"github.com/iotaledger/goshimmer/packages/autopeering/salt"
"github.com/iotaledger/hive.go/events"
)
// Events contains all the events that are triggered during the neighbor selection.
var Events = struct {
// A SaltUpdated event is triggered when the private and public salts are updated.
SaltUpdated *events.Event
// An OutgoingPeering event is triggered when a valid response to a PeeringRequest has been received.
OutgoingPeering *events.Event
// An IncomingPeering event is triggered when a valid PeeringRequest has been received.
IncomingPeering *events.Event
// A Dropped event is triggered when a neighbor is dropped or a drop message is received.
Dropped *events.Event
}{
SaltUpdated: events.NewEvent(saltUpdatedCaller),
OutgoingPeering: events.NewEvent(peeringCaller),
IncomingPeering: events.NewEvent(peeringCaller),
Dropped: events.NewEvent(droppedCaller),
}
// SaltUpdatedEvent bundles the information sent in the SaltUpdated event.
type SaltUpdatedEvent struct {
Self peer.ID // ID of the peer triggering the event.
Public, Private *salt.Salt // the updated salt
}
// PeeringEvent bundles the information sent in the OutgoingPeering and IncomingPeering event.
type PeeringEvent struct {
Self peer.ID // ID of the peer triggering the event.
Peer *peer.Peer // peering partner
Status bool // true, when the peering partner has accepted the request
}
// DroppedEvent bundles the information sent in Dropped events.
type DroppedEvent struct {
Self peer.ID // ID of the peer triggering the event.
DroppedID peer.ID // ID of the peer that gets dropped.
}
func saltUpdatedCaller(handler interface{}, params ...interface{}) {
handler.(func(*SaltUpdatedEvent))(params[0].(*SaltUpdatedEvent))
}
func peeringCaller(handler interface{}, params ...interface{}) {
handler.(func(*PeeringEvent))(params[0].(*PeeringEvent))
}
func droppedCaller(handler interface{}, params ...interface{}) {
handler.(func(*DroppedEvent))(params[0].(*DroppedEvent))
}
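A sketch of subscribing to these events from outside the package, mirroring what the eventMock in the tests further below does; the watchDrops helper and the handler body are illustrative.

package peering

import (
    "fmt"

    "github.com/iotaledger/goshimmer/packages/autopeering/selection"
    "github.com/iotaledger/hive.go/events"
)

func watchDrops() (detach func()) {
    // Wrap the handler in a closure, attach it, and return a function to detach it again.
    onDropped := events.NewClosure(func(ev *selection.DroppedEvent) {
        fmt.Printf("%v dropped neighbor %v\n", ev.Self, ev.DroppedID)
    })
    selection.Events.Dropped.Attach(onDropped)
    return func() { selection.Events.Dropped.Detach(onDropped) }
}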
package selection
import (
"sync"
"time"
"github.com/iotaledger/goshimmer/packages/autopeering/peer"
"github.com/iotaledger/goshimmer/packages/autopeering/salt"
"github.com/iotaledger/hive.go/logger"
)
const (
accept = true
reject = false
// buffer size of the channels handling inbound requests and drops.
queueSize = 10
)
// A network represents the communication layer for the manager.
type network interface {
local() *peer.Local
PeeringRequest(*peer.Peer, *salt.Salt) (bool, error)
PeeringDrop(*peer.Peer)
}
type peeringRequest struct {
peer *peer.Peer
salt *salt.Salt
}
type manager struct {
net network
getPeersToConnect func() []*peer.Peer
log *logger.Logger
dropOnUpdate bool // set true to drop all neighbors when the salt is updated
neighborValidator Validator // potential neighbor validator
inbound *Neighborhood
outbound *Neighborhood
rejectionFilter *Filter
dropChan chan peer.ID
requestChan chan peeringRequest
replyChan chan bool
wg sync.WaitGroup
closing chan struct{}
}
func newManager(net network, peersFunc func() []*peer.Peer, log *logger.Logger, config Config) *manager {
return &manager{
net: net,
getPeersToConnect: peersFunc,
log: log,
dropOnUpdate: config.DropOnUpdate,
neighborValidator: config.NeighborValidator,
inbound: NewNeighborhood(inboundNeighborSize),
outbound: NewNeighborhood(outboundNeighborSize),
rejectionFilter: NewFilter(),
dropChan: make(chan peer.ID, queueSize),
requestChan: make(chan peeringRequest, queueSize),
replyChan: make(chan bool, 1),
closing: make(chan struct{}),
}
}
func (m *manager) start() {
if m.getPublicSalt() == nil || m.getPrivateSalt() == nil {
m.updateSalt()
}
m.wg.Add(1)
go m.loop()
}
func (m *manager) close() {
close(m.closing)
m.wg.Wait()
}
func (m *manager) getID() peer.ID {
return m.net.local().ID()
}
func (m *manager) getPublicSalt() *salt.Salt {
return m.net.local().GetPublicSalt()
}
func (m *manager) getPrivateSalt() *salt.Salt {
return m.net.local().GetPrivateSalt()
}
func (m *manager) getNeighbors() []*peer.Peer {
var neighbors []*peer.Peer
neighbors = append(neighbors, m.inbound.GetPeers()...)
neighbors = append(neighbors, m.outbound.GetPeers()...)
return neighbors
}
func (m *manager) getInNeighbors() []*peer.Peer {
return m.inbound.GetPeers()
}
func (m *manager) getOutNeighbors() []*peer.Peer {
return m.outbound.GetPeers()
}
func (m *manager) requestPeering(p *peer.Peer, s *salt.Salt) bool {
var status bool
select {
case m.requestChan <- peeringRequest{p, s}:
status = <-m.replyChan
default:
// a full queue should count as a failed request
status = false
}
return status
}
func (m *manager) removeNeighbor(id peer.ID) {
m.dropChan <- id
}
func (m *manager) loop() {
defer m.wg.Done()
var updateOutResultChan chan peer.PeerDistance
updateTimer := time.NewTimer(0) // setting this to 0 will cause a trigger right away
defer updateTimer.Stop()
Loop:
for {
select {
// update the outbound neighbors
case <-updateTimer.C:
updateOutResultChan = make(chan peer.PeerDistance)
// check salt and update if necessary
if m.getPublicSalt().Expired() {
m.updateSalt()
}
// check for new peers to connect to in a separate go routine
go m.updateOutbound(updateOutResultChan)
// handle the result of updateOutbound
case req := <-updateOutResultChan:
if req.Remote != nil {
// if the peer is already in inbound, do not add it and remove it from inbound
if p := m.inbound.RemovePeer(req.Remote.ID()); p != nil {
m.triggerPeeringEvent(true, req.Remote, false)
m.dropPeering(p)
} else {
m.addNeighbor(m.outbound, req)
m.triggerPeeringEvent(true, req.Remote, true)
}
}
// call updateOutbound again after the given interval
updateOutResultChan = nil
updateTimer.Reset(m.getUpdateTimeout())
// handle a drop request
case id := <-m.dropChan:
droppedPeer := m.inbound.RemovePeer(id)
if p := m.outbound.RemovePeer(id); p != nil {
droppedPeer = p
m.rejectionFilter.AddPeer(id)
// if not yet updating, trigger an immediate update
if updateOutResultChan == nil && updateTimer.Stop() {
updateTimer.Reset(0)
}
}
if droppedPeer != nil {
m.dropPeering(droppedPeer)
}
// handle an inbound request
case req := <-m.requestChan:
status := m.handleInRequest(req)
// trigger in the main loop to guarantee order of events
m.triggerPeeringEvent(false, req.peer, status)
// on close, exit the loop
case <-m.closing:
break Loop
}
}
// wait for the updateOutbound to finish
if updateOutResultChan != nil {
<-updateOutResultChan
}
}
func (m *manager) getUpdateTimeout() time.Duration {
result := outboundUpdateInterval
if m.outbound.IsFull() {
result = fullOutboundUpdateInterval
}
saltExpiration := time.Until(m.getPublicSalt().GetExpiration())
if saltExpiration < result {
result = saltExpiration
}
return result
}
// updateOutbound updates outbound neighbors.
func (m *manager) updateOutbound(resultChan chan<- peer.PeerDistance) {
var result peer.PeerDistance
defer func() { resultChan <- result }() // assure that a result is always sent to the channel
// sort verified peers by distance
distList := peer.SortBySalt(m.getID().Bytes(), m.getPublicSalt().GetBytes(), m.getPeersToConnect())
// filter out current neighbors
filter := m.getConnectedFilter()
if m.neighborValidator != nil {
filter.AddCondition(m.neighborValidator.IsValid)
}
// filter out previous rejections
filteredList := m.rejectionFilter.Apply(filter.Apply(distList))
if len(filteredList) == 0 {
return
}
// select new candidate
candidate := m.outbound.Select(filteredList)
if candidate.Remote == nil {
return
}
status, err := m.net.PeeringRequest(candidate.Remote, m.getPublicSalt())
if err != nil {
m.rejectionFilter.AddPeer(candidate.Remote.ID())
m.log.Debugw("error requesting peering",
"id", candidate.Remote.ID(),
"addr", candidate.Remote.Address(), "err", err,
)
return
}
if !status {
m.rejectionFilter.AddPeer(candidate.Remote.ID())
m.triggerPeeringEvent(true, candidate.Remote, false)
return
}
result = candidate
}
func (m *manager) handleInRequest(req peeringRequest) (resp bool) {
resp = reject
defer func() { m.replyChan <- resp }() // assure that a response is always issued
if !m.isValidNeighbor(req.peer) {
return
}
reqDistance := peer.NewPeerDistance(m.getID().Bytes(), m.getPrivateSalt().GetBytes(), req.peer)
filter := m.getConnectedFilter()
filteredList := filter.Apply([]peer.PeerDistance{reqDistance})
if len(filteredList) == 0 {
return
}
toAccept := m.inbound.Select(filteredList)
if toAccept.Remote == nil {
return
}
m.addNeighbor(m.inbound, toAccept)
resp = accept
return
}
func (m *manager) addNeighbor(nh *Neighborhood, toAdd peer.PeerDistance) {
// drop furthest neighbor if necessary
if furthest, _ := nh.getFurthest(); furthest.Remote != nil {
if p := nh.RemovePeer(furthest.Remote.ID()); p != nil {
m.dropPeering(p)
}
}
nh.Add(toAdd)
}
func (m *manager) updateSalt() {
public, _ := salt.NewSalt(saltLifetime)
m.net.local().SetPublicSalt(public)
private, _ := salt.NewSalt(saltLifetime)
m.net.local().SetPrivateSalt(private)
// clean the rejection filter
m.rejectionFilter.Clean()
if !m.dropOnUpdate { // update distance without dropping neighbors
m.outbound.UpdateDistance(m.getID().Bytes(), m.getPublicSalt().GetBytes())
m.inbound.UpdateDistance(m.getID().Bytes(), m.getPrivateSalt().GetBytes())
} else { // drop all the neighbors
m.dropNeighborhood(m.inbound)
m.dropNeighborhood(m.outbound)
}
m.log.Debugw("salt updated",
"public", saltLifetime,
"private", saltLifetime,
)
Events.SaltUpdated.Trigger(&SaltUpdatedEvent{Self: m.getID(), Public: public, Private: private})
}
func (m *manager) dropNeighborhood(nh *Neighborhood) {
for _, p := range nh.GetPeers() {
nh.RemovePeer(p.ID())
m.dropPeering(p)
}
}
// dropPeering sends the peering drop over the network and triggers the corresponding event.
func (m *manager) dropPeering(p *peer.Peer) {
m.net.PeeringDrop(p)
m.log.Debugw("peering dropped",
"id", p.ID(),
"#out", m.outbound,
"#in", m.inbound,
)
Events.Dropped.Trigger(&DroppedEvent{Self: m.getID(), DroppedID: p.ID()})
}
func (m *manager) getConnectedFilter() *Filter {
filter := NewFilter()
filter.AddPeer(m.getID()) // exclude the local peer itself
filter.AddPeers(m.inbound.GetPeers()) // exclude current inbound neighbors
filter.AddPeers(m.outbound.GetPeers()) // exclude current outbound neighbors
return filter
}
// isValidNeighbor returns whether the given peer is a valid neighbor candidate.
func (m *manager) isValidNeighbor(p *peer.Peer) bool {
// do not connect to oneself
if m.getID() == p.ID() {
return false
}
if m.neighborValidator == nil {
return true
}
return m.neighborValidator.IsValid(p)
}
func (m *manager) triggerPeeringEvent(isOut bool, p *peer.Peer, status bool) {
if isOut {
m.log.Debugw("peering requested",
"direction", "out",
"status", status,
"to", p.ID(),
"#out", m.outbound,
"#in", m.inbound,
)
Events.OutgoingPeering.Trigger(&PeeringEvent{Self: m.getID(), Peer: p, Status: status})
} else {
m.log.Debugw("peering requested",
"direction", "in",
"status", status,
"from", p.ID(),
"#out", m.outbound,
"#in", m.inbound,
)
Events.IncomingPeering.Trigger(&PeeringEvent{Self: m.getID(), Peer: p, Status: status})
}
}
package selection
import (
"fmt"
"sync"
"testing"
"time"
"github.com/iotaledger/goshimmer/packages/autopeering/peer"
"github.com/iotaledger/goshimmer/packages/autopeering/peer/peertest"
"github.com/iotaledger/goshimmer/packages/autopeering/salt"
"github.com/iotaledger/goshimmer/packages/database/mapdb"
"github.com/iotaledger/hive.go/events"
"github.com/stretchr/testify/assert"
)
const (
testSaltLifetime = time.Hour // disable salt updates
testUpdateInterval = 2 * graceTime // very short update interval to speed up tests
)
func TestMgrNoDuplicates(t *testing.T) {
const (
nNeighbors = 4
nNodes = 2*nNeighbors + 1
)
SetParameters(Parameters{
OutboundNeighborSize: nNeighbors,
InboundNeighborSize: nNeighbors,
SaltLifetime: testSaltLifetime,
OutboundUpdateInterval: testUpdateInterval,
})
mgrMap := make(map[peer.ID]*manager)
runTestNetwork(nNodes, mgrMap)
for _, mgr := range mgrMap {
assert.NotEmpty(t, mgr.getOutNeighbors())
assert.NotEmpty(t, mgr.getInNeighbors())
assert.Empty(t, getDuplicates(mgr.getNeighbors()))
}
}
func TestEvents(t *testing.T) {
// we want many drops/connects
const (
nNeighbors = 2
nNodes = 10
)
SetParameters(Parameters{
OutboundNeighborSize: nNeighbors,
InboundNeighborSize: nNeighbors,
SaltLifetime: 3 * testUpdateInterval,
OutboundUpdateInterval: testUpdateInterval,
})
e, teardown := newEventMock(t)
defer teardown()
mgrMap := make(map[peer.ID]*manager)
runTestNetwork(nNodes, mgrMap)
// the events should lead to exactly the same neighbors
for _, mgr := range mgrMap {
nc := e.m[mgr.getID()]
assert.ElementsMatchf(t, mgr.getOutNeighbors(), getValues(nc.out),
"out neighbors of %s do not match", mgr.getID())
assert.ElementsMatch(t, mgr.getInNeighbors(), getValues(nc.in),
"in neighbors of %s do not match", mgr.getID())
}
}
func getValues(m map[peer.ID]*peer.Peer) []*peer.Peer {
result := make([]*peer.Peer, 0, len(m))
for _, p := range m {
result = append(result, p)
}
return result
}
func runTestNetwork(n int, mgrMap map[peer.ID]*manager) {
for i := 0; i < n; i++ {
_ = newTestManager(fmt.Sprintf("%d", i), mgrMap)
}
for _, mgr := range mgrMap {
mgr.start()
}
// give the managers time to potentially connect all other peers
time.Sleep((time.Duration(n) - 1) * (outboundUpdateInterval + graceTime))
// close all the managers
for _, mgr := range mgrMap {
mgr.close()
}
}
func getDuplicates(peers []*peer.Peer) []*peer.Peer {
seen := make(map[peer.ID]bool, len(peers))
result := make([]*peer.Peer, 0, len(peers))
for _, p := range peers {
if !seen[p.ID()] {
seen[p.ID()] = true
} else {
result = append(result, p)
}
}
return result
}
type neighbors struct {
out, in map[peer.ID]*peer.Peer
}
type eventMock struct {
t *testing.T
lock sync.Mutex
m map[peer.ID]neighbors
}
func newEventMock(t *testing.T) (*eventMock, func()) {
e := &eventMock{
t: t,
m: make(map[peer.ID]neighbors),
}
outgoingPeeringC := events.NewClosure(e.outgoingPeering)
incomingPeeringC := events.NewClosure(e.incomingPeering)
droppedC := events.NewClosure(e.dropped)
Events.OutgoingPeering.Attach(outgoingPeeringC)
Events.IncomingPeering.Attach(incomingPeeringC)
Events.Dropped.Attach(droppedC)
teardown := func() {
Events.OutgoingPeering.Detach(outgoingPeeringC)
Events.IncomingPeering.Detach(incomingPeeringC)
Events.Dropped.Detach(droppedC)
}
return e, teardown
}
func (e *eventMock) outgoingPeering(ev *PeeringEvent) {
if !ev.Status {
return
}
e.lock.Lock()
defer e.lock.Unlock()
s, ok := e.m[ev.Self]
if !ok {
s = neighbors{out: make(map[peer.ID]*peer.Peer), in: make(map[peer.ID]*peer.Peer)}
e.m[ev.Self] = s
}
assert.NotContains(e.t, s.out, ev.Peer)
s.out[ev.Peer.ID()] = ev.Peer
}
func (e *eventMock) incomingPeering(ev *PeeringEvent) {
if !ev.Status {
return
}
e.lock.Lock()
defer e.lock.Unlock()
s, ok := e.m[ev.Self]
if !ok {
s = neighbors{out: make(map[peer.ID]*peer.Peer), in: make(map[peer.ID]*peer.Peer)}
e.m[ev.Self] = s
}
assert.NotContains(e.t, s.in, ev.Peer)
s.in[ev.Peer.ID()] = ev.Peer
}
func (e *eventMock) dropped(ev *DroppedEvent) {
e.lock.Lock()
defer e.lock.Unlock()
if assert.Contains(e.t, e.m, ev.Self) {
s := e.m[ev.Self]
delete(s.out, ev.DroppedID)
delete(s.in, ev.DroppedID)
}
}
type networkMock struct {
loc *peer.Local
mgr map[peer.ID]*manager
}
func (n *networkMock) local() *peer.Local {
return n.loc
}
func (n *networkMock) PeeringDrop(p *peer.Peer) {
n.mgr[p.ID()].removeNeighbor(n.local().ID())
}
func (n *networkMock) PeeringRequest(p *peer.Peer, s *salt.Salt) (bool, error) {
return n.mgr[p.ID()].requestPeering(&n.local().Peer, s), nil
}
func (n *networkMock) GetKnownPeers() []*peer.Peer {
peers := make([]*peer.Peer, 0, len(n.mgr))
for _, m := range n.mgr {
peers = append(peers, &m.net.local().Peer)
}
return peers
}
func newTestManager(name string, mgrMap map[peer.ID]*manager) *manager {
db, _ := peer.NewDB(mapdb.NewMapDB())
local := peertest.NewLocal("mock", name, db)
networkMock := &networkMock{loc: local, mgr: mgrMap}
m := newManager(networkMock, networkMock.GetKnownPeers, log.Named(name), Config{})
mgrMap[m.getID()] = m
return m
}
package selection
import (
"fmt"
"sync"
"github.com/iotaledger/goshimmer/packages/autopeering/distance"
"github.com/iotaledger/goshimmer/packages/autopeering/peer"
)
// Neighborhood is a size-limited set of neighbors, each tracked with its distance to the local peer.
type Neighborhood struct {
neighbors []peer.PeerDistance
size int
mu sync.RWMutex
}
// NewNeighborhood creates an empty Neighborhood with the given target size.
func NewNeighborhood(size int) *Neighborhood {
return &Neighborhood{
neighbors: []peer.PeerDistance{},
size: size,
}
}
func (nh *Neighborhood) String() string {
return fmt.Sprintf("%d/%d", nh.GetNumPeers(), nh.size)
}
func (nh *Neighborhood) getFurthest() (peer.PeerDistance, int) {
nh.mu.RLock()
defer nh.mu.RUnlock()
if len(nh.neighbors) < nh.size {
return peer.PeerDistance{
Remote: nil,
Distance: distance.Max,
}, len(nh.neighbors)
}
index := 0
furthest := nh.neighbors[index]
for i, n := range nh.neighbors {
if n.Distance > furthest.Distance {
furthest = n
index = i
}
}
return furthest, index
}
// Select returns the first candidate that is closer than the current furthest neighbor,
// or an empty PeerDistance if no candidate qualifies.
func (nh *Neighborhood) Select(candidates []peer.PeerDistance) peer.PeerDistance {
if len(candidates) > 0 {
target, _ := nh.getFurthest()
for _, candidate := range candidates {
if candidate.Distance < target.Distance {
return candidate
}
}
}
return peer.PeerDistance{}
}
// Add tries to add a new peer with distance to the neighborhood.
// It returns true, if the peer was added, or false if the neighborhood was full.
func (nh *Neighborhood) Add(toAdd peer.PeerDistance) bool {
nh.mu.Lock()
defer nh.mu.Unlock()
if len(nh.neighbors) >= nh.size {
return false
}
nh.neighbors = append(nh.neighbors, toAdd)
return true
}
// RemovePeer removes the peer with the given ID from the neighborhood.
// It returns the peer that was removed, or nil if no such peer exists.
func (nh *Neighborhood) RemovePeer(id peer.ID) *peer.Peer {
nh.mu.Lock()
defer nh.mu.Unlock()
index := nh.getPeerIndex(id)
if index < 0 {
return nil
}
n := nh.neighbors[index]
// remove index from slice
if index < len(nh.neighbors)-1 {
copy(nh.neighbors[index:], nh.neighbors[index+1:])
}
nh.neighbors[len(nh.neighbors)-1] = peer.PeerDistance{}
nh.neighbors = nh.neighbors[:len(nh.neighbors)-1]
return n.Remote
}
func (nh *Neighborhood) getPeerIndex(id peer.ID) int {
for i, p := range nh.neighbors {
if p.Remote.ID() == id {
return i
}
}
return -1
}
// UpdateDistance recomputes the distance of all neighbors relative to the given anchor and salt.
func (nh *Neighborhood) UpdateDistance(anchor, salt []byte) {
nh.mu.Lock()
defer nh.mu.Unlock()
for i, n := range nh.neighbors {
nh.neighbors[i].Distance = distance.BySalt(anchor, n.Remote.ID().Bytes(), salt)
}
}
func (nh *Neighborhood) IsFull() bool {
nh.mu.RLock()
defer nh.mu.RUnlock()
return len(nh.neighbors) >= nh.size
}
func (nh *Neighborhood) GetPeers() []*peer.Peer {
nh.mu.RLock()
defer nh.mu.RUnlock()
result := make([]*peer.Peer, len(nh.neighbors))
for i, n := range nh.neighbors {
result[i] = n.Remote
}
return result
}
func (nh *Neighborhood) GetNumPeers() int {
nh.mu.RLock()
defer nh.mu.RUnlock()
return len(nh.neighbors)
}
package server
import "errors"
var (
// ErrTimeout is returned when an expected response was not received in time.
ErrTimeout = errors.New("response timeout")
// ErrClosed means that the server was shut down before a response could be received.
ErrClosed = errors.New("socket closed")
// ErrNoMessage is returned when the package did not contain any data.
ErrNoMessage = errors.New("packet does not contain a message")
// ErrInvalidMessage means that no handler could process the received message.
ErrInvalidMessage = errors.New("invalid message")
)
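A sketch of how a caller might discriminate these sentinel errors, assuming the package lives under packages/autopeering/server; the handleResponse helper and its sendRequest parameter are hypothetical.

package example

import (
    "errors"

    "github.com/iotaledger/goshimmer/packages/autopeering/server"
)

// handleResponse shows the usual sentinel-error checks; sendRequest stands in for any call
// that can fail with one of the errors defined above.
func handleResponse(sendRequest func() error) error {
    err := sendRequest()
    switch {
    case err == nil:
        return nil
    case errors.Is(err, server.ErrTimeout):
        // the peer did not answer in time; treat it as unreachable and retry later
        return err
    case errors.Is(err, server.ErrClosed):
        // the local server was shut down; stop issuing requests
        return err
    default:
        // ErrNoMessage, ErrInvalidMessage or a lower-level network failure
        return err
    }
}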