Skip to content
Snippets Groups Projects
Unverified Commit 7841117f authored by Jonas Theis's avatar Jonas Theis Committed by GitHub
Browse files

Add message layer integration tests (#416)

* Add common integration tests

* Add message layer persistence test

* Adjust bootstrap time
parent 94bd10d3
No related branches found
No related tags found
No related merge requests found
...@@ -88,6 +88,7 @@ func (d *DockerContainer) CreateGoShimmerPeer(config GoShimmerConfig) error { ...@@ -88,6 +88,7 @@ func (d *DockerContainer) CreateGoShimmerPeer(config GoShimmerConfig) error {
} }
return "" return ""
}()), }()),
fmt.Sprintf("--bootstrap.initialIssuance.timePeriodSec=%d", config.BootstrapInitialIssuanceTimePeriodSec),
"--webapi.bindAddress=0.0.0.0:8080", "--webapi.bindAddress=0.0.0.0:8080",
fmt.Sprintf("--autopeering.seed=%s", config.Seed), fmt.Sprintf("--autopeering.seed=%s", config.Seed),
fmt.Sprintf("--autopeering.entryNodes=%s@%s:14626", config.EntryNodePublicKey, config.EntryNodeHost), fmt.Sprintf("--autopeering.entryNodes=%s@%s:14626", config.EntryNodePublicKey, config.EntryNodeHost),
...@@ -155,7 +156,7 @@ func (d *DockerContainer) Remove() error { ...@@ -155,7 +156,7 @@ func (d *DockerContainer) Remove() error {
// Stop stops a container without terminating the process. // Stop stops a container without terminating the process.
// The process is blocked until the container stops or the timeout expires. // The process is blocked until the container stops or the timeout expires.
func (d *DockerContainer) Stop() error { func (d *DockerContainer) Stop() error {
duration := 10 * time.Second duration := 30 * time.Second
return d.client.ContainerStop(context.Background(), d.id, &duration) return d.client.ContainerStop(context.Background(), d.id, &duration)
} }
......
...@@ -61,7 +61,7 @@ func newFramework() (*Framework, error) { ...@@ -61,7 +61,7 @@ func newFramework() (*Framework, error) {
// CreateNetwork creates and returns a (Docker) Network that contains `peers` GoShimmer nodes. // CreateNetwork creates and returns a (Docker) Network that contains `peers` GoShimmer nodes.
// It waits for the peers to autopeer until the minimum neighbors criteria is met for every peer. // It waits for the peers to autopeer until the minimum neighbors criteria is met for every peer.
// The first peer automatically starts with the bootstrap plugin enabled. // The first peer automatically starts with the bootstrap plugin enabled.
func (f *Framework) CreateNetwork(name string, peers int, minimumNeighbors int) (*Network, error) { func (f *Framework) CreateNetwork(name string, peers int, minimumNeighbors int, networkConfig ...NetworkConfig) (*Network, error) {
network, err := newNetwork(f.dockerClient, strings.ToLower(name), f.tester) network, err := newNetwork(f.dockerClient, strings.ToLower(name), f.tester)
if err != nil { if err != nil {
return nil, err return nil, err
...@@ -72,10 +72,19 @@ func (f *Framework) CreateNetwork(name string, peers int, minimumNeighbors int) ...@@ -72,10 +72,19 @@ func (f *Framework) CreateNetwork(name string, peers int, minimumNeighbors int)
return nil, err return nil, err
} }
// configuration of bootstrap plugin
bootstrapInitialIssuanceTimePeriodSec := -1
if len(networkConfig) > 0 {
bootstrapInitialIssuanceTimePeriodSec = networkConfig[0].BootstrapInitialIssuanceTimePeriodSec
}
// create peers/GoShimmer nodes // create peers/GoShimmer nodes
for i := 0; i < peers; i++ { for i := 0; i < peers; i++ {
bootstrap := i == 0 config := GoShimmerConfig{
if _, err = network.CreatePeer(bootstrap); err != nil { Bootstrap: i == 0,
BootstrapInitialIssuanceTimePeriodSec: bootstrapInitialIssuanceTimePeriodSec,
}
if _, err = network.CreatePeer(config); err != nil {
return nil, err return nil, err
} }
} }
......
...@@ -83,7 +83,7 @@ func (n *Network) createEntryNode() error { ...@@ -83,7 +83,7 @@ func (n *Network) createEntryNode() error {
// CreatePeer creates a new peer/GoShimmer node in the network and returns it. // CreatePeer creates a new peer/GoShimmer node in the network and returns it.
// Passing bootstrap true enables the bootstrap plugin on the given peer. // Passing bootstrap true enables the bootstrap plugin on the given peer.
func (n *Network) CreatePeer(bootstrap bool) (*Peer, error) { func (n *Network) CreatePeer(c GoShimmerConfig) (*Peer, error) {
name := n.namePrefix(fmt.Sprintf("%s%d", containerNameReplica, len(n.peers))) name := n.namePrefix(fmt.Sprintf("%s%d", containerNameReplica, len(n.peers)))
// create identity // create identity
...@@ -93,16 +93,16 @@ func (n *Network) CreatePeer(bootstrap bool) (*Peer, error) { ...@@ -93,16 +93,16 @@ func (n *Network) CreatePeer(bootstrap bool) (*Peer, error) {
} }
seed := base64.StdEncoding.EncodeToString(ed25519.PrivateKey(privateKey.Bytes()).Seed()) seed := base64.StdEncoding.EncodeToString(ed25519.PrivateKey(privateKey.Bytes()).Seed())
config := c
config.Name = name
config.Seed = seed
config.EntryNodeHost = n.namePrefix(containerNameEntryNode)
config.EntryNodePublicKey = n.entryNodePublicKey()
config.DisabledPlugins = disabledPluginsPeer
// create Docker container // create Docker container
container := NewDockerContainer(n.dockerClient) container := NewDockerContainer(n.dockerClient)
err = container.CreateGoShimmerPeer(GoShimmerConfig{ err = container.CreateGoShimmerPeer(config)
Name: name,
Seed: seed,
EntryNodeHost: n.namePrefix(containerNameEntryNode),
EntryNodePublicKey: n.entryNodePublicKey(),
Bootstrap: bootstrap,
DisabledPlugins: disabledPluginsPeer,
})
if err != nil { if err != nil {
return nil, err return nil, err
} }
......
...@@ -12,25 +12,32 @@ const ( ...@@ -12,25 +12,32 @@ const (
logsDir = "/tmp/logs/" logsDir = "/tmp/logs/"
disabledPluginsEntryNode = "portcheck,dashboard,analysis,gossip,webapi,webapibroadcastdataendpoint,webapifindtransactionhashesendpoint,webapigetneighborsendpoint,webapigettransactionobjectsbyhashendpoint,webapigettransactiontrytesbyhashendpoint" disabledPluginsEntryNode = "portcheck,dashboard,analysis-client,gossip,drng,issuer,sync,metrics,messagelayer,webapi,webapibroadcastdataendpoint,webapifindtransactionhashesendpoint,webapigetneighborsendpoint,webapigettransactionobjectsbyhashendpoint,webapigettransactiontrytesbyhashendpoint"
disabledPluginsPeer = "portcheck,dashboard,analysis" disabledPluginsPeer = "portcheck,dashboard,analysis-client"
dockerLogsPrefixLen = 8 dockerLogsPrefixLen = 8
dkgMaxTries = 50 dkgMaxTries = 50
) )
// GoShimmerConfig defines the config of a goshimmer node. // GoShimmerConfig defines the config of a GoShimmer node.
type GoShimmerConfig struct { type GoShimmerConfig struct {
Seed string Seed string
Name string Name string
EntryNodeHost string EntryNodeHost string
EntryNodePublicKey string EntryNodePublicKey string
Bootstrap bool
DisabledPlugins string DisabledPlugins string
Bootstrap bool
BootstrapInitialIssuanceTimePeriodSec int
DRNGCommittee string DRNGCommittee string
DRNGDistKey string DRNGDistKey string
DRNGInstance int DRNGInstance int
DRNGThreshold int DRNGThreshold int
} }
// NetworkConfig defines the config of a GoShimmer Docker network.
type NetworkConfig struct {
// BootstrapInitialIssuanceTimePeriodSec is passed to every peer's container as
// the --bootstrap.initialIssuance.timePeriodSec flag. When CreateNetwork is
// called without a NetworkConfig it defaults to -1.
BootstrapInitialIssuanceTimePeriodSec int
}
package tests
import (
"testing"
"time"
"github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestSynchronization checks that messages are relayed through the network,
// that a node joining later solidifies, that it is desynced after a restart,
// and that it eventually becomes synced again.
func TestSynchronization(t *testing.T) {
	initialPeers := 4
	netConfig := framework.NetworkConfig{
		BootstrapInitialIssuanceTimePeriodSec: 40,
	}
	n, err := f.CreateNetwork("common_TestSynchronization", initialPeers, 2, netConfig)
	require.NoError(t, err)
	defer n.Shutdown()

	// give the peers some time to flip their state to synchronized
	time.Sleep(5 * time.Second)

	// 1. issue data messages
	const msgCount = 100
	ids := sendDataMessagesOnRandomPeer(t, n.Peers(), msgCount)

	// allow the messages to be gossiped through the network
	time.Sleep(5 * time.Second)

	// 2. spawn a peer that has no knowledge of the previous messages
	latePeer, err := n.CreatePeer(framework.GoShimmerConfig{})
	require.NoError(t, err)
	err = n.WaitForAutopeering(3)
	require.NoError(t, err)

	// 3. issue some messages on the old peers so that the new peer can solidify
	ids = sendDataMessagesOnRandomPeer(t, n.Peers()[:initialPeers], 10, ids)

	// give the new peer time to solidify
	time.Sleep(10 * time.Second)

	// 4. every issued message must be available on every node
	checkForMessageIds(t, n.Peers(), ids, true)

	// 5. shut down the newly added peer
	err = latePeer.Stop()
	require.NoError(t, err)

	// 6. start it up again
	err = latePeer.Start()
	require.NoError(t, err)

	// give the peer a moment to boot
	time.Sleep(2 * time.Second)

	// 7. right after the restart the peer must report itself as desynced
	info, err := latePeer.Info()
	require.NoError(t, err)
	assert.Falsef(t, info.Synced, "Peer %s should be desynced but is synced!", latePeer.String())

	// 8. issue some messages on the old peers so that the restarted peer can sync again
	ids = sendDataMessagesOnRandomPeer(t, n.Peers()[:initialPeers], 10, ids)

	// give the peer time to sync
	time.Sleep(5 * time.Second)

	// 9. the restarted peer must be synced again
	info, err = latePeer.Info()
	require.NoError(t, err)
	assert.Truef(t, info.Synced, "Peer %s should be synced but is desynced!", latePeer.String())

	// 10. every issued message must be available on every node
	checkForMessageIds(t, n.Peers(), ids, true)
}
package tests
import (
"bufio"
"regexp"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDockerLogs verifies that every peer's log output contains "GoShimmer".
// Combining container logs with regular expressions like this is a useful way
// to assert on a particular peer's runtime behavior.
func TestDockerLogs(t *testing.T) {
	n, err := f.CreateNetwork("TestDockerLogs", 3, 1)
	require.NoError(t, err)
	defer n.Shutdown()

	pattern := regexp.MustCompile("GoShimmer")
	for _, peer := range n.Peers() {
		logReader, err := peer.Logs()
		require.NoError(t, err)
		assert.True(t, pattern.MatchReader(bufio.NewReader(logReader)))
	}
}
package tests
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestPersistence issues messages on random peers, restarts every node and
// verifies that the messages are still available afterwards.
func TestPersistence(t *testing.T) {
	n, err := f.CreateNetwork("message_TestPersistence", 4, 2)
	require.NoError(t, err)
	defer n.Shutdown()

	// give the peers some time to flip their state to synchronized
	time.Sleep(5 * time.Second)

	// 1. issue data messages
	ids := sendDataMessagesOnRandomPeer(t, n.Peers(), 100)

	// allow the messages to be gossiped through the network
	time.Sleep(5 * time.Second)

	// 2. every issued message must be available on every node
	checkForMessageIds(t, n.Peers(), ids, true)

	// 3. stop all nodes
	for _, p := range n.Peers() {
		err = p.Stop()
		require.NoError(t, err)
	}

	// 4. start all nodes again
	for _, p := range n.Peers() {
		err = p.Start()
		require.NoError(t, err)
	}

	// 5. the issued messages must have been persisted across the restart
	checkForMessageIds(t, n.Peers(), ids, false)
}
package tests
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRelayMessages checks that messages are actually relayed/gossiped through
// the network by verifying their existence on every node after a cool-down.
func TestRelayMessages(t *testing.T) {
	n, err := f.CreateNetwork("TestRelayMessages", 6, 3)
	require.NoError(t, err)
	defer n.Shutdown()

	const msgCount = 105
	payload := []byte("Test")

	// issue messages on randomly chosen peers
	ids := make([]string, 0, msgCount)
	for i := 0; i < msgCount; i++ {
		issuer := n.RandomPeer()
		id, err := issuer.Data(payload)
		require.NoError(t, err)
		ids = append(ids, id)
	}

	// allow the messages to be gossiped through the network
	time.Sleep(10 * time.Second)

	// every peer must know about every issued message
	for _, peer := range n.Peers() {
		found, err := peer.FindMessageByID(ids)
		require.NoError(t, err)

		// collect the IDs the peer returned and compare against what was sent
		gotIDs := make([]string, len(found.Messages))
		for i, msg := range found.Messages {
			gotIDs[i] = msg.ID
		}
		assert.ElementsMatchf(t, ids, gotIDs, "messages do not match sent in %s", peer.String())
	}
}
...@@ -2,8 +2,8 @@ package tests ...@@ -2,8 +2,8 @@ package tests
import ( import (
"fmt" "fmt"
"math/rand"
"testing" "testing"
"time"
"github.com/iotaledger/goshimmer/packages/binary/messagelayer/payload" "github.com/iotaledger/goshimmer/packages/binary/messagelayer/payload"
"github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework" "github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework"
...@@ -11,78 +11,41 @@ import ( ...@@ -11,78 +11,41 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
// TestNodeSynchronization checks whether messages are synchronized by a peer that joined the network later // DataMessageSent defines a struct to identify from which issuer a data message was sent.
// and initially missed some messages. type DataMessageSent struct {
func TestNodeSynchronization(t *testing.T) { number int
n, err := f.CreateNetwork("TestNodeSynchronization", 4, 2) id string
require.NoError(t, err) data []byte
defer n.Shutdown() issuerPublicKey string
}
// wait for peers to change their state to synchronized
time.Sleep(5 * time.Second)
numMessages := 100
idsMap := make(map[string]MessageSent, numMessages)
ids := make([]string, numMessages)
// create messages on random peers
for i := 0; i < numMessages; i++ {
data := []byte(fmt.Sprintf("Test%d", i))
peer := n.RandomPeer()
id, sent := sendDataMessage(t, peer, data, i)
idsMap[id] = sent // sendDataMessagesOnRandomPeer sends data messages on a random peer and saves the sent message to a map.
ids[i] = id func sendDataMessagesOnRandomPeer(t *testing.T, peers []*framework.Peer, numMessages int, idsMap ...map[string]DataMessageSent) map[string]DataMessageSent {
var ids map[string]DataMessageSent
if len(idsMap) > 0 {
ids = idsMap[0]
} else {
ids = make(map[string]DataMessageSent, numMessages)
} }
// wait for messages to be gossiped
time.Sleep(5 * time.Second)
// make sure every peer got every message
checkForMessageIds(t, n.Peers(), ids, idsMap)
// spawn peer without knowledge of previous messages
newPeer, err := n.CreatePeer(false)
require.NoError(t, err)
err = n.WaitForAutopeering(3)
require.NoError(t, err)
ids2 := make([]string, numMessages)
// create messages on random peers
for i := 0; i < numMessages; i++ { for i := 0; i < numMessages; i++ {
data := []byte(fmt.Sprintf("Test%d", i)) data := []byte(fmt.Sprintf("Test%d", i))
peer := n.RandomPeer() peer := peers[rand.Intn(len(peers))]
id, sent := sendDataMessage(t, peer, data, i) id, sent := sendDataMessage(t, peer, data, i)
ids2[i] = id ids[id] = sent
idsMap[id] = sent
} }
// wait for messages to be gossiped return ids
time.Sleep(5 * time.Second)
// check whether peer has synchronized ids (previous messages)
checkForMessageIds(t, []*framework.Peer{newPeer}, ids, idsMap)
// make sure every peer got every message
checkForMessageIds(t, n.Peers(), ids2, idsMap)
}
type MessageSent struct {
number int
id string
data []byte
issuerPublicKey string
} }
func sendDataMessage(t *testing.T, peer *framework.Peer, data []byte, number int) (string, MessageSent) { // sendDataMessage sends a data message on a given peer and returns the id and a DataMessageSent struct.
func sendDataMessage(t *testing.T, peer *framework.Peer, data []byte, number int) (string, DataMessageSent) {
id, err := peer.Data(data) id, err := peer.Data(data)
require.NoError(t, err) require.NoErrorf(t, err, "Could not send message on %s", peer.String())
sent := MessageSent{ sent := DataMessageSent{
number: number, number: number,
id: id, id: id,
// save payload to be able to compare API response // save payload to be able to compare API response
...@@ -92,14 +55,22 @@ func sendDataMessage(t *testing.T, peer *framework.Peer, data []byte, number int ...@@ -92,14 +55,22 @@ func sendDataMessage(t *testing.T, peer *framework.Peer, data []byte, number int
return id, sent return id, sent
} }
func checkForMessageIds(t *testing.T, peers []*framework.Peer, ids []string, idsMap map[string]MessageSent) { // checkForMessageIds performs checks to make sure that all peers received all given messages defined in ids.
func checkForMessageIds(t *testing.T, peers []*framework.Peer, ids map[string]DataMessageSent, checkSynchronized bool) {
var idsSlice []string
for id := range ids {
idsSlice = append(idsSlice, id)
}
for _, peer := range peers { for _, peer := range peers {
// check that the peer sees itself as synchronized if checkSynchronized {
info, err := peer.Info() // check that the peer sees itself as synchronized
require.NoError(t, err) info, err := peer.Info()
require.True(t, info.Synced) require.NoError(t, err)
require.True(t, info.Synced)
}
resp, err := peer.FindMessageByID(ids) resp, err := peer.FindMessageByID(idsSlice)
require.NoError(t, err) require.NoError(t, err)
// check that all messages are present in response // check that all messages are present in response
...@@ -107,11 +78,11 @@ func checkForMessageIds(t *testing.T, peers []*framework.Peer, ids []string, ids ...@@ -107,11 +78,11 @@ func checkForMessageIds(t *testing.T, peers []*framework.Peer, ids []string, ids
for i, msg := range resp.Messages { for i, msg := range resp.Messages {
respIDs[i] = msg.ID respIDs[i] = msg.ID
} }
assert.ElementsMatchf(t, ids, respIDs, "messages do not match sent in %s", peer.String()) assert.ElementsMatchf(t, idsSlice, respIDs, "messages do not match sent in %s", peer.String())
// check for general information // check for general information
for _, msg := range resp.Messages { for _, msg := range resp.Messages {
msgSent := idsMap[msg.ID] msgSent := ids[msg.ID]
assert.Equalf(t, msgSent.issuerPublicKey, msg.IssuerPublicKey, "messageID=%s, issuer=%s not correct issuer in %s.", msgSent.id, msgSent.issuerPublicKey, peer.String()) assert.Equalf(t, msgSent.issuerPublicKey, msg.IssuerPublicKey, "messageID=%s, issuer=%s not correct issuer in %s.", msgSent.id, msgSent.issuerPublicKey, peer.String())
assert.Equalf(t, msgSent.data, msg.Payload, "messageID=%s, issuer=%s data not equal in %s.", msgSent.id, msgSent.issuerPublicKey, peer.String()) assert.Equalf(t, msgSent.data, msg.Payload, "messageID=%s, issuer=%s data not equal in %s.", msgSent.id, msgSent.issuerPublicKey, peer.String())
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment