Skip to content
Snippets Groups Projects
Unverified Commit fc739a8f authored by Jonas Theis's avatar Jonas Theis Committed by GitHub
Browse files

Parallel integration tests (#443)

* Split tests in different packages so that they can run in parallel

* Set name to env name

* Add jobs for other integration tests

* Adjust waiting times

* Remove cleanup from workflow

* Add timeout to wait for peers to start
parent eaa345e4
No related branches found
No related tags found
No related merge requests found
Showing
with 229 additions and 53 deletions
name: Test GoShimmer
name: Integration tests
on: pull_request
jobs:
integration-test:
name: Integration Tests
autopeering:
name: autopeering
env:
TEST_NAME: autopeering
runs-on: ubuntu-latest
steps:
......@@ -33,11 +35,111 @@ jobs:
if: always()
uses: actions/upload-artifact@v1
with:
name: container-logs
name: ${{ env.TEST_NAME }}
path: tools/integration-tests/logs
- name: Clean up
common:
name: common
env:
TEST_NAME: common
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Build GoShimmer image
run: docker build -t iotaledger/goshimmer .
- name: Pull additional Docker images
run: |
docker pull angelocapossele/drand:latest
docker pull gaiaadm/pumba:latest
docker pull gaiadocker/iproute2:latest
- name: Run integration tests
run: docker-compose -f tools/integration-tests/tester/docker-compose.yml up --abort-on-container-exit --exit-code-from tester --build
- name: Create logs from tester
if: always()
run: |
docker logs tester &> tools/integration-tests/logs/tester.log
- name: Save logs as artifacts
if: always()
uses: actions/upload-artifact@v1
with:
name: ${{ env.TEST_NAME }}
path: tools/integration-tests/logs
drng:
name: drng
env:
TEST_NAME: drng
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Build GoShimmer image
run: docker build -t iotaledger/goshimmer .
- name: Pull additional Docker images
run: |
docker pull angelocapossele/drand:latest
docker pull gaiaadm/pumba:latest
docker pull gaiadocker/iproute2:latest
- name: Run integration tests
run: docker-compose -f tools/integration-tests/tester/docker-compose.yml up --abort-on-container-exit --exit-code-from tester --build
- name: Create logs from tester
if: always()
run: |
docker logs tester &> tools/integration-tests/logs/tester.log
- name: Save logs as artifacts
if: always()
uses: actions/upload-artifact@v1
with:
name: ${{ env.TEST_NAME }}
path: tools/integration-tests/logs
message:
name: message
env:
TEST_NAME: message
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Build GoShimmer image
run: docker build -t iotaledger/goshimmer .
- name: Pull additional Docker images
run: |
docker pull angelocapossele/drand:latest
docker pull gaiaadm/pumba:latest
docker pull gaiadocker/iproute2:latest
- name: Run integration tests
run: docker-compose -f tools/integration-tests/tester/docker-compose.yml up --abort-on-container-exit --exit-code-from tester --build
- name: Create logs from tester
if: always()
run: |
docker-compose -f tools/integration-tests/tester/docker-compose.yml down
docker rm -f $(docker ps -a -q -f ancestor=gaiadocker/iproute2)
\ No newline at end of file
docker logs tester &> tools/integration-tests/logs/tester.log
- name: Save logs as artifacts
if: always()
uses: actions/upload-artifact@v1
with:
name: ${{ env.TEST_NAME }}
path: tools/integration-tests/logs
#!/bin/bash
TEST_NAMES='autopeering common drng message'
echo "Build GoShimmer image"
docker build -t iotaledger/goshimmer ../../.
......@@ -9,10 +11,12 @@ docker pull gaiaadm/pumba:latest
docker pull gaiadocker/iproute2:latest
echo "Run integration tests"
docker-compose -f tester/docker-compose.yml up --abort-on-container-exit --exit-code-from tester --build
echo "Create logs from containers in network"
docker logs tester &> logs/tester.log
for name in $TEST_NAMES
do
TEST_NAME=$name docker-compose -f tester/docker-compose.yml up --abort-on-container-exit --exit-code-from tester --build
docker logs tester &> logs/"$name"_tester.log
done
echo "Clean up"
docker-compose -f tester/docker-compose.yml down
......
......@@ -5,7 +5,7 @@ services:
container_name: tester
image: golang:1.14
working_dir: /tmp/goshimmer/tools/integration-tests/tester
entrypoint: go test ./tests -v -mod=readonly -timeout 30m
entrypoint: go test ./tests/${TEST_NAME} -v -mod=readonly -timeout 30m
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- ../../..:/tmp/goshimmer:ro
......
package framework
const (
autopeeringMaxTries = 25
autopeeringMaxTries = 50
apiPort = "8080"
......
package tests
package autopeering
import (
"testing"
"github.com/iotaledger/goshimmer/tools/integration-tests/tester/tests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
......@@ -10,7 +11,7 @@ import (
func TestNetworkSplit(t *testing.T) {
n, err := f.CreateNetworkWithPartitions("autopeering_TestNetworkSplit", 6, 2, 2)
require.NoError(t, err)
defer ShutdownNetwork(t, n)
defer tests.ShutdownNetwork(t, n)
// test that nodes only have neighbors from same partition
for _, partition := range n.Partitions() {
......
// Package tests provides the possibility to write integration tests in regular Go style.
// The integration test framework is initialized before any test in the package runs and
// thus can readily be used to create networks.
//
// Each tested feature should reside in its own test file and define tests cases and networks as necessary.
package tests
package autopeering
import (
"os"
......
package tests
package autopeering
import (
"testing"
"time"
"github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework"
"github.com/iotaledger/goshimmer/tools/integration-tests/tester/tests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
......@@ -13,13 +14,10 @@ import (
// a node that joins later solidifies, whether it is desyned after a restart
// and becomes synced again.
func TestSynchronization(t *testing.T) {
config := framework.NetworkConfig{
BootstrapInitialIssuanceTimePeriodSec: 40,
}
initalPeers := 4
n, err := f.CreateNetwork("common_TestSynchronization", initalPeers, 2, config)
n, err := f.CreateNetwork("common_TestSynchronization", initalPeers, 2)
require.NoError(t, err)
defer ShutdownNetwork(t, n)
defer tests.ShutdownNetwork(t, n)
// wait for peers to change their state to synchronized
time.Sleep(5 * time.Second)
......@@ -27,10 +25,10 @@ func TestSynchronization(t *testing.T) {
numMessages := 100
// 1. issue data messages
ids := sendDataMessagesOnRandomPeer(t, n.Peers(), numMessages)
ids := tests.SendDataMessagesOnRandomPeer(t, n.Peers(), numMessages)
// wait for messages to be gossiped
time.Sleep(5 * time.Second)
time.Sleep(10 * time.Second)
// 2. spawn peer without knowledge of previous messages
newPeer, err := n.CreatePeer(framework.GoShimmerConfig{})
......@@ -39,13 +37,13 @@ func TestSynchronization(t *testing.T) {
require.NoError(t, err)
// 3. issue some messages on old peers so that new peer can solidify
ids = sendDataMessagesOnRandomPeer(t, n.Peers()[:initalPeers], 10, ids)
ids = tests.SendDataMessagesOnRandomPeer(t, n.Peers()[:initalPeers], 10, ids)
// wait for peer to solidify
time.Sleep(10 * time.Second)
time.Sleep(15 * time.Second)
// 4. check whether all issued messages are available on all nodes
checkForMessageIds(t, n.Peers(), ids, true)
tests.CheckForMessageIds(t, n.Peers(), ids, true)
// 5. shut down newly added peer
err = newPeer.Stop()
......@@ -55,23 +53,25 @@ func TestSynchronization(t *testing.T) {
err = newPeer.Start()
require.NoError(t, err)
// wait for peer to start
time.Sleep(2 * time.Second)
time.Sleep(5 * time.Second)
// note: this check is too dependent on the initial time a node sends bootstrap messages
// and therefore very error prone. Therefore it's not done for now.
// 7. check that it is in state desynced
resp, err := newPeer.Info()
require.NoError(t, err)
assert.Falsef(t, resp.Synced, "Peer %s should be desynced but is synced!", newPeer.String())
//resp, err := newPeer.Info()
//require.NoError(t, err)
//assert.Falsef(t, resp.Synced, "Peer %s should be desynced but is synced!", newPeer.String())
// 8. issue some messages on old peers so that new peer can sync again
ids = sendDataMessagesOnRandomPeer(t, n.Peers()[:initalPeers], 10, ids)
ids = tests.SendDataMessagesOnRandomPeer(t, n.Peers()[:initalPeers], 10, ids)
// wait for peer to sync
time.Sleep(10 * time.Second)
// 9. newPeer becomes synced again
resp, err = newPeer.Info()
resp, err := newPeer.Info()
require.NoError(t, err)
assert.Truef(t, resp.Synced, "Peer %s should be synced but is desynced!", newPeer.String())
// 10. check whether all issued messages are available on all nodes
checkForMessageIds(t, n.Peers(), ids, true)
tests.CheckForMessageIds(t, n.Peers(), ids, true)
}
package autopeering
import (
"os"
"testing"
"github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework"
)
var f *framework.Framework
// TestMain gets called by the test utility and is executed before any other test in this package.
// It is therefore used to initialize the integration testing framework.
func TestMain(m *testing.M) {
var err error
f, err = framework.Instance()
if err != nil {
panic(err)
}
// call the tests
os.Exit(m.Run())
}
package tests
package autopeering
import (
"encoding/json"
......@@ -9,6 +9,7 @@ import (
"time"
"github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework"
"github.com/iotaledger/goshimmer/tools/integration-tests/tester/tests"
"github.com/stretchr/testify/require"
)
......@@ -23,7 +24,7 @@ func TestDRNG(t *testing.T) {
drng, err := f.CreateDRNGNetwork("TestDRNG", 5, 8, 3)
require.NoError(t, err)
defer ShutdownNetwork(t, drng)
defer tests.ShutdownNetwork(t, drng)
// wait for randomness generation to be started
log.Printf("Waiting for randomness generation to be started...\n")
......
package autopeering
import (
"os"
"testing"
"github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework"
)
var f *framework.Framework
// TestMain gets called by the test utility and is executed before any other test in this package.
// It is therefore used to initialize the integration testing framework.
func TestMain(m *testing.M) {
var err error
f, err = framework.Instance()
if err != nil {
panic(err)
}
// call the tests
os.Exit(m.Run())
}
package autopeering
import (
"os"
"testing"
"github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework"
)
var f *framework.Framework
// TestMain gets called by the test utility and is executed before any other test in this package.
// It is therefore used to initialize the integration testing framework.
func TestMain(m *testing.M) {
var err error
f, err = framework.Instance()
if err != nil {
panic(err)
}
// call the tests
os.Exit(m.Run())
}
package tests
package autopeering
import (
"testing"
"time"
"github.com/iotaledger/goshimmer/tools/integration-tests/tester/tests"
"github.com/stretchr/testify/require"
)
......@@ -11,19 +12,19 @@ import (
func TestPersistence(t *testing.T) {
n, err := f.CreateNetwork("message_TestPersistence", 4, 2)
require.NoError(t, err)
defer ShutdownNetwork(t, n)
defer tests.ShutdownNetwork(t, n)
// wait for peers to change their state to synchronized
time.Sleep(5 * time.Second)
// 1. issue data messages
ids := sendDataMessagesOnRandomPeer(t, n.Peers(), 100)
ids := tests.SendDataMessagesOnRandomPeer(t, n.Peers(), 100)
// wait for messages to be gossiped
time.Sleep(5 * time.Second)
time.Sleep(10 * time.Second)
// 2. check whether all issued messages are available on all nodes
checkForMessageIds(t, n.Peers(), ids, true)
tests.CheckForMessageIds(t, n.Peers(), ids, true)
// 3. stop all nodes
for _, peer := range n.Peers() {
......@@ -37,6 +38,9 @@ func TestPersistence(t *testing.T) {
require.NoError(t, err)
}
// wait for peers to start
time.Sleep(10 * time.Second)
// 5. check whether all issued messages are persistently available on all nodes
checkForMessageIds(t, n.Peers(), ids, false)
tests.CheckForMessageIds(t, n.Peers(), ids, false)
}
......@@ -23,8 +23,8 @@ type Shutdowner interface {
Shutdown() error
}
// sendDataMessagesOnRandomPeer sends data messages on a random peer and saves the sent message to a map.
func sendDataMessagesOnRandomPeer(t *testing.T, peers []*framework.Peer, numMessages int, idsMap ...map[string]DataMessageSent) map[string]DataMessageSent {
// SendDataMessagesOnRandomPeer sends data messages on a random peer and saves the sent message to a map.
func SendDataMessagesOnRandomPeer(t *testing.T, peers []*framework.Peer, numMessages int, idsMap ...map[string]DataMessageSent) map[string]DataMessageSent {
var ids map[string]DataMessageSent
if len(idsMap) > 0 {
ids = idsMap[0]
......@@ -36,7 +36,7 @@ func sendDataMessagesOnRandomPeer(t *testing.T, peers []*framework.Peer, numMess
data := []byte(fmt.Sprintf("Test%d", i))
peer := peers[rand.Intn(len(peers))]
id, sent := sendDataMessage(t, peer, data, i)
id, sent := SendDataMessage(t, peer, data, i)
ids[id] = sent
}
......@@ -44,8 +44,8 @@ func sendDataMessagesOnRandomPeer(t *testing.T, peers []*framework.Peer, numMess
return ids
}
// sendDataMessage sends a data message on a given peer and returns the id and a DataMessageSent struct.
func sendDataMessage(t *testing.T, peer *framework.Peer, data []byte, number int) (string, DataMessageSent) {
// SendDataMessage sends a data message on a given peer and returns the id and a DataMessageSent struct.
func SendDataMessage(t *testing.T, peer *framework.Peer, data []byte, number int) (string, DataMessageSent) {
id, err := peer.Data(data)
require.NoErrorf(t, err, "Could not send message on %s", peer.String())
......@@ -59,8 +59,8 @@ func sendDataMessage(t *testing.T, peer *framework.Peer, data []byte, number int
return id, sent
}
// checkForMessageIds performs checks to make sure that all peers received all given messages defined in ids.
func checkForMessageIds(t *testing.T, peers []*framework.Peer, ids map[string]DataMessageSent, checkSynchronized bool) {
// CheckForMessageIds performs checks to make sure that all peers received all given messages defined in ids.
func CheckForMessageIds(t *testing.T, peers []*framework.Peer, ids map[string]DataMessageSent, checkSynchronized bool) {
var idsSlice []string
for id := range ids {
idsSlice = append(idsSlice, id)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment