Commit 9a10cacb authored by Jonas Theis, committed by GitHub

Autopeering integration tests (#427)

* Add caching volume to tester container to significantly speed up integration test development

* Increase wait time for peer to sync

* Enable the creation of a partitioned network with Pumba to block traffic between the partitions

* Add autopeering test
parent 99da8900
Showing with 382 additions and 23 deletions
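
The partitioning added in this commit is built on Pumba, a chaos-testing tool for Docker: for every peer, the framework starts a companion Pumba container that applies 100% packet loss (via tc/netem, using the gaiadocker/iproute2 helper image) towards all peers outside that peer's partition. Reconstructed from the flags assembled in CreatePumba below, each Pumba container effectively runs:

	pumba --log-level=debug netem --duration=100m --target=<IP outside partition> ... --tc-image=gaiadocker/iproute2 loss --percent=100 <peer container>

so nodes can only autopeer within their own partition.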
@@ -15,8 +15,11 @@ jobs:
       - name: Build GoShimmer image
         run: docker build -t iotaledger/goshimmer .
-      - name: Pull drand image
-        run: docker pull angelocapossele/drand:latest
+      - name: Pull additional Docker images
+        run: |
+          docker pull angelocapossele/drand:latest
+          docker pull gaiaadm/pumba:latest
+          docker pull gaiadocker/iproute2:latest
       - name: Run integration tests
         run: docker-compose -f tools/integration-tests/tester/docker-compose.yml up --abort-on-container-exit --exit-code-from tester --build
@@ -35,4 +38,6 @@ jobs:
       - name: Clean up
         if: always()
-        run: docker-compose -f tools/integration-tests/tester/docker-compose.yml down
+        run: |
+          docker-compose -f tools/integration-tests/tester/docker-compose.yml down
+          docker rm -f $(docker ps -a -q -f ancestor=gaiadocker/iproute2)
\ No newline at end of file
@@ -3,8 +3,10 @@
 echo "Build GoShimmer image"
 docker build -t iotaledger/goshimmer ../../.
-echo "Pulling drand image"
+echo "Pull additional Docker images"
 docker pull angelocapossele/drand:latest
+docker pull gaiaadm/pumba:latest
+docker pull gaiadocker/iproute2:latest
 echo "Run integration tests"
 docker-compose -f tester/docker-compose.yml up --abort-on-container-exit --exit-code-from tester --build
@@ -13,4 +15,5 @@ echo "Create logs from containers in network"
 docker logs tester &> logs/tester.log
 echo "Clean up"
-docker-compose -f tester/docker-compose.yml down
\ No newline at end of file
+docker-compose -f tester/docker-compose.yml down
+docker rm -f $(docker ps -a -q -f ancestor=gaiadocker/iproute2)
@@ -4,9 +4,14 @@ services:
   tester:
     container_name: tester
     image: golang:1.14
-    working_dir: /go/src/github.com/iotaledger/goshimmer/tools/integration-tests/tester
-    entrypoint: go test ./tests -v -mod=readonly
+    working_dir: /tmp/goshimmer/tools/integration-tests/tester
+    entrypoint: go test ./tests -v -mod=readonly -timeout 30m
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock:ro
-      - ../../..:/go/src/github.com/iotaledger/goshimmer:ro
-      - ../logs:/tmp/logs
\ No newline at end of file
+      - ../../..:/tmp/goshimmer:ro
+      - ../logs:/tmp/logs
+      - goshimmer-testing-cache:/go
+
+volumes:
+  goshimmer-testing-cache:
+    name: goshimmer-testing-cache
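
The named volume goshimmer-testing-cache mounted at /go persists the Go module and build caches across tester runs, which is the caching speed-up referred to in the commit message; relocating the source checkout from /go/src/github.com/iotaledger/goshimmer to /tmp/goshimmer frees /go for that cache. The -timeout 30m flag raises go test's default 10-minute limit, which the new partition tests, with their repeated sleeps while Pumba containers start, could otherwise exceed.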
@@ -122,9 +122,47 @@ func (d *DockerContainer) CreateDrandMember(name string, goShimmerAPI string, leader bool) error {
 	return d.CreateContainer(name, containerConfig)
 }
 
+// CreatePumba creates a new container with Pumba configuration.
+func (d *DockerContainer) CreatePumba(name string, containerName string, targetIPs []string) error {
+	hostConfig := &container.HostConfig{
+		Binds: strslice.StrSlice{"/var/run/docker.sock:/var/run/docker.sock:ro"},
+	}
+
+	cmd := strslice.StrSlice{
+		"--log-level=debug",
+		"netem",
+		"--duration=100m",
+	}
+	for _, ip := range targetIPs {
+		targetFlag := "--target=" + ip
+		cmd = append(cmd, targetFlag)
+	}
+
+	slice := strslice.StrSlice{
+		"--tc-image=gaiadocker/iproute2",
+		"loss",
+		"--percent=100",
+		containerName,
+	}
+	cmd = append(cmd, slice...)
+
+	containerConfig := &container.Config{
+		Image: "gaiaadm/pumba:latest",
+		Cmd:   cmd,
+	}
+
+	return d.CreateContainer(name, containerConfig, hostConfig)
+}
+
 // CreateContainer creates a new container with the given configuration.
-func (d *DockerContainer) CreateContainer(name string, containerConfig *container.Config) error {
-	resp, err := d.client.ContainerCreate(context.Background(), containerConfig, nil, nil, name)
+func (d *DockerContainer) CreateContainer(name string, containerConfig *container.Config, hostConfigs ...*container.HostConfig) error {
+	var hostConfig *container.HostConfig
+	if len(hostConfigs) > 0 {
+		hostConfig = hostConfigs[0]
+	}
+
+	resp, err := d.client.ContainerCreate(context.Background(), containerConfig, hostConfig, nil, name)
 	if err != nil {
 		return err
 	}
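
Making the host config variadic keeps every existing CreateContainer call site source-compatible while letting CreatePumba mount the Docker socket. A minimal sketch of the two call styles (pumbaConfig and hostConfig here stand in for the values built in CreatePumba; the replica config is illustrative):

	// Existing call sites pass no host config, so hostConfig stays nil
	// and Docker applies its defaults.
	err := d.CreateContainer("replica_1", &container.Config{Image: "iotaledger/goshimmer"})

	// Pumba passes an explicit host config that binds the Docker socket
	// into the container.
	err = d.CreateContainer("replica_1_pumba", pumbaConfig, hostConfig)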
@@ -170,6 +208,22 @@ func (d *DockerContainer) ExitStatus() (int, error) {
 	return resp.State.ExitCode, nil
 }
 
+// IP returns the IP address according to the container information for the given network.
+func (d *DockerContainer) IP(network string) (string, error) {
+	resp, err := d.client.ContainerInspect(context.Background(), d.id)
+	if err != nil {
+		return "", err
+	}
+
+	for name, v := range resp.NetworkSettings.Networks {
+		if name == network {
+			return v.IPAddress, nil
+		}
+	}
+
+	return "", fmt.Errorf("IP address in %s could not be determined", network)
+}
+
 // Logs returns the logs of the container as io.ReadCloser.
 func (d *DockerContainer) Logs() (io.ReadCloser, error) {
 	options := types.ContainerLogsOptions{
...
@@ -55,7 +55,10 @@ func (n *DRNGNetwork) CreatePeer(c GoShimmerConfig, publicKey hive_ed25519.PublicKey) (*Peer, error) {
 		return nil, err
 	}
 
-	peer := newPeer(name, identity.New(publicKey), container)
+	peer, err := newPeer(name, identity.New(publicKey), container, n.network)
+	if err != nil {
+		return nil, err
+	}
 	n.network.peers = append(n.network.peers, peer)
 	return peer, nil
 }
...
@@ -12,6 +12,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/docker/docker/api/types/strslice"
 	"github.com/docker/docker/client"
 
 	hive_ed25519 "github.com/iotaledger/hive.go/crypto/ed25519"
 )
@@ -99,6 +100,88 @@ func (f *Framework) CreateNetwork(name string, peers int, minimumNeighbors int,
 	return network, nil
 }
 
+// CreateNetworkWithPartitions creates and returns a partitioned network, distributing `peers` GoShimmer nodes across `partitions` partitions.
+// It waits for the peers to autopeer until the minimum neighbors criteria is met for every peer.
+// The first peer automatically starts with the bootstrap plugin enabled.
+func (f *Framework) CreateNetworkWithPartitions(name string, peers, partitions, minimumNeighbors int) (*Network, error) {
+	network, err := newNetwork(f.dockerClient, strings.ToLower(name), f.tester)
+	if err != nil {
+		return nil, err
+	}
+
+	err = network.createEntryNode()
+	if err != nil {
+		return nil, err
+	}
+
+	// block all traffic from/to the entry node
+	pumbaEntryNodeName := network.namePrefix(containerNameEntryNode) + containerNameSuffixPumba
+	pumbaEntryNode, err := network.createPumba(
+		pumbaEntryNodeName,
+		network.namePrefix(containerNameEntryNode),
+		strslice.StrSlice{},
+	)
+	if err != nil {
+		return nil, err
+	}
+	// wait until Pumba is started and blocks all traffic
+	time.Sleep(5 * time.Second)
+
+	// create peers/GoShimmer nodes
+	for i := 0; i < peers; i++ {
+		config := GoShimmerConfig{
+			Bootstrap: i == 0,
+		}
+		if _, err = network.CreatePeer(config); err != nil {
+			return nil, err
+		}
+	}
+	// wait until containers are fully started
+	time.Sleep(2 * time.Second)
+
+	// create partitions
+	chunkSize := peers / partitions
+	var end int
+	for i := 0; end < peers; i += chunkSize {
+		end = i + chunkSize
+		// the last partition takes the rest
+		if i/chunkSize == partitions-1 {
+			end = peers
+		}
+		_, err = network.createPartition(network.peers[i:end])
+		if err != nil {
+			return nil, err
+		}
+	}
+	// wait until the Pumba containers are started and block traffic between partitions
+	time.Sleep(5 * time.Second)
+
+	// delete Pumba for the entry node
+	err = pumbaEntryNode.Stop()
+	if err != nil {
+		return nil, err
+	}
+	logs, err := pumbaEntryNode.Logs()
+	if err != nil {
+		return nil, err
+	}
+	err = createLogFile(pumbaEntryNodeName, logs)
+	if err != nil {
+		return nil, err
+	}
+	err = pumbaEntryNode.Remove()
+	if err != nil {
+		return nil, err
+	}
+
+	err = network.WaitForAutopeering(minimumNeighbors)
+	if err != nil {
+		return nil, err
+	}
+
+	return network, nil
+}
+
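A worked example of the chunking above: with peers=6 and partitions=2, chunkSize is 3 and the loop carves network.peers into [0:3] and [3:6]; with peers=7 the last partition absorbs the remainder, giving [0:3] and [3:7]. A hypothetical call from a test could look like this ("autopeering_demo" is an invented name):

	// 6 nodes in 2 partitions of 3. Within a partition of 3 a node can
	// reach at most 2 neighbors, so minimumNeighbors must not exceed 2.
	n, err := f.CreateNetworkWithPartitions("autopeering_demo", 6, 2, 2)
	require.NoError(t, err)
	defer ShutdownNetwork(t, n)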
 // CreateDRNGNetwork creates and returns a (Docker) Network that contains drand and `peers` GoShimmer nodes.
 func (f *Framework) CreateDRNGNetwork(name string, members, peers, minimumNeighbors int) (*DRNGNetwork, error) {
 	drng, err := newDRNGNetwork(f.dockerClient, strings.ToLower(name), f.tester)
...
@@ -27,6 +27,8 @@ type Network struct {
 	entryNode         *DockerContainer
 	entryNodeIdentity *identity.Identity
+	partitions        []*Partition
+
 	dockerClient *client.Client
 }
@@ -115,7 +117,10 @@ func (n *Network) CreatePeer(c GoShimmerConfig) (*Peer, error) {
 		return nil, err
 	}
 
-	peer := newPeer(name, identity.New(publicKey), container)
+	peer, err := newPeer(name, identity.New(publicKey), container, n)
+	if err != nil {
+		return nil, err
+	}
 	n.peers = append(n.peers, peer)
 	return peer, nil
 }
@@ -135,6 +140,12 @@ func (n *Network) Shutdown() error {
 		}
 	}
 
+	// delete all partitions
+	err = n.DeletePartitions()
+	if err != nil {
+		return err
+	}
+
 	// retrieve logs
 	logs, err := n.entryNode.Logs()
 	if err != nil {
@@ -208,6 +219,10 @@ func (n *Network) WaitForAutopeering(minimumNeighbors int) error {
 	log.Printf("Waiting for autopeering...\n")
 	defer log.Printf("Waiting for autopeering... done\n")
 
+	if minimumNeighbors == 0 {
+		return nil
+	}
+
 	for i := autopeeringMaxTries; i > 0; i-- {
 		for _, p := range n.peers {
@@ -259,3 +274,147 @@ func (n *Network) Peers() []*Peer {
 func (n *Network) RandomPeer() *Peer {
 	return n.peers[rand.Intn(len(n.peers))]
 }
+
+// createPumba creates and starts a Pumba Docker container.
+func (n *Network) createPumba(name string, containerName string, targetIPs []string) (*DockerContainer, error) {
+	container := NewDockerContainer(n.dockerClient)
+	err := container.CreatePumba(name, containerName, targetIPs)
+	if err != nil {
+		return nil, err
+	}
+
+	err = container.Start()
+	if err != nil {
+		return nil, err
+	}
+
+	return container, nil
+}
+
+// createPartition creates a partition with the given peers.
+// It starts a Pumba container for every peer that blocks traffic to all other partitions.
+func (n *Network) createPartition(peers []*Peer) (*Partition, error) {
+	peersMap := make(map[string]*Peer)
+	for _, peer := range peers {
+		peersMap[peer.ID().String()] = peer
+	}
+
+	// block all traffic to every peer except those in the current partition
+	var targetIPs []string
+	for _, peer := range n.peers {
+		if _, ok := peersMap[peer.ID().String()]; ok {
+			continue
+		}
+		targetIPs = append(targetIPs, peer.ip)
+	}
+
+	partitionName := n.namePrefix(fmt.Sprintf("partition_%d-", len(n.partitions)))
+
+	// create a Pumba container for every peer in the partition
+	pumbas := make([]*DockerContainer, len(peers))
+	for i, p := range peers {
+		name := partitionName + p.name + containerNameSuffixPumba
+		pumba, err := n.createPumba(name, p.name, targetIPs)
+		if err != nil {
+			return nil, err
+		}
+		pumbas[i] = pumba
+		time.Sleep(1 * time.Second)
+	}
+
+	partition := &Partition{
+		name:     partitionName,
+		peers:    peers,
+		peersMap: peersMap,
+		pumbas:   pumbas,
+	}
+	n.partitions = append(n.partitions, partition)
+
+	return partition, nil
+}
+
+// DeletePartitions deletes all partitions of the network.
+// All nodes can communicate with the full network again.
+func (n *Network) DeletePartitions() error {
+	for _, p := range n.partitions {
+		err := p.deletePartition()
+		if err != nil {
+			return err
+		}
+	}
+	n.partitions = nil
+	return nil
+}
+
+// Partitions returns the network's partitions.
+func (n *Network) Partitions() []*Partition {
+	return n.partitions
+}
+
+// Split splits the existing network into the given partitions.
+func (n *Network) Split(partitions ...[]*Peer) error {
+	for _, peers := range partitions {
+		_, err := n.createPartition(peers)
+		if err != nil {
+			return err
+		}
+	}
+	// wait until the Pumba containers are started and block traffic between partitions
+	time.Sleep(5 * time.Second)
+
+	return nil
+}
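
A sketch of how Split and DeletePartitions might be combined in a test, assuming an already peered six-node network n (error handling abbreviated):

	// Split the running network into two halves; Split sleeps until the
	// Pumba containers are in effect.
	peers := n.Peers()
	require.NoError(t, n.Split(peers[:3], peers[3:]))

	// ... assert partition-local behavior here ...

	// Remove all Pumba containers so traffic flows freely again.
	require.NoError(t, n.DeletePartitions())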
+
+// Partition represents a network partition.
+// It contains its peers and the corresponding Pumba instances that block all traffic to peers in other partitions.
+type Partition struct {
+	name     string
+	peers    []*Peer
+	peersMap map[string]*Peer
+	pumbas   []*DockerContainer
+}
+
+// Peers returns the partition's peers.
+func (p *Partition) Peers() []*Peer {
+	return p.peers
+}
+
+// PeersMap returns the partition's peers map.
+func (p *Partition) PeersMap() map[string]*Peer {
+	return p.peersMap
+}
+
+func (p *Partition) String() string {
+	return fmt.Sprintf("Partition{%s, %s}", p.name, p.peers)
+}
+
+// deletePartition deletes a partition: it stops all its Pumba containers, writes their logs and removes them.
+func (p *Partition) deletePartition() error {
+	// stop containers
+	for _, pumba := range p.pumbas {
+		err := pumba.Stop()
+		if err != nil {
+			return err
+		}
+	}
+
+	// retrieve logs
+	for i, pumba := range p.pumbas {
+		logs, err := pumba.Logs()
+		if err != nil {
+			return err
+		}
+		err = createLogFile(fmt.Sprintf("%s%s", p.name, p.peers[i].name), logs)
+		if err != nil {
+			return err
+		}
+	}
+
+	for _, pumba := range p.pumbas {
+		err := pumba.Remove()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
 package framework
 
 const (
-	autopeeringMaxTries = 50
+	autopeeringMaxTries = 25
 
 	apiPort = "8080"
 
-	containerNameTester    = "/tester"
-	containerNameEntryNode = "entry_node"
-	containerNameReplica   = "replica_"
-	containerNameDrand     = "drand_"
+	containerNameTester      = "/tester"
+	containerNameEntryNode   = "entry_node"
+	containerNameReplica     = "replica_"
+	containerNameDrand       = "drand_"
+	containerNameSuffixPumba = "_pumba"
 
 	logsDir = "/tmp/logs/"
 
-	disabledPluginsEntryNode = "portcheck,dashboard,analysis-client,gossip,drng,issuer,sync,metrics,messagelayer,webapi,webapibroadcastdataendpoint,webapifindtransactionhashesendpoint,webapigetneighborsendpoint,webapigettransactionobjectsbyhashendpoint,webapigettransactiontrytesbyhashendpoint"
-	disabledPluginsPeer      = "portcheck,dashboard,analysis-client"
+	disabledPluginsEntryNode = "portcheck,dashboard,analysis-client,profiling,gossip,drng,issuer,sync,metrics,messagelayer,webapi,webapibroadcastdataendpoint,webapifindtransactionhashesendpoint,webapigetneighborsendpoint,webapigettransactionobjectsbyhashendpoint,webapigettransactiontrytesbyhashendpoint"
+	disabledPluginsPeer      = "portcheck,dashboard,analysis-client,profiling"
 
 	dockerLogsPrefixLen = 8
...
@@ -14,6 +14,7 @@ import (
 type Peer struct {
 	// name of the GoShimmer instance, Docker container and hostname
 	name string
+	ip   string
 
 	// GoShimmer identity
 	*identity.Identity
@@ -28,13 +29,21 @@ type Peer struct {
 }
 
 // newPeer creates a new instance of Peer with the given information.
-func newPeer(name string, identity *identity.Identity, dockerContainer *DockerContainer) *Peer {
+// dockerContainer needs to be started in order to determine the container's (and therefore peer's) IP correctly.
+func newPeer(name string, identity *identity.Identity, dockerContainer *DockerContainer, network *Network) (*Peer, error) {
+	// after the container is started we can get its IP
+	ip, err := dockerContainer.IP(network.name)
+	if err != nil {
+		return nil, err
+	}
+
 	return &Peer{
 		name:            name,
+		ip:              ip,
 		Identity:        identity,
 		GoShimmerAPI:    client.NewGoShimmerAPI(getWebAPIBaseURL(name), http.Client{Timeout: 30 * time.Second}),
 		DockerContainer: dockerContainer,
-	}
+	}, nil
 }
 
 func (p *Peer) String() string {
...
+package tests
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNetworkSplit(t *testing.T) {
+	n, err := f.CreateNetworkWithPartitions("autopeering_TestNetworkSplit", 6, 2, 2)
+	require.NoError(t, err)
+	defer ShutdownNetwork(t, n)
+
+	// test that nodes only have neighbors from the same partition
+	for _, partition := range n.Partitions() {
+		for _, peer := range partition.Peers() {
+			resp, err := peer.GetNeighbors(false)
+			require.NoError(t, err)
+
+			// check that all neighbors are indeed in the same partition
+			for _, n := range resp.Accepted {
+				assert.Contains(t, partition.PeersMap(), n.ID)
+			}
+			for _, n := range resp.Chosen {
+				assert.Contains(t, partition.PeersMap(), n.ID)
+			}
+		}
+	}
+
+	err = n.DeletePartitions()
+	require.NoError(t, err)
+
+	// let them mingle and check that they all peer with each other
+	err = n.WaitForAutopeering(4)
+	require.NoError(t, err)
+}
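
The thresholds in this test follow from the topology: in a partition of 3, a node sees at most 2 other nodes, so minimumNeighbors=2 is the strictest bound a partitioned node can satisfy, and reaching 4 neighbors after DeletePartitions is only possible once nodes peer across the former partition boundary, since at most 2 neighbors can come from a node's own half.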
@@ -65,7 +65,7 @@ func TestSynchronization(t *testing.T) {
 	// 8. issue some messages on old peers so that new peer can sync again
 	ids = sendDataMessagesOnRandomPeer(t, n.Peers()[:initalPeers], 10, ids)
 
 	// wait for peer to sync
-	time.Sleep(5 * time.Second)
+	time.Sleep(10 * time.Second)
 
 	// 9. newPeer becomes synced again
 	resp, err = newPeer.Info()
...