From 57fb70b7f08122b2acd6d548659a2726b7ea42e5 Mon Sep 17 00:00:00 2001 From: Luca Moser <moser.luca@gmail.com> Date: Fri, 19 Jun 2020 15:49:17 +0200 Subject: [PATCH] Adds consensus integration test (#468) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Feat: initial commit * Feat: added setPreferred to TransactionMetadata * Feat: added a Conflicting() method to the transactionMetadata * Fix: fixed logic bug * Feat: refactored fcob * Refactor: refactored additional code * Fix: fixed a bug in ForeachConsumers * Refactor: cleaned up code * Feat: implemented FCOB consensus into the valuetransfer dapp * Refactor: refactored FCOB * Docs: added some additional comments * Docs: fixed comments * add branch manager conflict test * cleans failing test * Refactor: commit before branch change * Fix: fixed bug in AggregateBranches * assert aggr. branch IDs * expands branch conflict detection test * add visualisation of branch graph of test * Feat: added PayloadLiked Event * Refactor: fixed some missing comments + added liked to marshal * Feat: reworked the preferred and liked propagation * Refactor: cleaned up some logic * Refactor: simplified code * Refactor: cleaned up more stuff :P * Refactor: refactor * Feat: moved test + refactored fcob * adds more tests * fixes liked state not getting updated correctly of conflict members * adds additional liked/preferred propagation test * Fix: fixed missing preferred propagation to aggregated branches * Fix: fixed a few bugs in liked propagation * adapt to new hive.go version * upgrade hive.go * Feat: started implementing a wallet * Feat: extended wallet files * remove weird test * use mem db for tests * more tests * use store backed sequence * add option to use in-memory database * address review comments * First tests for individual components of AttachPayloadSync * Fix: fixed missing events in branchmanaer * Feat: propagate changes from branch to transaction * Add tests for checkTransactionOutputs * Feat: started implementing confirmed propagation * Fix: fixed unreachable code * Add more tests * Refactor: refactored some code according to wolfgangs review * Refactor: cleaned up the code according to DRY * Refactor: refactored according to wollac * Refactor: refactored according to wollac * Refactor: refactored according to wollac * Refactor: refactored the code to make it more readable * Refactor: added some doc comments + cleaned up some more code * :white_check_mark: adds orderedMap unit tests * :rotating_light: Fix linter warnings * test: Add queue unit tests * Add more tests * :lipstick: Adjust imports order * WIP more tests * :white_check_mark: Add TestBookTransaction * :white_check_mark: Update TestBookTransaction * Add more tests * :construction: WIP tests * ✅ Add TestCalculateBranchOfTransaction * ✅ Add TestMoveTransactionToBranch * ✅ Add TestFork * ✅ Add TestBookPayload * Add test for checkPayloadSolidity * ✅ Add TestSetTransactionPreferred * Add more tests * ✅ Fix Tangle test * Feat: started implementing lucas test cases * Feat: fixed some issued + further tests * Feat: started adding invalid txs check * Feat: added removal logic for invalid transactions * Refactor: removed Println * ✅ Add test for 2nd Reattachment * feat: Add first value transfer integration test * fix: fix wrong plugin name * ✅ Add aggregated branches test cases * Feat: added a method to generate AggregatedBranchIDs * 🎨 Use GenerateAggregatedBranchID in test * Feat: refactored delete logic * Fix: fixed broken test * Feat: added final 
test cases for invalid txs / payloads * 🚧 WIP * Value tangle concurrency tests (#451) * Add simple concurrency test * Add reverse and concurrent transaction and value object solidification tests and fix bug when value object was visited more than once * Add some documentation to make tests easily understandable * WIP propagation tests but fixed already couple of bugs * Fix: fixed some bugs * Feat: added propagation to inclusion states to tx and its outputs * Feat: finished the propagation down to the tx and its outputs * WIP propagation tests and fix bugs * Add colored tokens test * Add value tangle test to github workflow * fix: Fix wrong function name in comments * refactor: Make testSnapshots disabled in default and minor tweaks * Feat: fixed some issues and introduced a Debugger * Refactor: added a few comments * Split massive test file into slightly more digestible chunks * Clean up propagation tests * Feat: fixed bugs * Feat: enabled missing tests * Add some documentation and missing checks for aggregated branches * Clean up tangle tests * adds snapshot type * Fix: finalized wasn't propagated when a branch was rejected * implements ReadFrom and WriteTo for Snapshot * read in snapshot file if snapshot path is defined * renames snapshot test file * WIP debugging concurrency bug of death * Feat: added more reliable fails in test case * Fix: fixes a race condition in solidification * Clean up test * adds assets volume to integration test containers * fixes some asserts * adds non-working conflict integration test * check transaction availability in partition * renames integration test * lower amount of peers * first passing version of consensus integration test * remove debug printlns * do all integration tests again * increases avg. network delay fcob rule, removes debug printlns * go mod tidy by Marie Kondō * renames incl. state. 
conflict to conflicting * go fmt tangle.go * go fmt tangle_test, goimports dapp.go * goimports again because the dog is sad * run consensus integration test on the CI * use explicit pumba version 0.7.2 * pray to the CI gods for the test to pass * fix panic when tangle.Fork() is called * readd all tests again * reset integration framework paras * removes test snapshot plugin * get rid of test snapshot plugin * fixes wrong use of Println * removes random tool * removes duplicated value entry in GH CI workflows * xxx * wip * fixes integration test Co-authored-by: Hans Moog <hm@mkjc.net> Co-authored-by: Wolfgang Welz <welzwo@gmail.com> Co-authored-by: jonastheis <mail@jonastheis.de> Co-authored-by: capossele <angelocapossele@gmail.com> Co-authored-by: jkrvivian <jkrvivian@gmail.com> --- .github/workflows/integration-tests.yml | 45 +++- dapps/valuetransfers/dapp.go | 39 +++- .../packages/balance/balance.go | 30 +-- .../packages/balance/balance_test.go | 8 +- .../valuetransfers/packages/consensus/fcob.go | 4 +- .../packages/tangle/ledgerstate.go | 2 +- .../packages/tangle/snapshot.go | 125 +++++++++++ .../packages/tangle/snapshot_test.go | 88 ++++++++ .../valuetransfers/packages/tangle/tangle.go | 38 ++-- .../packages/tangle/tangle_test.go | 31 +-- go.mod | 1 + go.sum | 2 + pluginmgr/core/plugins.go | 2 - plugins/autopeering/parameters.go | 4 + plugins/dashboard/payload_handler.go | 4 +- plugins/testsnapshots/plugin.go | 41 ---- .../value/gettransactionbyid/handler.go | 34 +-- .../webapi/value/sendtransaction/handler.go | 4 - .../webapi/value/unspentoutputs/handler.go | 35 +-- .../webapi/value/utils/transaction_handler.go | 15 +- ...x5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih.bin | Bin 0 -> 129 bytes tools/integration-tests/assets/entrypoint.sh | 9 + tools/integration-tests/runTests.sh | 2 +- .../tester/docker-compose.yml | 10 +- .../tester/framework/docker.go | 11 +- .../tester/framework/framework.go | 23 +- .../tester/framework/network.go | 3 +- .../tester/framework/parameters.go | 23 +- tools/integration-tests/tester/go.mod | 5 +- tools/integration-tests/tester/go.sum | 3 + .../consensus/consensus_conflicts_test.go | 209 +++++++++++++++++ .../consensus/consensus_noconflicts_test.go | 129 +++++++++++ .../tester/tests/consensus/main_test.go | 23 ++ .../tester/tests/testutil.go | 211 +++++++++++++++++- .../tester/tests/value/value_test.go | 54 +++-- 35 files changed, 1057 insertions(+), 210 deletions(-) create mode 100644 dapps/valuetransfers/packages/tangle/snapshot.go create mode 100644 dapps/valuetransfers/packages/tangle/snapshot_test.go delete mode 100644 plugins/testsnapshots/plugin.go create mode 100644 tools/integration-tests/assets/7R1itJx5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih.bin create mode 100755 tools/integration-tests/assets/entrypoint.sh create mode 100644 tools/integration-tests/tester/tests/consensus/consensus_conflicts_test.go create mode 100644 tools/integration-tests/tester/tests/consensus/consensus_noconflicts_test.go create mode 100644 tools/integration-tests/tester/tests/consensus/main_test.go diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index cd08c1e7..2e21208f 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -20,9 +20,9 @@ jobs: - name: Pull additional Docker images run: | docker pull angelocapossele/drand:latest - docker pull gaiaadm/pumba:latest + docker pull gaiaadm/pumba:0.7.2 docker pull gaiadocker/iproute2:latest - + - name: Run integration tests run: docker-compose -f 
tools/integration-tests/tester/docker-compose.yml up --abort-on-container-exit --exit-code-from tester --build @@ -55,7 +55,41 @@ jobs: - name: Pull additional Docker images run: | docker pull angelocapossele/drand:latest - docker pull gaiaadm/pumba:latest + docker pull gaiaadm/pumba:0.7.2 + docker pull gaiadocker/iproute2:latest + + - name: Run integration tests + run: docker-compose -f tools/integration-tests/tester/docker-compose.yml up --abort-on-container-exit --exit-code-from tester --build + + - name: Create logs from tester + if: always() + run: | + docker logs tester &> tools/integration-tests/logs/tester.log + + - name: Save logs as artifacts + if: always() + uses: actions/upload-artifact@v1 + with: + name: ${{ env.TEST_NAME }} + path: tools/integration-tests/logs + + consensus: + name: consensus + env: + TEST_NAME: consensus + runs-on: ubuntu-latest + steps: + + - name: Check out code + uses: actions/checkout@v2 + + - name: Build GoShimmer image + run: docker build -t iotaledger/goshimmer . + + - name: Pull additional Docker images + run: | + docker pull angelocapossele/drand:latest + docker pull gaiaadm/pumba:0.7.2 docker pull gaiadocker/iproute2:latest - name: Run integration tests @@ -90,7 +124,7 @@ jobs: - name: Pull additional Docker images run: | docker pull angelocapossele/drand:latest - docker pull gaiaadm/pumba:latest + docker pull gaiaadm/pumba:0.7.2 docker pull gaiadocker/iproute2:latest - name: Run integration tests @@ -126,7 +160,7 @@ jobs: - name: Pull additional Docker images run: | docker pull angelocapossele/drand:latest - docker pull gaiaadm/pumba:latest + docker pull gaiaadm/pumba:0.7.2 docker pull gaiadocker/iproute2:latest - name: Run integration tests @@ -145,6 +179,7 @@ jobs: path: tools/integration-tests/logs + value: name: value env: diff --git a/dapps/valuetransfers/dapp.go b/dapps/valuetransfers/dapp.go index fd63cfe7..c0a15aa0 100644 --- a/dapps/valuetransfers/dapp.go +++ b/dapps/valuetransfers/dapp.go @@ -1,9 +1,12 @@ package valuetransfers import ( + "os" "sync" "time" + flag "github.com/spf13/pflag" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/consensus" "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload" valuepayload "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload" @@ -13,6 +16,7 @@ import ( messageTangle "github.com/iotaledger/goshimmer/packages/binary/messagelayer/tangle" "github.com/iotaledger/goshimmer/packages/shutdown" "github.com/iotaledger/goshimmer/packages/vote" + "github.com/iotaledger/goshimmer/plugins/config" "github.com/iotaledger/goshimmer/plugins/database" "github.com/iotaledger/goshimmer/plugins/messagelayer" "github.com/iotaledger/hive.go/daemon" @@ -25,10 +29,21 @@ const ( // PluginName contains the human readable name of the plugin. PluginName = "ValueTransfers" - // AverageNetworkDelay contains the average time it takes for a network to propagate through gossip. - AverageNetworkDelay = 5 * time.Second + // DefaultAverageNetworkDelay contains the default average time it takes for a network to propagate through gossip. + DefaultAverageNetworkDelay = 5 * time.Second + + // CfgValueLayerSnapshotFile is the path to the snapshot file. + CfgValueLayerSnapshotFile = "valueLayer.snapshot.file" + + // CfgValueLayerFCOBAverageNetworkDelay is the avg. 
network delay to use for FCoB rules + CfgValueLayerFCOBAverageNetworkDelay = "valueLayer.fcob.averageNetworkDelay" ) +func init() { + flag.String(CfgValueLayerSnapshotFile, "", "the path to the snapshot file") + flag.Int(CfgValueLayerFCOBAverageNetworkDelay, 5, "the avg. network delay to use for FCoB rules") +} + var ( // App is the "plugin" instance of the value-transfers application. App = node.NewPlugin(PluginName, node.Enabled, configure, run) @@ -58,6 +73,22 @@ func configure(_ *node.Plugin) { // configure Tangle Tangle = tangle.New(database.Store()) + + // read snapshot file + snapshotFilePath := config.Node.GetString(CfgValueLayerSnapshotFile) + if len(snapshotFilePath) != 0 { + snapshot := tangle.Snapshot{} + f, err := os.Open(snapshotFilePath) + if err != nil { + log.Panic("can not open snapshot file:", err) + } + if _, err := snapshot.ReadFrom(f); err != nil { + log.Panic("could not read snapshot file:", err) + } + Tangle.LoadSnapshot(snapshot) + log.Infof("read snapshot from %s", snapshotFilePath) + } + Tangle.Events.Error.Attach(events.NewClosure(func(err error) { log.Error(err) })) @@ -76,7 +107,9 @@ func configure(_ *node.Plugin) { })) // configure FCOB consensus rules - FCOB = consensus.NewFCOB(Tangle, AverageNetworkDelay) + cfgAvgNetworkDelay := config.Node.GetInt(CfgValueLayerFCOBAverageNetworkDelay) + log.Infof("avg. network delay configured to %d seconds", cfgAvgNetworkDelay) + FCOB = consensus.NewFCOB(Tangle, time.Duration(cfgAvgNetworkDelay)*time.Second) FCOB.Events.Vote.Attach(events.NewClosure(func(id string, initOpn vote.Opinion) { if err := voter.Vote(id, initOpn); err != nil { log.Error(err) diff --git a/dapps/valuetransfers/packages/balance/balance.go b/dapps/valuetransfers/packages/balance/balance.go index 142b95f5..6559145b 100644 --- a/dapps/valuetransfers/packages/balance/balance.go +++ b/dapps/valuetransfers/packages/balance/balance.go @@ -8,15 +8,17 @@ import ( // Balance represents a balance in the IOTA ledger. It consists out of a numeric value and a color. type Balance struct { - value int64 - color Color + // The numeric value of the balance. + Value int64 `json:"value"` + // The color of the balance. + Color Color `json:"color"` } // New creates a new Balance with the given details. func New(color Color, balance int64) (result *Balance) { result = &Balance{ - color: color, - value: balance, + Color: color, + Value: balance, } return @@ -28,7 +30,7 @@ func FromBytes(bytes []byte) (result *Balance, consumedBytes int, err error) { marshalUtil := marshalutil.New(bytes) - result.value, err = marshalUtil.ReadInt64() + result.Value, err = marshalUtil.ReadInt64() if err != nil { return } @@ -40,7 +42,7 @@ func FromBytes(bytes []byte) (result *Balance, consumedBytes int, err error) { return nil, marshalUtil.ReadOffset(), colorErr } - result.color = coinColor.(Color) + result.Color = coinColor.(Color) consumedBytes = marshalUtil.ReadOffset() return @@ -56,29 +58,19 @@ func Parse(marshalUtil *marshalutil.MarshalUtil) (*Balance, error) { return address.(*Balance), nil } -// Value returns the numeric value of the balance. -func (balance *Balance) Value() int64 { - return balance.value -} - -// Color returns the Color of the balance. -func (balance *Balance) Color() Color { - return balance.color -} - // Bytes marshals the Balance into a sequence of bytes. 
func (balance *Balance) Bytes() []byte { marshalUtil := marshalutil.New(Length) - marshalUtil.WriteInt64(balance.value) - marshalUtil.WriteBytes(balance.color.Bytes()) + marshalUtil.WriteInt64(balance.Value) + marshalUtil.WriteBytes(balance.Color.Bytes()) return marshalUtil.Bytes() } // String creates a human readable string of the Balance. func (balance *Balance) String() string { - return strconv.FormatInt(balance.value, 10) + " " + balance.color.String() + return strconv.FormatInt(balance.Value, 10) + " " + balance.Color.String() } // Length encodes the length of a marshaled Balance (the length of the color + 8 bytes for the balance). diff --git a/dapps/valuetransfers/packages/balance/balance_test.go b/dapps/valuetransfers/packages/balance/balance_test.go index 5dd98567..35401c66 100644 --- a/dapps/valuetransfers/packages/balance/balance_test.go +++ b/dapps/valuetransfers/packages/balance/balance_test.go @@ -8,8 +8,8 @@ import ( func TestMarshalUnmarshal(t *testing.T) { balance := New(ColorIOTA, 1337) - assert.Equal(t, int64(1337), balance.Value()) - assert.Equal(t, ColorIOTA, balance.Color()) + assert.Equal(t, int64(1337), balance.Value) + assert.Equal(t, ColorIOTA, balance.Color) marshaledBalance := balance.Bytes() assert.Equal(t, Length, len(marshaledBalance)) @@ -19,6 +19,6 @@ func TestMarshalUnmarshal(t *testing.T) { panic(err) } assert.Equal(t, Length, consumedBytes) - assert.Equal(t, balance.value, restoredBalance.Value()) - assert.Equal(t, balance.color, restoredBalance.Color()) + assert.Equal(t, balance.Value, restoredBalance.Value) + assert.Equal(t, balance.Color, restoredBalance.Color) } diff --git a/dapps/valuetransfers/packages/consensus/fcob.go b/dapps/valuetransfers/packages/consensus/fcob.go index d5b86548..1d2a98aa 100644 --- a/dapps/valuetransfers/packages/consensus/fcob.go +++ b/dapps/valuetransfers/packages/consensus/fcob.go @@ -3,6 +3,7 @@ package consensus import ( "time" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager" "github.com/iotaledger/hive.go/events" "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/tangle" @@ -138,9 +139,10 @@ func (fcob *FCOB) setFinalized(cachedTransactionMetadata *tangle.CachedTransacti // onFork triggers a voting process whenever a Transaction gets forked into a new Branch. The initial opinion is derived // from the preferred flag that was set using the FCOB rule. 
-func (fcob *FCOB) onFork(cachedTransaction *transaction.CachedTransaction, cachedTransactionMetadata *tangle.CachedTransactionMetadata) { +func (fcob *FCOB) onFork(cachedTransaction *transaction.CachedTransaction, cachedTransactionMetadata *tangle.CachedTransactionMetadata, cachedTargetBranch *branchmanager.CachedBranch, conflictingInputs []transaction.OutputID) { defer cachedTransaction.Release() defer cachedTransactionMetadata.Release() + defer cachedTargetBranch.Release() transactionMetadata := cachedTransactionMetadata.Unwrap() if transactionMetadata == nil { diff --git a/dapps/valuetransfers/packages/tangle/ledgerstate.go b/dapps/valuetransfers/packages/tangle/ledgerstate.go index 5c0d8c31..9b676acf 100644 --- a/dapps/valuetransfers/packages/tangle/ledgerstate.go +++ b/dapps/valuetransfers/packages/tangle/ledgerstate.go @@ -25,7 +25,7 @@ func (ledgerState *LedgerState) Balances(address address.Address) (coloredBalanc ledgerState.tangle.OutputsOnAddress(address).Consume(func(output *Output) { if output.ConsumerCount() == 0 { for _, coloredBalance := range output.Balances() { - coloredBalances[coloredBalance.Color()] += coloredBalance.Value() + coloredBalances[coloredBalance.Color] += coloredBalance.Value } } }) diff --git a/dapps/valuetransfers/packages/tangle/snapshot.go b/dapps/valuetransfers/packages/tangle/snapshot.go new file mode 100644 index 00000000..b13df4d8 --- /dev/null +++ b/dapps/valuetransfers/packages/tangle/snapshot.go @@ -0,0 +1,125 @@ +package tangle + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction" +) + +// Snapshot defines a snapshot of the ledger state. 
+type Snapshot map[transaction.ID]map[address.Address][]*balance.Balance + +// WriteTo writes the snapshot data to the given writer in the following format: +// transaction_count(int64) +// -> transaction_count * transaction_id(32byte) +// ->address_count(int64) +// ->address_count * address(33byte) +// ->balance_count(int64) +// ->balance_count * value(int64)+color(32byte) +func (s Snapshot) WriteTo(writer io.Writer) (int64, error) { + var bytesWritten int64 + transactionCount := len(s) + if err := binary.Write(writer, binary.LittleEndian, int64(transactionCount)); err != nil { + return 0, fmt.Errorf("unable to write transactions count: %w", err) + } + bytesWritten += 8 + for txID, addresses := range s { + if err := binary.Write(writer, binary.LittleEndian, txID); err != nil { + return bytesWritten, fmt.Errorf("unable to write transaction ID: %w", err) + } + bytesWritten += transaction.IDLength + if err := binary.Write(writer, binary.LittleEndian, int64(len(addresses))); err != nil { + return bytesWritten, fmt.Errorf("unable to write address count: %w", err) + } + bytesWritten += 8 + for addr, balances := range addresses { + if err := binary.Write(writer, binary.LittleEndian, addr); err != nil { + return bytesWritten, fmt.Errorf("unable to write address: %w", err) + } + bytesWritten += address.Length + if err := binary.Write(writer, binary.LittleEndian, int64(len(balances))); err != nil { + return bytesWritten, fmt.Errorf("unable to write balance count: %w", err) + } + bytesWritten += 8 + for _, bal := range balances { + if err := binary.Write(writer, binary.LittleEndian, bal.Value); err != nil { + return bytesWritten, fmt.Errorf("unable to write balance value: %w", err) + } + bytesWritten += 8 + if err := binary.Write(writer, binary.LittleEndian, bal.Color); err != nil { + return bytesWritten, fmt.Errorf("unable to write balance color: %w", err) + } + bytesWritten += balance.ColorLength + } + } + } + + return bytesWritten, nil +} + +// ReadFrom reads the snapshot bytes from the given reader. +// This function overrides existing content of the snapshot. 
+func (s Snapshot) ReadFrom(reader io.Reader) (int64, error) { + var bytesRead int64 + var transactionCount int64 + if err := binary.Read(reader, binary.LittleEndian, &transactionCount); err != nil { + return 0, fmt.Errorf("unable to read transaction count: %w", err) + } + bytesRead += 8 + + var i int64 + for ; i < transactionCount; i++ { + txIDBytes := make([]byte, transaction.IDLength) + if err := binary.Read(reader, binary.LittleEndian, txIDBytes); err != nil { + return bytesRead, fmt.Errorf("unable to read transaction ID: %w", err) + } + bytesRead += transaction.IDLength + var addrCount int64 + if err := binary.Read(reader, binary.LittleEndian, &addrCount); err != nil { + return bytesRead, fmt.Errorf("unable to read address count: %w", err) + } + bytesRead += 8 + txAddrMap := make(map[address.Address][]*balance.Balance, addrCount) + var j int64 + for ; j < addrCount; j++ { + addrBytes := make([]byte, address.Length) + if err := binary.Read(reader, binary.LittleEndian, addrBytes); err != nil { + return bytesRead, fmt.Errorf("unable to read address: %w", err) + } + bytesRead += address.Length + var balanceCount int64 + if err := binary.Read(reader, binary.LittleEndian, &balanceCount); err != nil { + return bytesRead, fmt.Errorf("unable to read balance count: %w", err) + } + bytesRead += 8 + + balances := make([]*balance.Balance, balanceCount) + var k int64 + for ; k < balanceCount; k++ { + var value int64 + if err := binary.Read(reader, binary.LittleEndian, &value); err != nil { + return bytesRead, fmt.Errorf("unable to read balance value: %w", err) + } + bytesRead += 8 + color := balance.Color{} + if err := binary.Read(reader, binary.LittleEndian, &color); err != nil { + return bytesRead, fmt.Errorf("unable to read balance color: %w", err) + } + bytesRead += balance.ColorLength + balances[k] = &balance.Balance{Value: value, Color: color} + } + addr := address.Address{} + copy(addr[:], addrBytes) + txAddrMap[addr] = balances + } + txID := transaction.ID{} + copy(txID[:], txIDBytes) + s[txID] = txAddrMap + } + + return bytesRead, nil +} diff --git a/dapps/valuetransfers/packages/tangle/snapshot_test.go b/dapps/valuetransfers/packages/tangle/snapshot_test.go new file mode 100644 index 00000000..7ef8b153 --- /dev/null +++ b/dapps/valuetransfers/packages/tangle/snapshot_test.go @@ -0,0 +1,88 @@ +package tangle + +import ( + "bytes" + "testing" + + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/branchmanager" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/wallet" + "github.com/iotaledger/hive.go/kvstore/mapdb" + "github.com/stretchr/testify/assert" +) + +func TestLoadSnapshot(t *testing.T) { + tangle := New(mapdb.NewMapDB()) + + snapshot := map[transaction.ID]map[address.Address][]*balance.Balance{ + transaction.GenesisID: { + address.Random(): []*balance.Balance{ + balance.New(balance.ColorIOTA, 337), + }, + + address.Random(): []*balance.Balance{ + balance.New(balance.ColorIOTA, 1000), + balance.New(balance.ColorIOTA, 1000), + }, + }, + } + tangle.LoadSnapshot(snapshot) + + // check whether outputs can be retrieved from tangle + for addr, balances := range snapshot[transaction.GenesisID] { + cachedOutput := tangle.TransactionOutput(transaction.NewOutputID(addr, transaction.GenesisID)) + cachedOutput.Consume(func(output *Output) { + 
assert.Equal(t, addr, output.Address()) + assert.ElementsMatch(t, balances, output.Balances()) + assert.True(t, output.Solid()) + assert.Equal(t, branchmanager.MasterBranchID, output.BranchID()) + }) + } +} + +func TestSnapshotMarshalUnmarshal(t *testing.T) { + const genesisBalance = 1000000000 + seed := wallet.NewSeed() + genesisAddr := seed.Address(GENESIS) + + snapshot := Snapshot{ + transaction.GenesisID: { + genesisAddr: { + balance.New(balance.ColorIOTA, genesisBalance), + }, + }, + } + + // includes txs count + const int64ByteSize = 8 + expectedLength := int64ByteSize + for _, addresses := range snapshot { + // tx id + expectedLength += transaction.IDLength + // addr count + expectedLength += int64ByteSize + for _, balances := range addresses { + // addr + expectedLength += address.Length + // balance count + expectedLength += int64ByteSize + // balances + expectedLength += len(balances) * (int64ByteSize + balance.ColorLength) + } + } + + var buf bytes.Buffer + written, err := snapshot.WriteTo(&buf) + assert.NoError(t, err, "writing the snapshot to the buffer should succeed") + assert.EqualValues(t, expectedLength, written, "written byte count should match the expected count") + + snapshotFromBytes := Snapshot{} + read, err := snapshotFromBytes.ReadFrom(&buf) + assert.NoError(t, err, "expected no error from reading valid snapshot bytes") + assert.EqualValues(t, expectedLength, read, "read byte count should match the expected count") + + // check that the source and unmarshaled snapshot are equivalent + assert.Equal(t, snapshot, snapshotFromBytes) +} diff --git a/dapps/valuetransfers/packages/tangle/tangle.go b/dapps/valuetransfers/packages/tangle/tangle.go index e8787086..7c91dec1 100644 --- a/dapps/valuetransfers/packages/tangle/tangle.go +++ b/dapps/valuetransfers/packages/tangle/tangle.go @@ -1373,8 +1373,8 @@ func (tangle *Tangle) bookTransaction(cachedTransaction *transaction.CachedTrans // create correctly colored balances (replacing color of newly minted coins with color of transaction id) coloredBalances := make([]*balance.Balance, len(balances)) for i, currentBalance := range balances { - if currentBalance.Color() == balance.ColorNew { - coloredBalances[i] = balance.New(mintedColor, currentBalance.Value()) + if currentBalance.Color == balance.ColorNew { + coloredBalances[i] = balance.New(mintedColor, currentBalance.Value) } else { coloredBalances[i] = currentBalance } @@ -1594,9 +1594,9 @@ func (tangle *Tangle) retrieveConsumedInputDetails(tx *transaction.Transaction) // calculate the input balances for _, inputBalance := range input.Balances() { var newBalance int64 - if currentBalance, balanceExists := consumedBalances[inputBalance.Color()]; balanceExists { + if currentBalance, balanceExists := consumedBalances[inputBalance.Color]; balanceExists { // check overflows in the numbers - if inputBalance.Value() > math.MaxInt64-currentBalance { + if inputBalance.Value > math.MaxInt64-currentBalance { // TODO: make it an explicit error var err = fmt.Errorf("buffer overflow in balances of inputs: %w", ErrTransactionInvalid) @@ -1605,11 +1605,11 @@ func (tangle *Tangle) retrieveConsumedInputDetails(tx *transaction.Transaction) return } - newBalance = currentBalance + inputBalance.Value() + newBalance = currentBalance + inputBalance.Value } else { - newBalance = inputBalance.Value() + newBalance = inputBalance.Value } - consumedBalances[inputBalance.Color()] = newBalance + consumedBalances[inputBalance.Color] = newBalance } } inputsSolid = true @@ -1630,51 +1630,51 @@ func (tangle 
*Tangle) checkTransactionOutputs(inputBalances map[balance.Color]in aborted := !outputs.ForEach(func(address address.Address, balances []*balance.Balance) bool { for _, outputBalance := range balances { // abort if the output creates a negative or empty output - if outputBalance.Value() <= 0 { + if outputBalance.Value <= 0 { return false } // sidestep logic if we have a newly colored output (we check the supply later) - if outputBalance.Color() == balance.ColorNew { + if outputBalance.Color == balance.ColorNew { // catch overflows - if newlyColoredCoins > math.MaxInt64-outputBalance.Value() { + if newlyColoredCoins > math.MaxInt64-outputBalance.Value { return false } - newlyColoredCoins += outputBalance.Value() + newlyColoredCoins += outputBalance.Value continue } // sidestep logic if we have ColorIOTA - if outputBalance.Color() == balance.ColorIOTA { + if outputBalance.Color == balance.ColorIOTA { // catch overflows - if uncoloredCoins > math.MaxInt64-outputBalance.Value() { + if uncoloredCoins > math.MaxInt64-outputBalance.Value { return false } - uncoloredCoins += outputBalance.Value() + uncoloredCoins += outputBalance.Value continue } // check if the used color does not exist in our supply - availableBalance, spentColorExists := inputBalances[outputBalance.Color()] + availableBalance, spentColorExists := inputBalances[outputBalance.Color] if !spentColorExists { return false } // abort if we spend more coins of the given color than we have - if availableBalance < outputBalance.Value() { + if availableBalance < outputBalance.Value { return false } // subtract the spent coins from the supply of this color - inputBalances[outputBalance.Color()] -= outputBalance.Value() + inputBalances[outputBalance.Color] -= outputBalance.Value // cleanup empty map entries (we have exhausted our funds) - if inputBalances[outputBalance.Color()] == 0 { - delete(inputBalances, outputBalance.Color()) + if inputBalances[outputBalance.Color] == 0 { + delete(inputBalances, outputBalance.Color) } } diff --git a/dapps/valuetransfers/packages/tangle/tangle_test.go b/dapps/valuetransfers/packages/tangle/tangle_test.go index b00f07ee..aab20994 100644 --- a/dapps/valuetransfers/packages/tangle/tangle_test.go +++ b/dapps/valuetransfers/packages/tangle/tangle_test.go @@ -926,35 +926,6 @@ func TestGetCachedOutputsFromTransactionInputs(t *testing.T) { } } -func TestLoadSnapshot(t *testing.T) { - tangle := New(mapdb.NewMapDB()) - - snapshot := map[transaction.ID]map[address.Address][]*balance.Balance{ - transaction.GenesisID: { - address.Random(): []*balance.Balance{ - balance.New(balance.ColorIOTA, 337), - }, - - address.Random(): []*balance.Balance{ - balance.New(balance.ColorIOTA, 1000), - balance.New(balance.ColorIOTA, 1000), - }, - }, - } - tangle.LoadSnapshot(snapshot) - - // check whether outputs can be retrieved from tangle - for addr, balances := range snapshot[transaction.GenesisID] { - cachedOutput := tangle.TransactionOutput(transaction.NewOutputID(addr, transaction.GenesisID)) - cachedOutput.Consume(func(output *Output) { - assert.Equal(t, addr, output.Address()) - assert.ElementsMatch(t, balances, output.Balances()) - assert.True(t, output.Solid()) - assert.Equal(t, branchmanager.MasterBranchID, output.BranchID()) - }) - } -} - func TestRetrieveConsumedInputDetails(t *testing.T) { // test simple happy case { @@ -1649,7 +1620,7 @@ func sumOutputsByColor(outputs map[address.Address][]*balance.Balance) map[balan for _, balances := range outputs { for _, bal := range balances { - totals[bal.Color()] += bal.Value() 
+ totals[bal.Color] += bal.Value } } diff --git a/go.mod b/go.mod index 7d5fd68a..9fa7fb4a 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/drand/kyber v1.0.1-0.20200331114745-30e90cc60f99 github.com/gobuffalo/packr/v2 v2.7.1 github.com/golang/protobuf v1.3.5 + github.com/google/go-cmp v0.4.1 github.com/gorilla/websocket v1.4.1 github.com/iotaledger/hive.go v0.0.0-20200617164933-c48b4401b814 github.com/iotaledger/iota.go v1.0.0-beta.14 diff --git a/go.sum b/go.sum index 4586afd0..d81ca848 100644 --- a/go.sum +++ b/go.sum @@ -120,6 +120,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= diff --git a/pluginmgr/core/plugins.go b/pluginmgr/core/plugins.go index eb4ff3b6..1eba3f11 100644 --- a/pluginmgr/core/plugins.go +++ b/pluginmgr/core/plugins.go @@ -18,7 +18,6 @@ import ( "github.com/iotaledger/goshimmer/plugins/portcheck" "github.com/iotaledger/goshimmer/plugins/profiling" "github.com/iotaledger/goshimmer/plugins/sync" - "github.com/iotaledger/goshimmer/plugins/testsnapshots" "github.com/iotaledger/hive.go/node" ) @@ -41,5 +40,4 @@ var PLUGINS = node.Plugins( metrics.Plugin, drng.Plugin, valuetransfers.App, - testsnapshots.Plugin, ) diff --git a/plugins/autopeering/parameters.go b/plugins/autopeering/parameters.go index 01022ef3..0cd2088f 100644 --- a/plugins/autopeering/parameters.go +++ b/plugins/autopeering/parameters.go @@ -7,8 +7,12 @@ import ( const ( // CfgEntryNodes defines the config flag of the entry nodes. CfgEntryNodes = "autopeering.entryNodes" + + // CfgOutboundUpdateIntervalMs time after which out neighbors are updated. 
+ CfgOutboundUpdateIntervalMs = "autopeering.outboundUpdateIntervalMs" ) func init() { flag.StringSlice(CfgEntryNodes, []string{"V8LYtWWcPYYDTTXLeIEFjJEuWlsjDiI0+Pq/Cx9ai6g=@116.202.49.178:14626"}, "list of trusted entry nodes for auto peering") + flag.Int(CfgOutboundUpdateIntervalMs, 10, "time after which out neighbors are updated") } diff --git a/plugins/dashboard/payload_handler.go b/plugins/dashboard/payload_handler.go index db8ab642..6ffa3593 100644 --- a/plugins/dashboard/payload_handler.go +++ b/plugins/dashboard/payload_handler.go @@ -135,8 +135,8 @@ func processValuePayload(p payload.Payload) (vp ValuePayload) { var b []Balance for _, balance := range balances { b = append(b, Balance{ - Value: balance.Value(), - Color: balance.Color().String(), + Value: balance.Value, + Color: balance.Color.String(), }) } t := OutputContent{ diff --git a/plugins/testsnapshots/plugin.go b/plugins/testsnapshots/plugin.go deleted file mode 100644 index 294e5337..00000000 --- a/plugins/testsnapshots/plugin.go +++ /dev/null @@ -1,41 +0,0 @@ -package testsnapshots - -import ( - "github.com/iotaledger/goshimmer/dapps/valuetransfers" - "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address" - "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance" - "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction" - "github.com/iotaledger/hive.go/logger" - "github.com/iotaledger/hive.go/node" -) - -// FIXME: This plugin can be removed after snapshots is implemented -const ( - // PluginName is the plugin name of the TestSnapshots plugin. - PluginName = "TestSnapshots" -) - -var ( - // Plugin is the plugin instance of the TestSnapshots plugin. - Plugin = node.NewPlugin(PluginName, node.Disabled, configure, run) - log *logger.Logger - - // addresses for snapshots - address0, _ = address.FromBase58("JaMauTaTSVBNc13edCCvBK9fZxZ1KKW5fXegT1B7N9jY") -) - -func configure(_ *node.Plugin) { - log = logger.NewLogger(PluginName) - - valuetransfers.Tangle.LoadSnapshot(map[transaction.ID]map[address.Address][]*balance.Balance{ - transaction.GenesisID: { - address0: []*balance.Balance{ - balance.New(balance.ColorIOTA, 10000000), - }, - }, - }) - - log.Infof("load snapshots to tangle") -} - -func run(_ *node.Plugin) {} diff --git a/plugins/webapi/value/gettransactionbyid/handler.go b/plugins/webapi/value/gettransactionbyid/handler.go index df4791ab..b4bee704 100644 --- a/plugins/webapi/value/gettransactionbyid/handler.go +++ b/plugins/webapi/value/gettransactionbyid/handler.go @@ -7,40 +7,40 @@ import ( "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction" "github.com/iotaledger/goshimmer/plugins/webapi/value/utils" "github.com/labstack/echo" - "github.com/labstack/gommon/log" ) // Handler gets the transaction by id. 
func Handler(c echo.Context) error { txnID, err := transaction.IDFromBase58(c.QueryParam("txnID")) if err != nil { - log.Info(err) return c.JSON(http.StatusBadRequest, Response{Error: err.Error()}) } // get txn by txn id - txnObj := valuetransfers.Tangle.Transaction(txnID) - defer txnObj.Release() - if !txnObj.Exists() { + cachedTxnMetaObj := valuetransfers.Tangle.TransactionMetadata(txnID) + defer cachedTxnMetaObj.Release() + if !cachedTxnMetaObj.Exists() { return c.JSON(http.StatusNotFound, Response{Error: "Transaction not found"}) } - txn := utils.ParseTransaction(txnObj.Unwrap()) - - // get txn metadata - txnMetadataObj := valuetransfers.Tangle.TransactionMetadata(txnID) - defer txnMetadataObj.Release() - if !txnMetadataObj.Exists() { - return c.JSON(http.StatusNotFound, Response{Error: "Transaction Metadata not found"}) + cachedTxnObj := valuetransfers.Tangle.Transaction(txnID) + defer cachedTxnObj.Release() + if !cachedTxnObj.Exists() { + return c.JSON(http.StatusNotFound, Response{Error: "Transaction not found"}) } - txnMetadata := txnMetadataObj.Unwrap() + txn := utils.ParseTransaction(cachedTxnObj.Unwrap()) + txnMeta := cachedTxnMetaObj.Unwrap() + txnMeta.Preferred() return c.JSON(http.StatusOK, Response{ Transaction: txn, InclusionState: utils.InclusionState{ - Solid: txnMetadata.Solid(), - Confirmed: txnMetadata.Confirmed(), - Rejected: txnMetadata.Rejected(), - Liked: txnMetadata.Liked(), + Confirmed: txnMeta.Confirmed(), + Conflicting: txnMeta.Conflicting(), + Liked: txnMeta.Liked(), + Solid: txnMeta.Solid(), + Rejected: txnMeta.Rejected(), + Finalized: txnMeta.Finalized(), + Preferred: txnMeta.Preferred(), }, }) } diff --git a/plugins/webapi/value/sendtransaction/handler.go b/plugins/webapi/value/sendtransaction/handler.go index b1dfa63d..8a655715 100644 --- a/plugins/webapi/value/sendtransaction/handler.go +++ b/plugins/webapi/value/sendtransaction/handler.go @@ -7,21 +7,18 @@ import ( "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction" "github.com/iotaledger/goshimmer/plugins/issuer" "github.com/labstack/echo" - "github.com/labstack/gommon/log" ) // Handler sends a transaction. 
func Handler(c echo.Context) error { var request Request if err := c.Bind(&request); err != nil { - log.Info(err.Error()) return c.JSON(http.StatusBadRequest, Response{Error: err.Error()}) } // prepare transaction tx, _, err := transaction.FromBytes(request.TransactionBytes) if err != nil { - log.Info(err.Error()) return c.JSON(http.StatusBadRequest, Response{Error: err.Error()}) } @@ -29,7 +26,6 @@ func Handler(c echo.Context) error { payload := valuetransfers.ValueObjectFactory().IssueTransaction(tx) _, err = issuer.IssuePayload(payload) if err != nil { - log.Info(err.Error()) return c.JSON(http.StatusBadRequest, Response{Error: err.Error()}) } diff --git a/plugins/webapi/value/unspentoutputs/handler.go b/plugins/webapi/value/unspentoutputs/handler.go index 738c278f..05be4eb5 100644 --- a/plugins/webapi/value/unspentoutputs/handler.go +++ b/plugins/webapi/value/unspentoutputs/handler.go @@ -28,29 +28,38 @@ func Handler(c echo.Context) error { outputids := make([]OutputID, 0) // get outputids by address - for id, outputObj := range valuetransfers.Tangle.OutputsOnAddress(address) { - defer outputObj.Release() - output := outputObj.Unwrap() + for id, cachedOutput := range valuetransfers.Tangle.OutputsOnAddress(address) { + // TODO: don't do this in a for + defer cachedOutput.Release() + output := cachedOutput.Unwrap() + cachedTxMeta := valuetransfers.Tangle.TransactionMetadata(output.TransactionID()) + // TODO: don't do this in a for + defer cachedTxMeta.Release() if output.ConsumerCount() == 0 { // iterate balances var b []utils.Balance for _, balance := range output.Balances() { b = append(b, utils.Balance{ - Value: balance.Value(), - Color: balance.Color().String(), + Value: balance.Value, + Color: balance.Color.String(), }) } + inclusionState := utils.InclusionState{} + if cachedTxMeta.Exists() { + txMeta := cachedTxMeta.Unwrap() + inclusionState.Confirmed = txMeta.Confirmed() + inclusionState.Liked = txMeta.Liked() + inclusionState.Rejected = txMeta.Rejected() + inclusionState.Finalized = txMeta.Finalized() + inclusionState.Conflicting = txMeta.Conflicting() + inclusionState.Confirmed = txMeta.Confirmed() + } outputids = append(outputids, OutputID{ - ID: id.String(), - Balances: b, - InclusionState: utils.InclusionState{ - Solid: output.Solid(), - Confirmed: output.Confirmed(), - Rejected: output.Rejected(), - Liked: output.Liked(), - }, + ID: id.String(), + Balances: b, + InclusionState: inclusionState, }) } } diff --git a/plugins/webapi/value/utils/transaction_handler.go b/plugins/webapi/value/utils/transaction_handler.go index 359446a2..9664c52d 100644 --- a/plugins/webapi/value/utils/transaction_handler.go +++ b/plugins/webapi/value/utils/transaction_handler.go @@ -21,8 +21,8 @@ func ParseTransaction(t *transaction.Transaction) (txn Transaction) { var b []Balance for _, balance := range balances { b = append(b, Balance{ - Value: balance.Value(), - Color: balance.Color().String(), + Value: balance.Value, + Color: balance.Color.String(), }) } t := Output{ @@ -64,8 +64,11 @@ type Balance struct { // InclusionState represents the different states of an OutputID type InclusionState struct { - Solid bool `json:"solid,omitempty"` - Confirmed bool `json:"confirmed,omitempty"` - Rejected bool `json:"rejected,omitempty"` - Liked bool `json:"liked,omitempty"` + Solid bool `json:"solid,omitempty"` + Confirmed bool `json:"confirmed,omitempty"` + Rejected bool `json:"rejected,omitempty"` + Liked bool `json:"liked,omitempty"` + Conflicting bool `json:"conflicting,omitempty"` + Finalized bool 
`json:"finalized,omitempty"` + Preferred bool `json:"preferred,omitempty"` } diff --git a/tools/integration-tests/assets/7R1itJx5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih.bin b/tools/integration-tests/assets/7R1itJx5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih.bin new file mode 100644 index 0000000000000000000000000000000000000000..2e55197593a21cb650727fb4a4f9c58e92acbc4d GIT binary patch literal 129 zcmZQ%AP&IPG2ULq?kZ|hk!EV4Xzkg0xA5t`gBt3uGwTYU_U`z-MC&U|A;YOz*2EYM E031>ge*gdg literal 0 HcmV?d00001 diff --git a/tools/integration-tests/assets/entrypoint.sh b/tools/integration-tests/assets/entrypoint.sh new file mode 100755 index 00000000..a5ec0407 --- /dev/null +++ b/tools/integration-tests/assets/entrypoint.sh @@ -0,0 +1,9 @@ +#!/bin/bash +echo "copying assets into shared volume..." +rm -rf /assets/* +cp -rp /tmp/assets/* /assets +chmod 777 /assets/* +echo "assets:" +ls /assets +echo "running tests..." +go test ./tests/"${TEST_NAME}" -v -timeout 30m diff --git a/tools/integration-tests/runTests.sh b/tools/integration-tests/runTests.sh index 28cb55b9..6dbde91b 100755 --- a/tools/integration-tests/runTests.sh +++ b/tools/integration-tests/runTests.sh @@ -1,6 +1,6 @@ #!/bin/bash -TEST_NAMES='autopeering common drng message value' +TEST_NAMES='autopeering common drng message value consensus' echo "Build GoShimmer image" docker build -t iotaledger/goshimmer ../../. diff --git a/tools/integration-tests/tester/docker-compose.yml b/tools/integration-tests/tester/docker-compose.yml index afbc3e30..578b04c3 100644 --- a/tools/integration-tests/tester/docker-compose.yml +++ b/tools/integration-tests/tester/docker-compose.yml @@ -5,13 +5,19 @@ services: container_name: tester image: golang:1.14.4 working_dir: /tmp/goshimmer/tools/integration-tests/tester - entrypoint: go test ./tests/${TEST_NAME} -v -mod=readonly -timeout 30m + command: /tmp/assets/entrypoint.sh + environment: + - TEST_NAME=${TEST_NAME} volumes: - /var/run/docker.sock:/var/run/docker.sock:ro - - ../../..:/tmp/goshimmer:ro + - ../../..:/tmp/goshimmer:rw - ../logs:/tmp/logs + - ../assets:/tmp/assets - goshimmer-testing-cache:/go + - goshimmer-testing-assets:/assets volumes: goshimmer-testing-cache: name: goshimmer-testing-cache + goshimmer-testing-assets: + name: goshimmer-testing-assets \ No newline at end of file diff --git a/tools/integration-tests/tester/framework/docker.go b/tools/integration-tests/tester/framework/docker.go index 67a65d58..189a82f0 100644 --- a/tools/integration-tests/tester/framework/docker.go +++ b/tools/integration-tests/tester/framework/docker.go @@ -82,11 +82,11 @@ func (d *DockerContainer) CreateGoShimmerPeer(config GoShimmerConfig) error { Cmd: strslice.StrSlice{ "--skip-config=true", "--logger.level=debug", + fmt.Sprintf("--valueLayer.fcob.averageNetworkDelay=%d", ParaFCoBAverageNetworkDelay), + fmt.Sprintf("--autopeering.outboundUpdateIntervalMs=%d", ParaOutboundUpdateIntervalMs), fmt.Sprintf("--node.disablePlugins=%s", config.DisabledPlugins), fmt.Sprintf("--node.enablePlugins=%s", func() string { var plugins []string - //TODO: remove this when snapshots is implemented - plugins = append(plugins, "testSnapshots") if config.Bootstrap { plugins = append(plugins, "Bootstrap") } @@ -95,6 +95,7 @@ func (d *DockerContainer) CreateGoShimmerPeer(config GoShimmerConfig) error { } return strings.Join(plugins[:], ",") }()), + fmt.Sprintf("--valueLayer.snapshot.file=%s", config.SnapshotFilePath), fmt.Sprintf("--bootstrap.initialIssuance.timePeriodSec=%d", config.BootstrapInitialIssuanceTimePeriodSec), 
"--webapi.bindAddress=0.0.0.0:8080", fmt.Sprintf("--autopeering.seed=base58:%s", config.Seed), @@ -106,7 +107,9 @@ func (d *DockerContainer) CreateGoShimmerPeer(config GoShimmerConfig) error { }, } - return d.CreateContainer(config.Name, containerConfig) + return d.CreateContainer(config.Name, containerConfig, &container.HostConfig{ + Binds: []string{"goshimmer-testing-assets:/assets:rw"}, + }) } // CreateDrandMember creates a new container with the drand configuration. @@ -155,7 +158,7 @@ func (d *DockerContainer) CreatePumba(name string, containerName string, targetI cmd = append(cmd, slice...) containerConfig := &container.Config{ - Image: "gaiaadm/pumba:latest", + Image: "gaiaadm/pumba:0.7.2", Cmd: cmd, } diff --git a/tools/integration-tests/tester/framework/framework.go b/tools/integration-tests/tester/framework/framework.go index d4456d57..fa1a80a9 100644 --- a/tools/integration-tests/tester/framework/framework.go +++ b/tools/integration-tests/tester/framework/framework.go @@ -80,7 +80,12 @@ func (f *Framework) CreateNetwork(name string, peers int, minimumNeighbors int, // create peers/GoShimmer nodes for i := 0; i < peers; i++ { config := GoShimmerConfig{ - Bootstrap: i == 0, + Bootstrap: func(i int) bool { + if ParaBootstrapOnEveryNode { + return true + } + return i == 0 + }(i), BootstrapInitialIssuanceTimePeriodSec: bootstrapInitialIssuanceTimePeriodSec, Faucet: i == 0, } @@ -128,9 +133,12 @@ func (f *Framework) CreateNetworkWithPartitions(name string, peers, partitions, // create peers/GoShimmer nodes for i := 0; i < peers; i++ { - config := GoShimmerConfig{ - Bootstrap: i == 0, - } + config := GoShimmerConfig{Bootstrap: func(i int) bool { + if ParaBootstrapOnEveryNode { + return true + } + return i == 0 + }(i)} if _, err = network.CreatePeer(config); err != nil { return nil, err } @@ -236,7 +244,12 @@ func (f *Framework) CreateDRNGNetwork(name string, members, peers, minimumNeighb // create peers/GoShimmer nodes for i := 0; i < peers; i++ { - config.Bootstrap = i == 0 + config.Bootstrap = func(i int) bool { + if ParaBootstrapOnEveryNode { + return true + } + return i == 0 + }(i) config.Seed = privKeys[i].Seed().String() if _, err = drng.CreatePeer(config, pubKeys[i]); err != nil { return nil, err diff --git a/tools/integration-tests/tester/framework/network.go b/tools/integration-tests/tester/framework/network.go index 4580588f..d72337f1 100644 --- a/tools/integration-tests/tester/framework/network.go +++ b/tools/integration-tests/tester/framework/network.go @@ -100,11 +100,12 @@ func (n *Network) CreatePeer(c GoShimmerConfig) (*Peer, error) { config.EntryNodeHost = n.namePrefix(containerNameEntryNode) config.EntryNodePublicKey = n.entryNodePublicKey() config.DisabledPlugins = disabledPluginsPeer + config.SnapshotFilePath = snapshotFilePath // create wallet var nodeWallet *wallet.Wallet if c.Faucet == true { - nodeWallet = wallet.New(faucetSeed) + nodeWallet = wallet.New(genesisSeed) } else { nodeWallet = wallet.New() } diff --git a/tools/integration-tests/tester/framework/parameters.go b/tools/integration-tests/tester/framework/parameters.go index ddbb7046..6c8e63c6 100644 --- a/tools/integration-tests/tester/framework/parameters.go +++ b/tools/integration-tests/tester/framework/parameters.go @@ -13,28 +13,39 @@ const ( logsDir = "/tmp/logs/" - disabledPluginsEntryNode = 
"portcheck,dashboard,analysis-client,profiling,gossip,drng,issuer,sync,metrics,valuetransfers,testsnapshots,messagelayer,webapi,webapibroadcastdataendpoint,webapifindtransactionhashesendpoint,webapigetneighborsendpoint,webapigettransactionobjectsbyhashendpoint,webapigettransactiontrytesbyhashendpoint" + disabledPluginsEntryNode = "portcheck,dashboard,analysis-client,profiling,gossip,drng,issuer,sync,metrics,valuetransfers,messagelayer,webapi,webapibroadcastdataendpoint,webapifindtransactionhashesendpoint,webapigetneighborsendpoint,webapigettransactionobjectsbyhashendpoint,webapigettransactiontrytesbyhashendpoint" disabledPluginsPeer = "portcheck,dashboard,analysis-client,profiling" - - dockerLogsPrefixLen = 8 + snapshotFilePath = "/assets/7R1itJx5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih.bin" + dockerLogsPrefixLen = 8 dkgMaxTries = 50 exitStatusSuccessful = 0 ) +// Parameters to override before calling any peer creation function. +var ( + // ParaFCoBAverageNetworkDelay defines the configured avg. network delay (in seconds) for the FCOB rules. + ParaFCoBAverageNetworkDelay = 5 + // ParaOutboundUpdateIntervalMs the autopeering outbound update interval in milliseconds. + ParaOutboundUpdateIntervalMs = 100 + // ParaBootstrapOnEveryNode whether to enable the bootstrap plugin on every node. + ParaBootstrapOnEveryNode = false +) + var ( - faucetSeed = []byte{251, 163, 190, 98, 92, 82, 164, 79, 74, 48, 203, 162, 247, 119, 140, 76, 33, 100, 148, 204, 244, 248, 232, 18, - 132, 217, 85, 31, 246, 83, 193, 193} + genesisSeed = []byte{95, 76, 224, 164, 168, 80, 141, 174, 133, 77, 153, 100, 4, 202, 113, + 104, 71, 130, 88, 200, 46, 56, 243, 121, 216, 236, 70, 146, 234, 158, 206, 230} ) -// GoShimmerConfig defines the config of a GoShimmer node. +//GoShimmerConfig defines the config of a GoShimmer node. type GoShimmerConfig struct { Seed string Name string EntryNodeHost string EntryNodePublicKey string DisabledPlugins string + SnapshotFilePath string Bootstrap bool BootstrapInitialIssuanceTimePeriodSec int diff --git a/tools/integration-tests/tester/go.mod b/tools/integration-tests/tester/go.mod index 6fa168b7..f4891497 100644 --- a/tools/integration-tests/tester/go.mod +++ b/tools/integration-tests/tester/go.mod @@ -10,9 +10,10 @@ require ( github.com/docker/go-units v0.4.0 // indirect github.com/drand/drand v0.8.1 github.com/iotaledger/goshimmer v0.1.3 - github.com/iotaledger/hive.go v0.0.0-20200617164933-c48b4401b814 - github.com/opencontainers/go-digest v1.0.0-rc1 // indirect + github.com/mr-tron/base58 v1.1.3 + github.com/opencontainers/go-digest v1.0.0 // indirect github.com/stretchr/testify v1.6.1 + github.com/iotaledger/hive.go v0.0.0-20200617164933-c48b4401b814 ) replace github.com/iotaledger/goshimmer => ../../.. 
diff --git a/tools/integration-tests/tester/go.sum b/tools/integration-tests/tester/go.sum index 701c02c1..d7794210 100644 --- a/tools/integration-tests/tester/go.sum +++ b/tools/integration-tests/tester/go.sum @@ -121,6 +121,7 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= @@ -204,6 +205,8 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/panjf2000/ants/v2 v2.2.2 h1:TWzusBjq/IflXhy+/S6u5wmMLCBdJnB9tPIx9Zmhvok= github.com/panjf2000/ants/v2 v2.2.2/go.mod h1:1GFm8bV8nyCQvU5K4WvBCTG1/YBFOD2VzjffD8fV55A= github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= diff --git a/tools/integration-tests/tester/tests/consensus/consensus_conflicts_test.go b/tools/integration-tests/tester/tests/consensus/consensus_conflicts_test.go new file mode 100644 index 00000000..46fbc4ca --- /dev/null +++ b/tools/integration-tests/tester/tests/consensus/consensus_conflicts_test.go @@ -0,0 +1,209 @@ +package consensus + +import ( + "log" + "testing" + "time" + + "github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework" + + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address/signaturescheme" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/wallet" + "github.com/iotaledger/goshimmer/plugins/webapi/value/utils" + "github.com/iotaledger/goshimmer/tools/integration-tests/tester/tests" + "github.com/mr-tron/base58/base58" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestConsensusFiftyFiftyOpinionSplit spawns two network partitions with their own peers, +// then issues valid value objects spending the genesis in both, deletes the partitions (and lets them merge) +// and then checks that the conflicts are resolved via FPC. +func TestConsensusFiftyFiftyOpinionSplit(t *testing.T) { + + // override avg. 
network delay to accustom integration test slowness + backupFCoBAvgNetworkDelay := framework.ParaFCoBAverageNetworkDelay + backupBootstrapOnEveryNode := framework.ParaBootstrapOnEveryNode + framework.ParaFCoBAverageNetworkDelay = 90 + framework.ParaBootstrapOnEveryNode = true + + // reset framework paras + defer func() { + framework.ParaFCoBAverageNetworkDelay = backupFCoBAvgNetworkDelay + framework.ParaBootstrapOnEveryNode = backupBootstrapOnEveryNode + }() + + // create two partitions with their own peers + n, err := f.CreateNetworkWithPartitions("abc", 6, 2, 2) + require.NoError(t, err) + defer tests.ShutdownNetwork(t, n) + + // split the network + for i, partition := range n.Partitions() { + log.Printf("partition %d peers:", i) + for _, p := range partition.Peers() { + log.Println(p.ID().String()) + } + } + + // genesis wallet + genesisSeedBytes, err := base58.Decode("7R1itJx5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih") + require.NoError(t, err, "couldn't decode genesis seed from base58 seed") + + const genesisBalance = 1000000000 + genesisWallet := wallet.New(genesisSeedBytes) + genesisAddr := genesisWallet.Seed().Address(0) + genesisOutputID := transaction.NewOutputID(genesisAddr, transaction.GenesisID) + + // issue transactions which spend the same genesis output in all partitions + conflictingTxs := make([]*transaction.Transaction, len(n.Partitions())) + conflictingTxIDs := make([]string, len(n.Partitions())) + receiverWallets := make([]*wallet.Wallet, len(n.Partitions())) + for i, partition := range n.Partitions() { + + // create a new receiver wallet for the given partition + partitionReceiverWallet := wallet.New() + destAddr := partitionReceiverWallet.Seed().Address(0) + receiverWallets[i] = partitionReceiverWallet + tx := transaction.New( + transaction.NewInputs(genesisOutputID), + transaction.NewOutputs(map[address.Address][]*balance.Balance{ + destAddr: { + {Value: genesisBalance, Color: balance.ColorIOTA}, + }, + })) + tx = tx.Sign(signaturescheme.ED25519(*genesisWallet.Seed().KeyPair(0))) + conflictingTxs[i] = tx + + // issue the transaction on the first peer of the partition + issuerPeer := partition.Peers()[0] + txID, err := issuerPeer.SendTransaction(tx.Bytes()) + conflictingTxIDs[i] = txID + log.Printf("issued conflict transaction %s on partition %d on peer %s", txID, i, issuerPeer.ID().String()) + assert.NoError(t, err) + + // check that the transaction is actually available on all the peers of the partition + missing, err := tests.AwaitTransactionAvailability(partition.Peers(), []string{txID}, 15*time.Second) + if err != nil { + assert.NoError(t, err, "transactions should have been available in partition") + for p, missingOnPeer := range missing { + log.Printf("missing on peer %s:", p) + for missingTx := range missingOnPeer { + log.Println("tx id: ", missingTx) + } + } + return + } + + require.NoError(t, err) + } + + // sleep the avg. network delay so both partitions prefer their own first seen transaction + log.Printf("waiting %d seconds avg. 
network delay to make the transactions "+ + "preferred in their corresponding partition", framework.ParaFCoBAverageNetworkDelay) + time.Sleep(time.Duration(framework.ParaFCoBAverageNetworkDelay) * time.Second) + + // check that each partition is preferring its corresponding transaction + log.Println("checking that each partition likes its corresponding transaction before the conflict:") + for i, partition := range n.Partitions() { + tests.CheckTransactions(t, partition.Peers(), map[string]*tests.ExpectedTransaction{ + conflictingTxIDs[i]: nil, + }, true, tests.ExpectedInclusionState{ + Confirmed: tests.False(), + Finalized: tests.False(), + Conflicting: tests.False(), + Solid: tests.True(), + Rejected: tests.False(), + Liked: tests.True(), + Preferred: tests.True(), + }) + } + + // merge back the partitions + log.Println("merging partitions...") + assert.NoError(t, n.DeletePartitions(), "merging the network partitions should work") + log.Println("waiting for resolved partitions to autopeer to each other") + err = n.WaitForAutopeering(4) + require.NoError(t, err) + + // ensure message flow so that both partitions will get the conflicting tx + for _, p := range n.Peers() { + tests.SendDataMessage(t, p, []byte("DATA"), 10) + } + + log.Println("waiting for transactions to be available on all peers...") + missing, err := tests.AwaitTransactionAvailability(n.Peers(), conflictingTxIDs, 30*time.Second) + if err != nil { + assert.NoError(t, err, "transactions should have been available") + for p, missingOnPeer := range missing { + log.Printf("missing on peer %s:", p) + for missingTx := range missingOnPeer { + log.Println("tx id: ", missingTx) + } + } + return + } + + expectations := map[string]*tests.ExpectedTransaction{} + for _, conflictingTx := range conflictingTxs { + utilsTx := utils.ParseTransaction(conflictingTx) + expectations[conflictingTx.ID().String()] = &tests.ExpectedTransaction{ + Inputs: &utilsTx.Inputs, + Outputs: &utilsTx.Outputs, + Signature: &utilsTx.Signature, + } + } + + // check that the transactions are marked as conflicting + tests.CheckTransactions(t, n.Peers(), expectations, true, tests.ExpectedInclusionState{ + Finalized: tests.False(), + Conflicting: tests.True(), + Solid: tests.True(), + }) + + // wait until the voting has finalized + log.Println("waiting for voting/transaction finalization to be done on all peers...") + awaitFinalization := map[string]tests.ExpectedInclusionState{} + for _, conflictingTx := range conflictingTxs { + awaitFinalization[conflictingTx.ID().String()] = tests.ExpectedInclusionState{ + Finalized: tests.True(), + } + } + err = tests.AwaitTransactionInclusionState(n.Peers(), awaitFinalization, 2*time.Minute) + assert.NoError(t, err) + + // now all transactions must be finalized and at most one must be confirmed + var confirmedOverConflictSet int + for _, conflictingTx := range conflictingTxIDs { + var rejected, confirmed int + for _, p := range n.Peers() { + tx, err := p.GetTransactionByID(conflictingTx) + assert.NoError(t, err) + if tx.InclusionState.Confirmed { + confirmed++ + continue + } + if tx.InclusionState.Rejected { + rejected++ + } + } + + if rejected != 0 { + assert.Len(t, n.Peers(), rejected, "the rejected count for %s should be equal to the amount of peers", conflictingTx) + } + if confirmed != 0 { + assert.Len(t, n.Peers(), confirmed, "the confirmed count for %s should be equal to the amount of peers", conflictingTx) + confirmedOverConflictSet++ + } + + assert.False(t, rejected == 0 && confirmed == 0, "a transaction must either be 
rejected or confirmed") + } + + // there must only be one confirmed transaction out of the conflict set + if confirmedOverConflictSet != 0 { + assert.Equal(t, 1, confirmedOverConflictSet, "only one transaction can be confirmed out of the conflict set. %d of %d are confirmed", confirmedOverConflictSet, len(conflictingTxIDs)) + } +} diff --git a/tools/integration-tests/tester/tests/consensus/consensus_noconflicts_test.go b/tools/integration-tests/tester/tests/consensus/consensus_noconflicts_test.go new file mode 100644 index 00000000..badf4040 --- /dev/null +++ b/tools/integration-tests/tester/tests/consensus/consensus_noconflicts_test.go @@ -0,0 +1,129 @@ +package consensus + +import ( + "log" + "math/rand" + "testing" + "time" + + "github.com/iotaledger/goshimmer/dapps/valuetransfers" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address/signaturescheme" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction" + "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/wallet" + "github.com/iotaledger/goshimmer/plugins/webapi/value/utils" + "github.com/iotaledger/goshimmer/tools/integration-tests/tester/tests" + "github.com/mr-tron/base58/base58" + "github.com/stretchr/testify/require" +) + +// TestConsensusNoConflicts issues valid non-conflicting value objects and then checks +// whether the ledger of every peer reflects the same correct state. +func TestConsensusNoConflicts(t *testing.T) { + n, err := f.CreateNetwork("consensus_TestConsensusNoConflicts", 4, 2) + require.NoError(t, err) + defer tests.ShutdownNetwork(t, n) + + time.Sleep(5 * time.Second) + + // genesis wallet + genesisSeedBytes, err := base58.Decode("7R1itJx5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih") + require.NoError(t, err, "couldn't decode genesis seed from base58 seed") + + const genesisBalance = 1000000000 + genesisWallet := wallet.New(genesisSeedBytes) + genesisAddr := genesisWallet.Seed().Address(0) + genesisOutputID := transaction.NewOutputID(genesisAddr, transaction.GenesisID) + + firstReceiver := wallet.New() + const depositCount = 10 + const deposit = genesisBalance / depositCount + firstReceiverAddresses := make([]string, depositCount) + firstReceiverDepositAddrs := make([]address.Address, depositCount) + firstReceiverDepositOutputs := map[address.Address][]*balance.Balance{} + firstReceiverExpectedBalances := map[string]map[balance.Color]int64{} + for i := 0; i < depositCount; i++ { + addr := firstReceiver.Seed().Address(uint64(i)) + firstReceiverDepositAddrs[i] = addr + firstReceiverAddresses[i] = addr.String() + firstReceiverDepositOutputs[addr] = []*balance.Balance{{Value: deposit, Color: balance.ColorIOTA}} + firstReceiverExpectedBalances[addr.String()] = map[balance.Color]int64{balance.ColorIOTA: deposit} + } + + // issue transaction spending from the genesis output + log.Printf("issuing transaction spending genesis to %d addresses", depositCount) + tx := transaction.New(transaction.NewInputs(genesisOutputID), transaction.NewOutputs(firstReceiverDepositOutputs)) + tx = tx.Sign(signaturescheme.ED25519(*genesisWallet.Seed().KeyPair(0))) + utilsTx := utils.ParseTransaction(tx) + + txID, err := n.Peers()[0].SendTransaction(tx.Bytes()) + require.NoError(t, err) + + // wait for the transaction to be propagated through the network + // and it becoming preferred, finalized and confirmed + log.Println("waiting 2.5 avg. 
network delays") + time.Sleep(valuetransfers.DefaultAverageNetworkDelay*2 + valuetransfers.DefaultAverageNetworkDelay/2) + + // since we just issued a transaction spending the genesis output, there + // shouldn't be any UTXOs on the genesis address anymore + log.Println("checking that genesis has no UTXOs") + tests.CheckAddressOutputsFullyConsumed(t, n.Peers(), []string{genesisAddr.String()}) + + // since we waited 2.5 avg. network delays and there were no conflicting transactions, + // the transaction we just issued must be preferred, liked, finalized and confirmed + log.Println("check that the transaction is finalized/confirmed by all peers") + tests.CheckTransactions(t, n.Peers(), map[string]*tests.ExpectedTransaction{ + txID: {Inputs: &utilsTx.Inputs, Outputs: &utilsTx.Outputs, Signature: &utilsTx.Signature}, + }, true, tests.ExpectedInclusionState{ + Confirmed: tests.True(), Finalized: tests.True(), + Conflicting: tests.False(), Solid: tests.True(), + Rejected: tests.False(), Liked: tests.True(), + }) + + // check balances on peers + log.Println("ensure that all the peers have the same ledger state") + tests.CheckBalances(t, n.Peers(), firstReceiverExpectedBalances) + + // issue transactions spending all the outputs which were just created from a random peer + secondReceiverWallet := wallet.New() + secondReceiverAddresses := make([]string, depositCount) + secondReceiverExpectedBalances := map[string]map[balance.Color]int64{} + secondReceiverExpectedTransactions := map[string]*tests.ExpectedTransaction{} + for i := 0; i < depositCount; i++ { + addr := secondReceiverWallet.Seed().Address(uint64(i)) + tx := transaction.New( + transaction.NewInputs(transaction.NewOutputID(firstReceiver.Seed().Address(uint64(i)), tx.ID())), + transaction.NewOutputs(map[address.Address][]*balance.Balance{ + addr: {{Value: deposit, Color: balance.ColorIOTA}}, + }), + ) + secondReceiverAddresses[i] = addr.String() + tx = tx.Sign(signaturescheme.ED25519(*firstReceiver.Seed().KeyPair(uint64(i)))) + txID, err := n.Peers()[rand.Intn(len(n.Peers()))].SendTransaction(tx.Bytes()) + require.NoError(t, err) + + utilsTx := utils.ParseTransaction(tx) + secondReceiverExpectedBalances[addr.String()] = map[balance.Color]int64{balance.ColorIOTA: deposit} + secondReceiverExpectedTransactions[txID] = &tests.ExpectedTransaction{ + Inputs: &utilsTx.Inputs, Outputs: &utilsTx.Outputs, Signature: &utilsTx.Signature, + } + } + + // wait again some network delays for the transactions to materialize + log.Println("waiting 2.5 avg. 
network delays") + time.Sleep(valuetransfers.DefaultAverageNetworkDelay*2 + valuetransfers.DefaultAverageNetworkDelay/2) + log.Println("checking that first set of addresses contain no UTXOs") + tests.CheckAddressOutputsFullyConsumed(t, n.Peers(), firstReceiverAddresses) + log.Println("checking that the 2nd batch transactions are finalized/confirmed") + tests.CheckTransactions(t, n.Peers(), secondReceiverExpectedTransactions, true, + tests.ExpectedInclusionState{ + Confirmed: tests.True(), Finalized: tests.True(), + Conflicting: tests.False(), Solid: tests.True(), + Rejected: tests.False(), Liked: tests.True(), + }, + ) + + log.Println("check that the 2nd batch of receive addresses is the same on all peers") + tests.CheckBalances(t, n.Peers(), secondReceiverExpectedBalances) +} diff --git a/tools/integration-tests/tester/tests/consensus/main_test.go b/tools/integration-tests/tester/tests/consensus/main_test.go new file mode 100644 index 00000000..422928f9 --- /dev/null +++ b/tools/integration-tests/tester/tests/consensus/main_test.go @@ -0,0 +1,23 @@ +package consensus + +import ( + "os" + "testing" + + "github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework" +) + +var f *framework.Framework + +// TestMain gets called by the test utility and is executed before any other test in this package. +// It is therefore used to initialize the integration testing framework. +func TestMain(m *testing.M) { + var err error + f, err = framework.Instance() + if err != nil { + panic(err) + } + + // call the tests + os.Exit(m.Run()) +} diff --git a/tools/integration-tests/tester/tests/testutil.go b/tools/integration-tests/tester/tests/testutil.go index b47c5c12..3cf762bb 100644 --- a/tools/integration-tests/tester/tests/testutil.go +++ b/tools/integration-tests/tester/tests/testutil.go @@ -1,8 +1,11 @@ package tests import ( + "errors" "fmt" "math/rand" + "sync" + "sync/atomic" "testing" "time" @@ -11,11 +14,18 @@ import ( "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance" "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction" "github.com/iotaledger/goshimmer/packages/binary/messagelayer/payload" + "github.com/iotaledger/goshimmer/plugins/webapi/value/utils" "github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework" + "github.com/iotaledger/hive.go/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +var ( + ErrTransactionNotAvailableInTime = errors.New("transaction was not available in time") + ErrTransactionStateNotSameInTime = errors.New("transaction state did not materialize in time") +) + const maxRetry = 50 // DataMessageSent defines a struct to identify from which issuer a data message was sent. @@ -303,8 +313,8 @@ func SendColoredTransaction(t *testing.T, from *framework.Peer, to *framework.Pe // If the color is balance.ColorNew, it should be recolored with txId. func updateBalanceList(addrBalance map[string]map[balance.Color]int64, balances []*balance.Balance, from, to, txId string) { for _, b := range balances { - color := b.Color() - value := b.Value() + color := b.Color + value := b.Value if value < 0 { // deduct addrBalance[from][color] += value @@ -355,8 +365,65 @@ func CheckBalances(t *testing.T, peers []*framework.Peer, addrBalance map[string } } -// CheckTransactions performs checks to make sure that all peers have received all transactions . 
-func CheckTransactions(t *testing.T, peers []*framework.Peer, transactionIDs []string, checkSynchronized bool) { +// CheckAddressOutputsFullyConsumed performs checks to make sure that on all given peers, +// the given addresses have no UTXOs. +func CheckAddressOutputsFullyConsumed(t *testing.T, peers []*framework.Peer, addrs []string) { + for _, peer := range peers { + resp, err := peer.GetUnspentOutputs(addrs) + assert.NoError(t, err) + assert.Len(t, resp.Error, 0) + for i, utxos := range resp.UnspentOutputs { + assert.Len(t, utxos.OutputIDs, 0, "address %s should not have any UTXOs", addrs[i]) + } + } +} + +// ExpectedInclusionState is an expected inclusion state. +// All fields are optional. +type ExpectedInclusionState struct { + // The optional confirmed state to check against. + Confirmed *bool + // The optional finalized state to check against. + Finalized *bool + // The optional conflict state to check against. + Conflicting *bool + // The optional solid state to check against. + Solid *bool + // The optional rejected state to check against. + Rejected *bool + // The optional liked state to check against. + Liked *bool + // The optional preferred state to check against. + Preferred *bool +} + +// True returns a pointer to a true bool. +func True() *bool { + x := true + return &x +} + +// False returns a pointer to a false bool. +func False() *bool { + x := false + return &x +} + +// ExpectedTransaction defines the expected data of a transaction. +// All fields are optional. +type ExpectedTransaction struct { + // The optional input IDs to check against. + Inputs *[]string + // The optional outputs to check against. + Outputs *[]utils.Output + // The optional signature to check against. + Signature *[]byte +} + +// CheckTransactions performs checks to make sure that all peers have received all transactions. +// Optionally takes an expected inclusion state for all supplied transaction IDs and expected transaction +// data per transaction ID. 
+func CheckTransactions(t *testing.T, peers []*framework.Peer, transactionIDs map[string]*ExpectedTransaction, checkSynchronized bool, expectedInclusionState ExpectedInclusionState) { for _, peer := range peers { if checkSynchronized { // check that the peer sees itself as synchronized @@ -365,15 +432,145 @@ func CheckTransactions(t *testing.T, peers []*framework.Peer, transactionIDs []s require.True(t, info.Synced) } - for _, txId := range transactionIDs { + for txId, expectedTransaction := range transactionIDs { resp, err := peer.GetTransactionByID(txId) require.NoError(t, err) // check inclusion state - assert.True(t, resp.InclusionState.Confirmed) - assert.False(t, resp.InclusionState.Rejected) + if expectedInclusionState.Confirmed != nil { + assert.Equal(t, *expectedInclusionState.Confirmed, resp.InclusionState.Confirmed, "confirmed state doesn't match - %s", txId) + } + if expectedInclusionState.Conflicting != nil { + assert.Equal(t, *expectedInclusionState.Conflicting, resp.InclusionState.Conflicting, "conflict state doesn't match - %s", txId) + } + if expectedInclusionState.Solid != nil { + assert.Equal(t, *expectedInclusionState.Solid, resp.InclusionState.Solid, "solid state doesn't match - %s", txId) + } + if expectedInclusionState.Rejected != nil { + assert.Equal(t, *expectedInclusionState.Rejected, resp.InclusionState.Rejected, "rejected state doesn't match - %s", txId) + } + if expectedInclusionState.Liked != nil { + assert.Equal(t, *expectedInclusionState.Liked, resp.InclusionState.Liked, "liked state doesn't match - %s", txId) + } + if expectedInclusionState.Preferred != nil { + assert.Equal(t, *expectedInclusionState.Preferred, resp.InclusionState.Preferred, "preferred state doesn't match - %s", txId) + } + + if expectedTransaction != nil { + if expectedTransaction.Inputs != nil { + assert.Equal(t, *expectedTransaction.Inputs, resp.Transaction.Inputs, "inputs do not match - %s", txId) + } + if expectedTransaction.Outputs != nil { + assert.Equal(t, *expectedTransaction.Outputs, resp.Transaction.Outputs, "outputs do not match - %s", txId) + } + if expectedTransaction.Signature != nil { + assert.Equal(t, *expectedTransaction.Signature, resp.Transaction.Signature, "signatures do not match - %s", txId) + } + } + } + } +} + +// AwaitTransactionAvailability awaits until the given transaction IDs become available on all given peers or +// the max duration is reached. Returns a map of missing transactions per peer. An error is returned if at least +// one peer does not have all specified transactions available. 
+func AwaitTransactionAvailability(peers []*framework.Peer, transactionIDs []string, maxAwait time.Duration) (missing map[string]map[string]types.Empty, err error) { + s := time.Now() + var missingMu sync.Mutex + missing = map[string]map[string]types.Empty{} + for ; time.Since(s) < maxAwait; time.Sleep(500 * time.Millisecond) { + var wg sync.WaitGroup + wg.Add(len(peers)) + counter := int32(len(peers) * len(transactionIDs)) + for _, p := range peers { + go func(p *framework.Peer) { + defer wg.Done() + for _, txID := range transactionIDs { + _, err := p.GetTransactionByID(txID) + if err == nil { + missingMu.Lock() + m, has := missing[p.ID().String()] + if has { + delete(m, txID) + if len(m) == 0 { + delete(missing, p.ID().String()) + } + } + missingMu.Unlock() + atomic.AddInt32(&counter, -1) + continue + } + missingMu.Lock() + m, has := missing[p.ID().String()] + if !has { + m = map[string]types.Empty{} + } + m[txID] = types.Empty{} + missing[p.ID().String()] = m + missingMu.Unlock() + } + }(p) + } + wg.Wait() + if counter == 0 { + // everything available + return missing, nil + } + } + return missing, ErrTransactionNotAvailableInTime +} + +// AwaitTransactionInclusionState awaits on all given peers until the specified transactions +// have the expected state or max duration is reached. This function does not gracefully +// handle the transactions not existing on the given peers, therefore it must be ensured +// that the transactions exist beforehand. +func AwaitTransactionInclusionState(peers []*framework.Peer, transactionIDs map[string]ExpectedInclusionState, maxAwait time.Duration) error { + s := time.Now() + for ; time.Since(s) < maxAwait; time.Sleep(1 * time.Second) { + var wg sync.WaitGroup + wg.Add(len(peers)) + counter := int32(len(peers) * len(transactionIDs)) + for _, p := range peers { + go func(p *framework.Peer) { + defer wg.Done() + for txID := range transactionIDs { + tx, err := p.GetTransactionByID(txID) + if err != nil { + continue + } + expInclState := transactionIDs[txID] + if expInclState.Confirmed != nil && *expInclState.Confirmed != tx.InclusionState.Confirmed { + continue + } + if expInclState.Conflicting != nil && *expInclState.Conflicting != tx.InclusionState.Conflicting { + continue + } + if expInclState.Finalized != nil && *expInclState.Finalized != tx.InclusionState.Finalized { + continue + } + if expInclState.Liked != nil && *expInclState.Liked != tx.InclusionState.Liked { + continue + } + if expInclState.Preferred != nil && *expInclState.Preferred != tx.InclusionState.Preferred { + continue + } + if expInclState.Rejected != nil && *expInclState.Rejected != tx.InclusionState.Rejected { + continue + } + if expInclState.Solid != nil && *expInclState.Solid != tx.InclusionState.Solid { + continue + } + atomic.AddInt32(&counter, -1) + } + }(p) + } + wg.Wait() + if counter == 0 { + // all transactions have the expected state + return nil } } + return ErrTransactionStateNotSameInTime } // ShutdownNetwork shuts down the network and reports errors. 
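Note (illustrative only, not part of the diff): the new testutil helpers are meant to be layered. A minimal sketch of the intended composition, using only the APIs introduced in this patch; the wrapper name awaitAndCheck and its arguments (peers, txID) are hypothetical placeholders:

package consensus

import (
	"testing"
	"time"

	"github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework"
	"github.com/iotaledger/goshimmer/tools/integration-tests/tester/tests"
	"github.com/stretchr/testify/require"
)

// awaitAndCheck is a hypothetical convenience wrapper showing how the new
// testutil primitives compose: first await availability, then await the
// desired inclusion state, finally assert the full expectation on every peer.
func awaitAndCheck(t *testing.T, peers []*framework.Peer, txID string) {
	// 1. the transaction must be known to every peer
	_, err := tests.AwaitTransactionAvailability(peers, []string{txID}, 30*time.Second)
	require.NoError(t, err)

	// 2. wait until the transaction is finalized everywhere
	err = tests.AwaitTransactionInclusionState(peers, map[string]tests.ExpectedInclusionState{
		txID: {Finalized: tests.True()},
	}, 2*time.Minute)
	require.NoError(t, err)

	// 3. assert the final state; a nil ExpectedTransaction skips the data checks
	tests.CheckTransactions(t, peers, map[string]*tests.ExpectedTransaction{txID: nil}, true,
		tests.ExpectedInclusionState{Confirmed: tests.True(), Rejected: tests.False()},
	)
}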
diff --git a/tools/integration-tests/tester/tests/value/value_test.go b/tools/integration-tests/tester/tests/value/value_test.go index a5fd1ea0..316e661d 100644 --- a/tools/integration-tests/tester/tests/value/value_test.go +++ b/tools/integration-tests/tester/tests/value/value_test.go @@ -18,27 +18,37 @@ func TestTransactionPersistence(t *testing.T) { // wait for peers to change their state to synchronized time.Sleep(5 * time.Second) - // faucet node sends 100 IOTA tokens to all peers in the network - txIds, addrBalance := tests.SendTransactionFromFaucet(t, n.Peers(), 100) + // master node sends funds to all peers in the network + txIdsSlice, addrBalance := tests.SendTransactionFromFaucet(t, n.Peers(), 100) + txIds := make(map[string]*tests.ExpectedTransaction) + for _, txID := range txIdsSlice { + txIds[txID] = nil + } // wait for messages to be gossiped - time.Sleep(2 * valuetransfers.AverageNetworkDelay) + time.Sleep(2 * valuetransfers.DefaultAverageNetworkDelay) // check whether the first issued transaction is available on all nodes, and confirmed - tests.CheckTransactions(t, n.Peers(), txIds, true) + tests.CheckTransactions(t, n.Peers(), txIds, true, tests.ExpectedInclusionState{ + Confirmed: tests.True(), + }) // check ledger state tests.CheckBalances(t, n.Peers(), addrBalance) // send value message randomly randomTxIds := tests.SendTransactionOnRandomPeer(t, n.Peers(), addrBalance, 10, 100) - txIds = append(txIds, randomTxIds...) + for _, randomTxId := range randomTxIds { + txIds[randomTxId] = nil + } // wait for messages to be gossiped - time.Sleep(2 * valuetransfers.AverageNetworkDelay) + time.Sleep(2 * valuetransfers.DefaultAverageNetworkDelay) // check whether all issued transactions are available on all nodes and confirmed - tests.CheckTransactions(t, n.Peers(), txIds, true) + tests.CheckTransactions(t, n.Peers(), txIds, true, tests.ExpectedInclusionState{ + Confirmed: tests.True(), + }) // check ledger state tests.CheckBalances(t, n.Peers(), addrBalance) @@ -59,7 +69,9 @@ func TestTransactionPersistence(t *testing.T) { time.Sleep(20 * time.Second) // check whether all issued transactions are available on all nodes and confirmed - tests.CheckTransactions(t, n.Peers(), txIds, true) + tests.CheckTransactions(t, n.Peers(), txIds, true, tests.ExpectedInclusionState{ + Confirmed: tests.True(), + }) // 5. check ledger state tests.CheckBalances(t, n.Peers(), addrBalance) @@ -75,26 +87,36 @@ func TestValueColoredPersistence(t *testing.T) { time.Sleep(5 * time.Second) // master node sends funds to all peers in the network - txIds, addrBalance := tests.SendTransactionFromFaucet(t, n.Peers(), 100) + txIdsSlice, addrBalance := tests.SendTransactionFromFaucet(t, n.Peers(), 100) + txIds := make(map[string]*tests.ExpectedTransaction) + for _, txID := range txIdsSlice { + txIds[txID] = nil + } // wait for messages to be gossiped - time.Sleep(2 * valuetransfers.AverageNetworkDelay) + time.Sleep(2 * valuetransfers.DefaultAverageNetworkDelay) // check whether the transactions are available on all nodes, and confirmed - tests.CheckTransactions(t, n.Peers(), txIds, true) + tests.CheckTransactions(t, n.Peers(), txIds, true, tests.ExpectedInclusionState{ + Confirmed: tests.True(), + }) // check ledger state tests.CheckBalances(t, n.Peers(), addrBalance) // send funds around randomTxIds := tests.SendColoredTransactionOnRandomPeer(t, n.Peers(), addrBalance, 10) - txIds = append(txIds, randomTxIds...) 
+ for _, randomTxId := range randomTxIds { + txIds[randomTxId] = nil + } // wait for value messages to be gossiped - time.Sleep(2 * valuetransfers.AverageNetworkDelay) + time.Sleep(2 * valuetransfers.DefaultAverageNetworkDelay) // check whether all issued transactions are persistently available on all nodes, and confirmed - tests.CheckTransactions(t, n.Peers(), txIds, true) + tests.CheckTransactions(t, n.Peers(), txIds, true, tests.ExpectedInclusionState{ + Confirmed: tests.True(), + }) // check ledger state tests.CheckBalances(t, n.Peers(), addrBalance) @@ -115,7 +137,9 @@ func TestValueColoredPersistence(t *testing.T) { time.Sleep(20 * time.Second) // check whether all issued transactions are persistently available on all nodes, and confirmed - tests.CheckTransactions(t, n.Peers(), txIds, true) + tests.CheckTransactions(t, n.Peers(), txIds, true, tests.ExpectedInclusionState{ + Confirmed: tests.True(), + }) // 5. check ledger state tests.CheckBalances(t, n.Peers(), addrBalance) -- GitLab
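Closing note (illustrative only): both Await helpers in testutil.go follow the same poll-until-deadline pattern — one goroutine per peer, an atomic counter of outstanding checks, and a retry loop bounded by maxAwait. A generalized sketch of that pattern for anyone adding further Await helpers later; the names awaitOnAllPeers and ErrConditionNotMetInTime, and the check callback, are hypothetical:

package tests

import (
	"errors"
	"sync"
	"sync/atomic"
	"time"

	"github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework"
)

// ErrConditionNotMetInTime is a hypothetical error for the generic variant.
var ErrConditionNotMetInTime = errors.New("condition was not met in time")

// awaitOnAllPeers is a hypothetical generalization of the polling pattern used by
// AwaitTransactionAvailability and AwaitTransactionInclusionState: check is
// evaluated on every peer until it holds everywhere or maxAwait elapses.
func awaitOnAllPeers(peers []*framework.Peer, check func(*framework.Peer) bool, maxAwait time.Duration) error {
	start := time.Now()
	for ; time.Since(start) < maxAwait; time.Sleep(500 * time.Millisecond) {
		var wg sync.WaitGroup
		wg.Add(len(peers))
		// number of peers whose condition is still unmet in this polling round
		unmet := int32(len(peers))
		for _, p := range peers {
			go func(p *framework.Peer) {
				defer wg.Done()
				if check(p) {
					atomic.AddInt32(&unmet, -1)
				}
			}(p)
		}
		wg.Wait()
		if atomic.LoadInt32(&unmet) == 0 {
			// every peer satisfied the condition
			return nil
		}
	}
	return ErrConditionNotMetInTime
}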