diff --git a/test/cli/dht_legacy_test.go b/test/cli/dht_legacy_test.go
index 437b62ae4ea..5a84b89631b 100644
--- a/test/cli/dht_legacy_test.go
+++ b/test/cli/dht_legacy_test.go
@@ -13,6 +13,7 @@ import (
 )
 
 func TestLegacyDHT(t *testing.T) {
+	t.Parallel()
 	nodes := harness.NewT(t).NewNodes(5).Init()
 	nodes.ForEachPar(func(node *harness.Node) {
 		node.IPFS("config", "Routing.Type", "dht")
diff --git a/test/cli/harness/nodes.go b/test/cli/harness/nodes.go
index 78662afbbea..113289e3cfb 100644
--- a/test/cli/harness/nodes.go
+++ b/test/cli/harness/nodes.go
@@ -57,8 +57,8 @@ func (n Nodes) Connect() Nodes {
 	return n
 }
 
-func (n Nodes) StartDaemons() Nodes {
-	ForEachPar(n, func(node *Node) { node.StartDaemon() })
+func (n Nodes) StartDaemons(args ...string) Nodes {
+	ForEachPar(n, func(node *Node) { node.StartDaemon(args...) })
 	return n
 }
 
diff --git a/test/cli/routing_dht_test.go b/test/cli/routing_dht_test.go
new file mode 100644
index 00000000000..7dc0ddfcb45
--- /dev/null
+++ b/test/cli/routing_dht_test.go
@@ -0,0 +1,123 @@
+package cli
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/ipfs/kubo/test/cli/harness"
+	"github.com/ipfs/kubo/test/cli/testutils"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func testRoutingDHT(t *testing.T, enablePubsub bool) {
+	t.Run(fmt.Sprintf("enablePubSub=%v", enablePubsub), func(t *testing.T) {
+		t.Parallel()
+		nodes := harness.NewT(t).NewNodes(5).Init()
+		nodes.ForEachPar(func(node *harness.Node) {
+			node.IPFS("config", "Routing.Type", "dht")
+		})
+
+		var daemonArgs []string
+		if enablePubsub {
+			daemonArgs = []string{
+				"--enable-pubsub-experiment",
+				"--enable-namesys-pubsub",
+			}
+		}
+
+		nodes.StartDaemons(daemonArgs...).Connect()
+
+		t.Run("ipfs routing findpeer", func(t *testing.T) {
+			t.Parallel()
+			res := nodes[1].RunIPFS("routing", "findpeer", nodes[0].PeerID().String())
+			assert.Equal(t, 0, res.ExitCode())
+
+			swarmAddr := nodes[0].SwarmAddrsWithoutPeerIDs()[0]
+			require.Equal(t, swarmAddr.String(), res.Stdout.Trimmed())
+		})
+
+		t.Run("ipfs routing get <key>", func(t *testing.T) {
+			t.Parallel()
+			hash := nodes[2].IPFSAddStr("hello world")
+			nodes[2].IPFS("name", "publish", "/ipfs/"+hash)
+
+			res := nodes[1].IPFS("routing", "get", "/ipns/"+nodes[2].PeerID().String())
+			assert.Contains(t, res.Stdout.String(), "/ipfs/"+hash)
+
+			t.Run("put round trips (#3124)", func(t *testing.T) {
+				t.Parallel()
+				nodes[0].WriteBytes("get_result", res.Stdout.Bytes())
+				res := nodes[0].IPFS("routing", "put", "/ipns/"+nodes[2].PeerID().String(), "get_result")
+				assert.Greater(t, len(res.Stdout.Lines()), 0, "should put to at least one node")
+			})
+
+			t.Run("put with bad keys fails (issue #5113, #4611)", func(t *testing.T) {
+				t.Parallel()
+				keys := []string{"foo", "/pk/foo", "/ipns/foo"}
+				for _, key := range keys {
+					key := key
+					t.Run(key, func(t *testing.T) {
+						t.Parallel()
+						res := nodes[0].RunIPFS("routing", "put", key)
+						assert.Equal(t, 1, res.ExitCode())
+						assert.Contains(t, res.Stderr.String(), "invalid")
+						assert.Empty(t, res.Stdout.String())
+					})
+				}
+			})
+
+			t.Run("get with bad keys (issue #4611)", func(t *testing.T) {
+				for _, key := range []string{"foo", "/pk/foo"} {
+					key := key
+					t.Run(key, func(t *testing.T) {
+						t.Parallel()
+						res := nodes[0].RunIPFS("routing", "get", key)
+						assert.Equal(t, 1, res.ExitCode())
+						assert.Contains(t, res.Stderr.String(), "invalid")
+						assert.Empty(t, res.Stdout.String())
+					})
+				}
+			})
+		})
+
+		t.Run("ipfs routing findprovs", func(t *testing.T) {
+			t.Parallel()
+			hash := nodes[3].IPFSAddStr("some stuff")
stuff") + res := nodes[4].IPFS("routing", "findprovs", hash) + assert.Equal(t, nodes[3].PeerID().String(), res.Stdout.Trimmed()) + }) + + t.Run("routing commands fail when offline", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init() + + // these cannot be run in parallel due to repo locking + // this seems like a bug, we should be able to run these without locking the repo + + t.Run("routing findprovs", func(t *testing.T) { + res := node.RunIPFS("routing", "findprovs", testutils.CIDEmptyDir) + assert.Equal(t, 1, res.ExitCode()) + assert.Contains(t, res.Stderr.String(), "this command must be run in online mode") + }) + + t.Run("routing findpeer", func(t *testing.T) { + res := node.RunIPFS("routing", "findpeer", testutils.CIDEmptyDir) + assert.Equal(t, 1, res.ExitCode()) + assert.Contains(t, res.Stderr.String(), "this command must be run in online mode") + }) + + t.Run("routing put", func(t *testing.T) { + node.WriteBytes("foo", []byte("foo")) + res := node.RunIPFS("routing", "put", "/ipns/"+node.PeerID().String(), "foo") + assert.Equal(t, 1, res.ExitCode()) + assert.Contains(t, res.Stderr.String(), "this action must be run in online mode") + }) + }) + }) +} + +func TestRoutingDHT(t *testing.T) { + testRoutingDHT(t, false) + testRoutingDHT(t, true) +} diff --git a/test/sharness/t0170-routing-dht.sh b/test/sharness/t0170-routing-dht.sh deleted file mode 100755 index e6e9940f28a..00000000000 --- a/test/sharness/t0170-routing-dht.sh +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/env bash - -# This file does the same tests as t0170-dht.sh but uses 'routing' commands instead -# (only exception is query, which lives only under dht) -test_description="Test routing command for DHT queries" - -. lib/test-lib.sh - -test_dht() { - NUM_NODES=5 - - test_expect_success 'init iptb' ' - rm -rf .iptb/ && - iptb testbed create -type localipfs -count $NUM_NODES -init - ' - - test_expect_success 'DHT-only routing' ' - iptb run -- ipfs config Routing.Type dht - ' - - startup_cluster $NUM_NODES $@ - - test_expect_success 'peer ids' ' - PEERID_0=$(iptb attr get 0 id) && - PEERID_2=$(iptb attr get 2 id) - ' - - # ipfs routing findpeer - test_expect_success 'findpeer' ' - ipfsi 1 routing findpeer $PEERID_0 | sort >actual && - ipfsi 0 id -f "" | cut -d / -f 1-5 | sort >expected && - test_cmp actual expected - ' - - # ipfs routing get - test_expect_success 'get with good keys works' ' - HASH="$(echo "hello world" | ipfsi 2 add -q)" && - ipfsi 2 name publish "/ipfs/$HASH" && - ipfsi 1 routing get "/ipns/$PEERID_2" >get_result - ' - - test_expect_success 'get with good keys contains the right value' ' - cat get_result | grep -aq "/ipfs/$HASH" - ' - - test_expect_success 'put round trips (#3124)' ' - ipfsi 0 routing put "/ipns/$PEERID_2" get_result | sort >putted && - [ -s putted ] || - test_fsh cat putted - ' - - test_expect_success 'put with bad keys fails (issue #5113)' ' - ipfsi 0 routing put "foo" <<putted - ipfsi 0 routing put "/pk/foo" <<>putted - ipfsi 0 routing put "/ipns/foo" <<>putted - [ ! 
-    test_fsh cat putted
-  '
-
-  test_expect_success 'put with bad keys returns error (issue #4611)' '
-    test_must_fail ipfsi 0 routing put "foo" <<<bar &&
-    test_must_fail ipfsi 0 routing put "/pk/foo" <<<bar &&
-    test_must_fail ipfsi 0 routing put "/ipns/foo" <<<bar
-  '
-
-  test_expect_success 'get with bad keys (issue #4611)' '
-    test_must_fail ipfsi 0 routing get "foo" &&
-    test_must_fail ipfsi 0 routing get "/pk/foo"
-  '
-
-  test_expect_success "add a ref so we can find providers for it" '
-    echo "some stuff" > afile &&
-    HASH=$(ipfsi 3 add -q afile)
-  '
-
-  # ipfs routing findprovs <key>
-  test_expect_success 'findprovs' '
-    ipfsi 4 routing findprovs $HASH > provs &&
-    iptb attr get 3 id > expected &&
-    test_cmp provs expected
-  '
-
-  # ipfs routing get --enc=json has correct properties
-  test_expect_success 'routing get --enc=json has correct properties' '
-    HASH="$(echo "hello world" | ipfsi 2 add -q)" &&
-    ipfsi 2 name publish "/ipfs/$HASH" &&
-    ipfsi 1 routing get --enc=json "/ipns/$PEERID_2" | jq -e "has(\"Extra\") and has(\"Type\")"
-  '
-
-  # ipfs dht query <peerID>
-  #
-  # We test all nodes. 4 nodes should see the same peer ID, one node (the
-  # closest) should see a different one.
-
-  for i in $(test_seq 0 4); do
-    test_expect_success "dht query from $i" '
-      ipfsi "$i" dht query "$HASH" | head -1 >closest-$i
-    '
-  done
-
-  test_expect_success "collecting results" '
-    cat closest-* | sort | uniq -c | sed -e "s/ *\([0-9]\+\) .*/\1/g" | sort -g > actual &&
-    echo 1 > expected &&
-    echo 4 >> expected
-  '
-
-  test_expect_success "checking results" '
-    test_cmp actual expected
-  '
-
-  test_expect_success 'stop iptb' '
-    iptb stop
-  '
-
-  test_expect_success "dht commands fail when offline" '
-    test_must_fail ipfsi 0 routing findprovs "$HASH" 2>err_findprovs &&
-    test_must_fail ipfsi 0 routing findpeer "$HASH" 2>err_findpeer &&
-    test_must_fail ipfsi 0 routing put "/ipns/$PEERID_2" "get_result" 2>err_put &&
-    test_should_contain "this command must be run in online mode" err_findprovs &&
-    test_should_contain "this command must be run in online mode" err_findpeer &&
-    test_should_contain "this action must be run in online mode" err_put
-  '
-}
-
-test_dht
-test_dht --enable-pubsub-experiment --enable-namesys-pubsub
-
-test_done