diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go
index 00c09d77a7b0..99cd07988dda 100644
--- a/core/commands/commands_test.go
+++ b/core/commands/commands_test.go
@@ -117,11 +117,6 @@ func TestCommands(t *testing.T) {
 		"/dag/resolve",
 		"/dag/stat",
 		"/dht",
-		"/dht/findpeer",
-		"/dht/findprovs",
-		"/dht/get",
-		"/dht/provide",
-		"/dht/put",
 		"/dht/query",
 		"/routing",
 		"/routing/put",
diff --git a/core/commands/dht.go b/core/commands/dht.go
index 95ac187f5908..c86b6262f8b0 100644
--- a/core/commands/dht.go
+++ b/core/commands/dht.go
@@ -21,65 +21,10 @@ var DhtCmd = &cmds.Command{
 	},
 
 	Subcommands: map[string]*cmds.Command{
-		"query":     queryDhtCmd,
-		"findprovs": findProvidersDhtCmd,
-		"findpeer":  findPeerDhtCmd,
-		"get":       getValueDhtCmd,
-		"put":       putValueDhtCmd,
-		"provide":   provideRefDhtCmd,
+		"query": queryDhtCmd,
 	},
 }
 
-var findProvidersDhtCmd = &cmds.Command{
-	Helptext:  findProvidersRoutingCmd.Helptext,
-	Arguments: findProvidersRoutingCmd.Arguments,
-	Options:   findProvidersRoutingCmd.Options,
-	Run:       findProvidersRoutingCmd.Run,
-	Encoders:  findProvidersRoutingCmd.Encoders,
-	Type:      findProvidersRoutingCmd.Type,
-	Status:    cmds.Deprecated,
-}
-
-var findPeerDhtCmd = &cmds.Command{
-	Helptext:  findPeerRoutingCmd.Helptext,
-	Arguments: findPeerRoutingCmd.Arguments,
-	Options:   findPeerRoutingCmd.Options,
-	Run:       findPeerRoutingCmd.Run,
-	Encoders:  findPeerRoutingCmd.Encoders,
-	Type:      findPeerRoutingCmd.Type,
-	Status:    cmds.Deprecated,
-}
-
-var getValueDhtCmd = &cmds.Command{
-	Helptext:  getValueRoutingCmd.Helptext,
-	Arguments: getValueRoutingCmd.Arguments,
-	Options:   getValueRoutingCmd.Options,
-	Run:       getValueRoutingCmd.Run,
-	Encoders:  getValueRoutingCmd.Encoders,
-	Type:      getValueRoutingCmd.Type,
-	Status:    cmds.Deprecated,
-}
-
-var putValueDhtCmd = &cmds.Command{
-	Helptext:  putValueRoutingCmd.Helptext,
-	Arguments: putValueRoutingCmd.Arguments,
-	Options:   putValueRoutingCmd.Options,
-	Run:       putValueRoutingCmd.Run,
-	Encoders:  putValueRoutingCmd.Encoders,
-	Type:      putValueRoutingCmd.Type,
-	Status:    cmds.Deprecated,
-}
-
-var provideRefDhtCmd = &cmds.Command{
-	Helptext:  provideRefRoutingCmd.Helptext,
-	Arguments: provideRefRoutingCmd.Arguments,
-	Options:   provideRefRoutingCmd.Options,
-	Run:       provideRefRoutingCmd.Run,
-	Encoders:  provideRefRoutingCmd.Encoders,
-	Type:      provideRefRoutingCmd.Type,
-	Status:    cmds.Deprecated,
-}
-
 // kademlia extends the routing interface with a command to get the peers closest to the target
 type kademlia interface {
 	routing.Routing
diff --git a/docs/file-transfer.md b/docs/file-transfer.md
index a1a1d1c59f9b..360bb57ac7bd 100644
--- a/docs/file-transfer.md
+++ b/docs/file-transfer.md
@@ -68,7 +68,7 @@ pitfalls that people run into)
 ### Checking providers
 When requesting content on ipfs, nodes search the DHT for 'provider records'
 to see who has what content. Let's manually do that on node B to make sure that
-node B is able to determine that node A has the data. Run `ipfs dht findprovs
+node B is able to determine that node A has the data. Run `ipfs routing findprovs
 <hash>`. We expect to see the peer ID of node A printed out. If this command
 returns nothing (or returns IDs that are not node A), then no record of A
 having the data exists on the network. This can happen if the data is added
@@ -85,7 +85,7 @@ In the case where node B simply cannot form a connection to node A, despite
 knowing that it needs to, the likely culprit is a bad NAT. When node B learns
 that it needs to connect to node A, it checks the DHT for addresses for node A,
 and then starts trying to connect to them. We can check those addresses by
-running `ipfs dht findpeer <peerID>` on node B. This command should
+running `ipfs routing findpeer <peerID>` on node B. This command should
 return a list of addresses for node A. If it doesn't return any addresses, then
 you should try running the manual providing command from the previous steps.
 Example output of addresses might look something like this:
diff --git a/test/cli/basic_commands_test.go b/test/cli/basic_commands_test.go
index b4bb2c182cf3..69b0cc63bc8a 100644
--- a/test/cli/basic_commands_test.go
+++ b/test/cli/basic_commands_test.go
@@ -154,7 +154,6 @@ func TestCommandDocsWidth(t *testing.T) {
 		"ipfs pin remote rm": true,
 		"ipfs pin remote ls": true,
 		"ipfs pin verify": true,
-		"ipfs dht get": true,
 		"ipfs pin remote service add": true,
 		"ipfs pin update": true,
 		"ipfs pin rm": true,
@@ -167,7 +166,6 @@ func TestCommandDocsWidth(t *testing.T) {
 		"ipfs name": true,
 		"ipfs object patch append-data": true,
 		"ipfs object patch set-data": true,
-		"ipfs dht put": true,
 		"ipfs diag profile": true,
 		"ipfs diag cmds": true,
 		"ipfs swarm addrs local": true,
diff --git a/test/cli/dht_legacy_test.go b/test/cli/dht_legacy_test.go
deleted file mode 100644
index cfcb4f0cd09e..000000000000
--- a/test/cli/dht_legacy_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package cli
-
-import (
-	"sort"
-	"sync"
-	"testing"
-
-	"github.com/ipfs/kubo/test/cli/harness"
-	"github.com/ipfs/kubo/test/cli/testutils"
-	"github.com/libp2p/go-libp2p/core/peer"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func TestLegacyDHT(t *testing.T) {
-	t.Parallel()
-	nodes := harness.NewT(t).NewNodes(5).Init()
-	nodes.ForEachPar(func(node *harness.Node) {
-		node.IPFS("config", "Routing.Type", "dht")
-	})
-	nodes.StartDaemons().Connect()
-
-	t.Run("ipfs dht findpeer", func(t *testing.T) {
-		t.Parallel()
-		res := nodes[1].RunIPFS("dht", "findpeer", nodes[0].PeerID().String())
-		assert.Equal(t, 0, res.ExitCode())
-
-		swarmAddr := nodes[0].SwarmAddrsWithoutPeerIDs()[0]
-		require.Equal(t, swarmAddr.String(), res.Stdout.Trimmed())
-	})
-
-	t.Run("ipfs dht get <key>", func(t *testing.T) {
-		t.Parallel()
-		hash := nodes[2].IPFSAddStr("hello world")
-		nodes[2].IPFS("name", "publish", "/ipfs/"+hash)
-
-		res := nodes[1].IPFS("dht", "get", "/ipns/"+nodes[2].PeerID().String())
-		assert.Contains(t, res.Stdout.String(), "/ipfs/"+hash)
-
-		t.Run("put round trips (#3124)", func(t *testing.T) {
-			t.Parallel()
-			nodes[0].WriteBytes("get_result", res.Stdout.Bytes())
-			res := nodes[0].IPFS("dht", "put", "/ipns/"+nodes[2].PeerID().String(), "get_result")
-			assert.Greater(t, len(res.Stdout.Lines()), 0, "should put to at least one node")
-		})
-
-		t.Run("put with bad keys fails (issue #5113, #4611)", func(t *testing.T) {
-			t.Parallel()
-			keys := []string{"foo", "/pk/foo", "/ipns/foo"}
-			for _, key := range keys {
-				key := key
-				t.Run(key, func(t *testing.T) {
-					t.Parallel()
-					res := nodes[0].RunIPFS("dht", "put", key)
-					assert.Equal(t, 1, res.ExitCode())
-					assert.Contains(t, res.Stderr.String(), "invalid")
-					assert.Empty(t, res.Stdout.String())
-				})
-			}
-		})
-
-		t.Run("get with bad keys (issue #4611)", func(t *testing.T) {
-			for _, key := range []string{"foo", "/pk/foo"} {
-				key := key
-				t.Run(key, func(t *testing.T) {
-					t.Parallel()
-					res := nodes[0].RunIPFS("dht", "get", key)
-					assert.Equal(t, 1, res.ExitCode())
-					assert.Contains(t, res.Stderr.String(), "invalid")
-					assert.Empty(t, res.Stdout.String())
-				})
-			}
-		})
-	})
-
-	t.Run("ipfs dht findprovs", func(t *testing.T) {
-		t.Parallel()
-		hash := nodes[3].IPFSAddStr("some stuff")
-		res := nodes[4].IPFS("dht", "findprovs", hash)
-		assert.Equal(t, nodes[3].PeerID().String(), res.Stdout.Trimmed())
-	})
-
-	t.Run("ipfs dht query <peerID>", func(t *testing.T) {
-		t.Parallel()
-		t.Run("normal DHT configuration", func(t *testing.T) {
-			t.Parallel()
-			hash := nodes[0].IPFSAddStr("some other stuff")
-			peerCounts := map[string]int{}
-			peerCountsMut := sync.Mutex{}
-			harness.Nodes(nodes).ForEachPar(func(node *harness.Node) {
-				res := node.IPFS("dht", "query", hash)
-				closestPeer := res.Stdout.Lines()[0]
-				// check that it's a valid peer ID
-				_, err := peer.Decode(closestPeer)
-				require.NoError(t, err)
-
-				peerCountsMut.Lock()
-				peerCounts[closestPeer]++
-				peerCountsMut.Unlock()
-			})
-			// 4 nodes should see the same peer ID
-			// 1 node (the closest) should see a different one
-			var counts []int
-			for _, count := range peerCounts {
-				counts = append(counts, count)
-			}
-			sort.IntSlice(counts).Sort()
-			assert.Equal(t, []int{1, 4}, counts)
-		})
-	})
-
-	t.Run("dht commands fail when offline", func(t *testing.T) {
-		t.Parallel()
-		node := harness.NewT(t).NewNode().Init()
-
-		// these cannot be run in parallel due to repo locking (seems like a bug)
-
-		t.Run("dht findprovs", func(t *testing.T) {
-			res := node.RunIPFS("dht", "findprovs", testutils.CIDEmptyDir)
-			assert.Equal(t, 1, res.ExitCode())
-			assert.Contains(t, res.Stderr.String(), "this command must be run in online mode")
-		})
-
-		t.Run("dht findpeer", func(t *testing.T) {
-			res := node.RunIPFS("dht", "findpeer", testutils.CIDEmptyDir)
-			assert.Equal(t, 1, res.ExitCode())
-			assert.Contains(t, res.Stderr.String(), "this command must be run in online mode")
-		})
-
-		t.Run("dht put", func(t *testing.T) {
-			node.WriteBytes("foo", []byte("foo"))
-			res := node.RunIPFS("dht", "put", "/ipns/"+node.PeerID().String(), "foo")
-			assert.Equal(t, 1, res.ExitCode())
-			assert.Contains(t, res.Stderr.String(), "can't put while offline: pass `--allow-offline` to override")
-		})
-	})
-}
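
For scripts or docs still using the removed aliases, a minimal migration sketch. Each deleted `ipfs dht` subcommand was a deprecated wrapper whose `Run`, `Arguments`, and `Encoders` pointed at the matching `*RoutingCmd` (see the dht.go hunk above), so the `ipfs routing` equivalents behave identically; the `<cid>` and `<peerID>` values below are placeholders, not output from this change:

```sh
# Removed deprecated aliases           # Supported equivalents
ipfs dht findprovs <cid>               # ipfs routing findprovs <cid>
ipfs dht findpeer <peerID>             # ipfs routing findpeer <peerID>
ipfs dht get /ipns/<peerID>            # ipfs routing get /ipns/<peerID>
ipfs dht put /ipns/<peerID> <file>     # ipfs routing put /ipns/<peerID> <file>
ipfs dht provide <cid>                 # ipfs routing provide <cid>

# Unchanged: the DHT-specific closest-peers query is kept
ipfs dht query <peerID>
```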