rewrite of provides to better select peers to send RPCs to #456
Changes from 7 commits: 07b0640, b4b6fe2, 2ad23f0, 46aa22e, 14fc418, 5af5625, b4c6c87, da04d26
@@ -102,10 +102,9 @@ func (dht *IpfsDHT) Connect(ctx context.Context, npeer peer.ID) error {
 	return nil
 }
 
-// putValueToNetwork stores the given key/value pair at the peer 'p'
-// meaning: it sends a PUT_VALUE message to p
-func (dht *IpfsDHT) putValueToNetwork(ctx context.Context, p peer.ID,
-	key string, rec *pb.Record) error {
+// putValueToPeer stores the given key/value pair at the peer 'p'
+func (dht *IpfsDHT) putValueToPeer(ctx context.Context, p peer.ID,
+	key u.Key, rec *pb.Record) error {
 
 	pmes := pb.NewMessage(pb.Message_PUT_VALUE, string(key), 0)
 	pmes.Record = rec
@@ -238,12 +237,12 @@ func (dht *IpfsDHT) Update(ctx context.Context, p peer.ID) {
 }
 
 // FindLocal looks for a peer with a given ID connected to this dht and returns the peer and the table it was found in.
-func (dht *IpfsDHT) FindLocal(id peer.ID) (peer.PeerInfo, *kb.RoutingTable) {
+func (dht *IpfsDHT) FindLocal(id peer.ID) peer.PeerInfo {
 	p := dht.routingTable.Find(id)
 	if p != "" {
-		return dht.peerstore.PeerInfo(p), dht.routingTable
+		return dht.peerstore.PeerInfo(p)
 	}
-	return peer.PeerInfo{}, nil
+	return peer.PeerInfo{}
 }
 
 // findPeerSingle asks peer 'p' if they know where the peer with id 'id' is

Review comment on p := dht.routingTable.Find(id): low priority but the latter would make more sense than the former:

func (rt *RoutingTable) Find(id peer.ID) peer.ID
func (rt *RoutingTable) Exists(id peer.ID) bool

Reply: Yeah, that's leftover from when it returned a
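A minimal sketch of the split suggested above, building on the RoutingTable and Find already shown in this diff; Exists itself is hypothetical and not part of this PR:

// Exists reports whether id is currently in the routing table, leaving
// Find to keep returning the matching peer.ID ("" when absent, as the
// code above relies on).
func (rt *RoutingTable) Exists(id peer.ID) bool {
    return rt.Find(id) != ""
}

Callers that only care about membership could then write rt.Exists(id) instead of comparing Find's result against the empty string.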
@@ -257,26 +256,6 @@ func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p peer.ID, key u.Ke
 	return dht.sendRequest(ctx, p, pmes)
 }
 
-func (dht *IpfsDHT) addProviders(key u.Key, pbps []*pb.Message_Peer) []peer.ID {
-	peers := pb.PBPeersToPeerInfos(pbps)
-
-	var provArr []peer.ID
-	for _, pi := range peers {
-		p := pi.ID
-
-		// Dont add outselves to the list
-		if p == dht.self {
-			continue
-		}
-
-		log.Debugf("%s adding provider: %s for %s", dht.self, p, key)
-		// TODO(jbenet) ensure providers is idempotent
-		dht.providers.AddProvider(key, p)
-		provArr = append(provArr, p)
-	}
-	return provArr
-}
-
 // nearestPeersToQuery returns the routing tables closest peers.
 func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.ID {
 	key := u.Key(pmes.GetKey())
@@ -285,7 +264,7 @@ func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.ID {
 }
 
 // betterPeerToQuery returns nearestPeersToQuery, but iff closer than self.
-func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.ID {
+func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, p peer.ID, count int) []peer.ID {
 	closer := dht.nearestPeersToQuery(pmes, count)
 
 	// no node? nil
@@ -302,11 +281,16 @@ func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.ID {
 	}
 
 	var filtered []peer.ID
-	for _, p := range closer {
+	for _, clp := range closer {
+		// Dont send a peer back themselves
+		if p == clp {
+			continue
+		}
+
 		// must all be closer than self
 		key := u.Key(pmes.GetKey())
-		if !kb.Closer(dht.self, p, key) {
-			filtered = append(filtered, p)
+		if !kb.Closer(dht.self, clp, key) {
+			filtered = append(filtered, clp)
 		}
 	}
 
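For reference, a standalone sketch of the comparison kb.Closer performs, assuming Kademlia's XOR metric over SHA-256-hashed identifiers (as in the kbucket package); the function and peer names here are illustrative:

package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"
)

// closer reports whether a is nearer to key than b under the XOR
// metric, hashing all identifiers to a fixed length first.
func closer(a, b, key string) bool {
    ha := sha256.Sum256([]byte(a))
    hb := sha256.Sum256([]byte(b))
    hk := sha256.Sum256([]byte(key))
    da := make([]byte, len(hk))
    db := make([]byte, len(hk))
    for i := range hk {
        da[i] = ha[i] ^ hk[i] // distance(a, key)
        db[i] = hb[i] ^ hk[i] // distance(b, key)
    }
    return bytes.Compare(da, db) < 0 // smaller XOR distance means closer
}

func main() {
    fmt.Println(closer("peerA", "peerB", "some-key"))
}

The loop above keeps a candidate clp only when !kb.Closer(dht.self, clp, key), i.e. when this node is not itself closer to the key than the candidate.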
@@ -323,23 +307,6 @@ func (dht *IpfsDHT) ensureConnectedToPeer(ctx context.Context, p peer.ID) error
 	return dht.network.DialPeer(ctx, p)
 }
 
-//TODO: this should be smarter about which keys it selects.
-func (dht *IpfsDHT) loadProvidableKeys() error {
-	kl, err := dht.datastore.KeyList()
-	if err != nil {
-		return err
-	}
-	for _, dsk := range kl {
-		k := u.KeyFromDsKey(dsk)
-		if len(k) == 0 {
-			log.Errorf("loadProvidableKeys error: %v", dsk)
-		}
-
-		dht.providers.AddProvider(k, dht.self)
-	}
-	return nil
-}
-
 // PingRoutine periodically pings nearest neighbors.
 func (dht *IpfsDHT) PingRoutine(t time.Duration) {
 	defer dht.Children().Done()
@@ -14,7 +14,6 @@ import (
 	dssync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
 	ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
 
-	// ci "github.com/jbenet/go-ipfs/crypto"
 	inet "github.com/jbenet/go-ipfs/net"
 	peer "github.com/jbenet/go-ipfs/peer"
 	routing "github.com/jbenet/go-ipfs/routing"
@@ -33,9 +32,9 @@
 	}
 }
 
-func setupDHT(ctx context.Context, t *testing.T, addr ma.Multiaddr) *IpfsDHT {
+func setupDHT(ctx context.Context, t *testing.T, addr ma.Multiaddr, seed int64) *IpfsDHT {
 
-	sk, pk, err := testutil.RandKeyPair(512)
+	sk, pk, err := testutil.SeededKeyPair(512, seed)
 	if err != nil {
 		t.Fatal(err)
 	}

Review comment on setupDHT: it's painful to put the seeds everywhere in the interface; the testutil package is there to have all the sane defaults for tests. Let's use a shared seed (the test shared time-seeded rand, or the current time itself), and move the bits # into testutil as a default.
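A sketch of what that suggestion could look like inside testutil, reusing the SeededKeyPair from this diff and assuming the crypto package's PrivKey/PubKey types; the shared source, defaultKeyBits, and the zero-argument RandKeyPair are all hypothetical:

package testutil

import (
    "math/rand"
    "time"

    ci "github.com/jbenet/go-ipfs/crypto"
)

// testRand is a single time-seeded source shared by all tests, so
// individual tests no longer thread seeds through their setup helpers.
var testRand = rand.New(rand.NewSource(time.Now().UnixNano()))

// defaultKeyBits moves the key-size number into testutil as a default.
const defaultKeyBits = 512

// RandKeyPair draws a seed from the shared source and delegates to
// SeededKeyPair (defined in this package, as shown in the diff above).
func RandKeyPair() (ci.PrivKey, ci.PubKey, error) {
    return SeededKeyPair(defaultKeyBits, testRand.Int63())
}

With this in place, setupDHT could drop its seed parameter again and call testutil.RandKeyPair() directly.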
@@ -71,7 +70,7 @@ func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer
 
 	for i := 0; i < n; i++ {
 		addrs[i] = testutil.RandLocalTCPAddress()
-		dhts[i] = setupDHT(ctx, t, addrs[i])
+		dhts[i] = setupDHT(ctx, t, addrs[i], int64(i))
 		peers[i] = dhts[i].self
 	}
 
@@ -120,8 +119,8 @@ func TestPing(t *testing.T) {
 	addrA := testutil.RandLocalTCPAddress()
 	addrB := testutil.RandLocalTCPAddress()
 
-	dhtA := setupDHT(ctx, t, addrA)
-	dhtB := setupDHT(ctx, t, addrB)
+	dhtA := setupDHT(ctx, t, addrA, 1)
+	dhtB := setupDHT(ctx, t, addrB, 2)
 
 	peerA := dhtA.self
 	peerB := dhtB.self
@@ -153,8 +152,8 @@ func TestValueGetSet(t *testing.T) {
 	addrA := testutil.RandLocalTCPAddress()
 	addrB := testutil.RandLocalTCPAddress()
 
-	dhtA := setupDHT(ctx, t, addrA)
-	dhtB := setupDHT(ctx, t, addrB)
+	dhtA := setupDHT(ctx, t, addrA, 1)
+	dhtB := setupDHT(ctx, t, addrB, 2)
 
 	defer dhtA.Close()
 	defer dhtB.Close()
@@ -487,12 +486,7 @@ func TestLayeredGet(t *testing.T) {
 	connect(t, ctx, dhts[1], dhts[2])
 	connect(t, ctx, dhts[1], dhts[3])
 
-	err := dhts[3].putLocal(u.Key("/v/hello"), []byte("world"))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = dhts[3].Provide(ctx, u.Key("/v/hello"))
+	err := dhts[3].Provide(ctx, u.Key("/v/hello"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -642,8 +636,8 @@ func TestConnectCollision(t *testing.T) {
 	addrA := testutil.RandLocalTCPAddress()
 	addrB := testutil.RandLocalTCPAddress()
 
-	dhtA := setupDHT(ctx, t, addrA)
-	dhtB := setupDHT(ctx, t, addrB)
+	dhtA := setupDHT(ctx, t, addrA, int64((rtime*2)+1))
+	dhtB := setupDHT(ctx, t, addrB, int64((rtime*2)+2))
 
 	peerA := dhtA.self
 	peerB := dhtB.self
Review comment: peer_test — intentional?

Reply: Yeap. Prevents circular dependencies in testing.

Reply: Note the import:

. "github.com/jbenet/go-ipfs/peer"