timeout handling
dennis-tra committed Oct 16, 2024
1 parent eb612f9 commit 53aca12
Showing 3 changed files with 24 additions and 8 deletions.
24 changes: 19 additions & 5 deletions discv4/crawler.go
@@ -186,9 +186,14 @@ func (c *Crawler) crawlDiscV4(ctx context.Context, pi PeerInfo) <-chan DiscV4Res
 	// the number of probes to issue against bucket 0
 	probes := 3
 
-	closestMap, closestSet, err := c.probeBucket0(pi, probes)
+	closestMap, closestSet, respondedAt, err := c.probeBucket0(pi, probes, result.RespondedAt != nil)
 
 	if err == nil {
+		// track the respondedAt timestamp if it wasn't already set
+		if result.RespondedAt == nil && !respondedAt.IsZero() {
+			result.RespondedAt = &respondedAt
+		}
+
 		result.Strategy = determineStrategy(closestSet)
 
 		var remainingClosest map[peer.ID]PeerInfo
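
Aside: result.RespondedAt is a *time.Time, so nil doubles as "no response recorded yet" and the guard above writes the timestamp at most once. A minimal, self-contained sketch of that set-once pattern, using illustrative names (Result, recordResponse) rather than the crawler's actual types:

    package main

    import (
        "fmt"
        "time"
    )

    // Result mimics a crawl result where a nil RespondedAt means
    // "no response observed yet".
    type Result struct {
        RespondedAt *time.Time
    }

    // recordResponse stores the timestamp only if none is set yet and the
    // probe actually produced one (a zero time means no response was seen).
    func recordResponse(r *Result, respondedAt time.Time) {
        if r.RespondedAt == nil && !respondedAt.IsZero() {
            r.RespondedAt = &respondedAt
        }
    }

    func main() {
        r := &Result{}
        recordResponse(r, time.Now())
        first := *r.RespondedAt
        recordResponse(r, time.Now().Add(time.Hour)) // ignored: already set
        fmt.Println(first.Equal(*r.RespondedAt))     // true
    }
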
@@ -241,8 +246,9 @@ func (c *Crawler) crawlDiscV4(ctx context.Context, pi PeerInfo) <-chan DiscV4Res
 	return resultCh
 }
 
-func (c *Crawler) probeBucket0(pi PeerInfo, probes int) (map[peer.ID]PeerInfo, []mapset.Set[peer.ID], error) {
+func (c *Crawler) probeBucket0(pi PeerInfo, probes int, returnedENR bool) (map[peer.ID]PeerInfo, []mapset.Set[peer.ID], time.Time, error) {
 	var (
+		respondedAt time.Time
 		closestMap  = make(map[peer.ID]PeerInfo)
 		closestSets []mapset.Set[peer.ID]
 		errs        []error
@@ -257,13 +263,21 @@ func (c *Crawler) probeBucket0(pi PeerInfo, probes int) (map[peer.ID]PeerInfo, [
 		// first, we generate a random key that falls into bucket 0
 		targetKey, err := GenRandomPublicKey(pi.Node.ID(), 0)
 		if err != nil {
-			return nil, nil, err
+			return nil, nil, time.Time{}, err
 		}
 
 		// second, we do the Find node request
 		closest, err := c.listener.FindNode(pi.Node.ID(), pi.udpAddr, targetKey)
 		if err != nil {
+			// exit early if the node hasn't returned an ENR and the first probe
+			// also timed out
+			if !returnedENR && errors.Is(err, discover.ErrTimeout) {
+				return nil, nil, time.Time{}, fmt.Errorf("failed to probe bucket 0: %w", discover.ErrTimeout)
+			}
+
 			errs = append(errs, err)
+		} else if respondedAt.IsZero() {
+			respondedAt = time.Now()
 		}
 
 		// third, we parse the responses into our [PeerInfo] struct
@@ -281,10 +295,10 @@ func (c *Crawler) probeBucket0(pi PeerInfo, probes int) (map[peer.ID]PeerInfo, [
 	}
 
 	if len(errs) == probes {
-		return nil, nil, fmt.Errorf("failed to probe bucket 0: %w", errors.Join(errs...))
+		return nil, nil, time.Time{}, fmt.Errorf("failed to probe bucket 0: %w", errors.Join(errs...))
 	}
 
-	return closestMap, closestSets, nil
+	return closestMap, closestSets, respondedAt, nil
 }
 
 type CrawlStrategy string
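Taken together, probeBucket0 now reports when the peer first answered and gives up early on unresponsive peers: if the peer never returned an ENR and a FIND_NODE probe times out, the remaining probes are skipped instead of timing out one after another. A compact, runnable sketch of that control flow, with errTimeout and findNode standing in for discover.ErrTimeout and the listener call:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    var errTimeout = errors.New("RPC timeout") // stand-in for discover.ErrTimeout

    // probeBucket0 sketches the commit's retry logic: up to `probes` attempts,
    // but bail on a timeout if the peer never returned an ENR.
    func probeBucket0(findNode func() error, probes int, returnedENR bool) (time.Time, error) {
        var respondedAt time.Time
        var errs []error

        for i := 0; i < probes; i++ {
            if err := findNode(); err != nil {
                if !returnedENR && errors.Is(err, errTimeout) {
                    return time.Time{}, fmt.Errorf("failed to probe bucket 0: %w", errTimeout)
                }
                errs = append(errs, err)
            } else if respondedAt.IsZero() {
                respondedAt = time.Now() // remember the first successful response
            }
        }

        if len(errs) == probes { // every probe failed
            return time.Time{}, fmt.Errorf("failed to probe bucket 0: %w", errors.Join(errs...))
        }
        return respondedAt, nil
    }

    func main() {
        calls := 0
        failing := func() error { calls++; return errTimeout }
        _, err := probeBucket0(failing, 3, false)
        fmt.Println(err, calls) // exits after a single probe, not three
    }

Note that a zero respondedAt travels back through every error path as time.Time{}, which is what lets the caller in crawlDiscV4 distinguish "responded" from "never responded" with respondedAt.IsZero().
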
5 changes: 4 additions & 1 deletion discv4/driver_crawler.go
@@ -131,7 +131,7 @@ func (cfg *CrawlDriverConfig) CrawlerConfig() *CrawlerConfig {
 		DialTimeout:  cfg.DialTimeout,
 		AddrDialType: cfg.AddrDialType,
 		LogErrors:    cfg.LogErrors,
-		MaxJitter:    time.Duration(cfg.CrawlWorkerCount/100) * time.Second, // 3000 workers -> distributed over 30s
+		MaxJitter:    time.Duration(cfg.CrawlWorkerCount/50) * time.Second, // e.g., 3000 workers evenly distributed over 60s
 		KeepENR:      false,
 	}
 }
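
The jitter window scales with the worker count: dividing by 50 instead of 100 doubles it, so the example in the comment goes from a 30s to a 60s spread for 3000 workers, halving the request burst when a crawl starts. A sketch of how such a cap is typically consumed, assuming each worker sleeps a uniformly random duration below MaxJitter before its first request (the crawler's actual use of the field lives outside this diff):

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // startupJitter draws a random delay in [0, maxJitter) so workers don't
    // all fire their first request in the same instant.
    func startupJitter(maxJitter time.Duration) time.Duration {
        if maxJitter <= 0 {
            return 0
        }
        return time.Duration(rand.Int63n(int64(maxJitter)))
    }

    func main() {
        workers := 3000
        maxJitter := time.Duration(workers/50) * time.Second // 60s, as in the new config
        fmt.Printf("cap %v, this worker waits %v\n", maxJitter, startupJitter(maxJitter))
    }
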
@@ -204,6 +204,9 @@ func NewCrawlDriver(dbc db.Client, crawl *models.Crawl, cfg *CrawlDriverConfig)
 		return nil, fmt.Errorf("create unhandled packets counter: %w", err)
 	}
 
+	// set the discovery response timeout
+	discover.RespTimeout = 2 * time.Second
+
 	d := &CrawlDriver{
 		cfg: cfg,
 		dbc: dbc,
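The second hunk raises the discovery response timeout globally: `discover.RespTimeout = 2 * time.Second` works here because the discover package in use exports that deadline as a package-level variable (upstream go-ethereum keeps an unexported default of roughly 500ms). A sketch of the pattern being tuned, waiting on a reply with a deadline and surfacing a sentinel timeout error; the names are stand-ins, not the package's actual internals:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // RespTimeout is a package-level knob, analogous to discover.RespTimeout:
    // exporting it is what makes the override in NewCrawlDriver possible.
    var RespTimeout = 500 * time.Millisecond

    var ErrTimeout = errors.New("RPC timeout")

    // awaitReply waits for a reply on ch or gives up after RespTimeout.
    func awaitReply(ch <-chan string) (string, error) {
        timer := time.NewTimer(RespTimeout)
        defer timer.Stop()
        select {
        case reply := <-ch:
            return reply, nil
        case <-timer.C:
            return "", ErrTimeout
        }
    }

    func main() {
        RespTimeout = 2 * time.Second // same override as in the diff
        ch := make(chan string)       // nobody ever sends
        _, err := awaitReply(ch)
        fmt.Println(errors.Is(err, ErrTimeout)) // true, after ~2s
    }
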
3 changes: 1 addition & 2 deletions discv4/gen_test.go
@@ -7,9 +7,8 @@ import (

"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/stretchr/testify/require"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestGenRandomPublicKey(t *testing.T) {
