core: remove outdated tests (#27662)
Before #27178, we spun up a number of ethash verifier goroutines to verify headers in parallel, so we also had tests to ensure that verification could indeed be aborted even with multiple workers running.

PR #27178 removed the parallelism in verification, and these tests now fail, since the results are simply fired off sequentially, as fast as possible, on a single goroutine.

This change removes the (sometimes failing) tests.
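For illustration, here is a minimal, self-contained sketch of the sequential shape that #27178 introduced: a single goroutine walks the headers in order and streams the results, and closing the abort channel simply stops the loop. The types and helper functions below are invented for the example; this is not the actual go-ethereum implementation.

package main

import "fmt"

type header struct{ number uint64 }

// verifyHeaders mimics the post-#27178 flow: one goroutine, results in order.
func verifyHeaders(headers []header) (chan<- struct{}, <-chan error) {
	abort := make(chan struct{})
	results := make(chan error, len(headers))

	go func() {
		for _, h := range headers {
			select {
			case <-abort:
				return // caller aborted, stop verifying
			case results <- verifyHeader(h):
			}
		}
	}()
	return abort, results
}

// verifyHeader stands in for the real per-header consensus checks.
func verifyHeader(h header) error {
	if h.number == 0 {
		return fmt.Errorf("refusing to verify the genesis header")
	}
	return nil
}

func main() {
	headers := []header{{1}, {2}, {3}}
	abort, results := verifyHeaders(headers)
	defer close(abort)

	for i := range headers {
		if err := <-results; err != nil {
			fmt.Printf("header %d invalid: %v\n", i+1, err)
		}
	}
}

The removed tests assumed several such workers racing on the results channel (hence the GOMAXPROCS juggling and the "verified > 2*threads" bound); with a single producer those assumptions no longer hold.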
holiman authored Jul 6, 2023
1 parent cbf2579 commit c866dfd
Showing 2 changed files with 9 additions and 122 deletions.
113 changes: 0 additions & 113 deletions core/block_validator_test.go
@@ -18,7 +18,6 @@ package core

import (
"math/big"
"runtime"
"testing"
"time"

@@ -235,118 +234,6 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
}
}

// Tests that concurrent header verification works, for both good and bad blocks.
func TestHeaderConcurrentVerification2(t *testing.T) { testHeaderConcurrentVerification(t, 2) }
func TestHeaderConcurrentVerification8(t *testing.T) { testHeaderConcurrentVerification(t, 8) }
func TestHeaderConcurrentVerification32(t *testing.T) { testHeaderConcurrentVerification(t, 32) }

func testHeaderConcurrentVerification(t *testing.T, threads int) {
// Create a simple chain to verify
var (
gspec = &Genesis{Config: params.TestChainConfig}
_, blocks, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 8, nil)
)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
// Set the number of threads to verify on
old := runtime.GOMAXPROCS(threads)
defer runtime.GOMAXPROCS(old)

// Run the header checker for the entire block chain at once both for a valid and
// also an invalid chain (enough if one arbitrary block is invalid).
for i, valid := range []bool{true, false} {
var results <-chan error

if valid {
chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
_, results = chain.engine.VerifyHeaders(chain, headers)
chain.Stop()
} else {
chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFakeFailer(uint64(len(headers)-1)), vm.Config{}, nil, nil)
_, results = chain.engine.VerifyHeaders(chain, headers)
chain.Stop()
}
// Wait for all the verification results
checks := make(map[int]error)
for j := 0; j < len(blocks); j++ {
select {
case result := <-results:
checks[j] = result

case <-time.After(time.Second):
t.Fatalf("test %d.%d: verification timeout", i, j)
}
}
// Check nonce check validity
for j := 0; j < len(blocks); j++ {
want := valid || (j < len(blocks)-2) // We chose the last-but-one nonce in the chain to fail
if (checks[j] == nil) != want {
t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, checks[j], want)
}
if !want {
// A few blocks after the first error may pass verification due to concurrent
// workers. We don't care about those in this test, just that the correct block
// errors out.
break
}
}
// Make sure no more data is returned
select {
case result := <-results:
t.Fatalf("test %d: unexpected result returned: %v", i, result)
case <-time.After(25 * time.Millisecond):
}
}
}

// Tests that aborting a header validation indeed prevents further checks from being
// run, as well as checks that no left-over goroutines are leaked.
func TestHeaderConcurrentAbortion2(t *testing.T) { testHeaderConcurrentAbortion(t, 2) }
func TestHeaderConcurrentAbortion8(t *testing.T) { testHeaderConcurrentAbortion(t, 8) }
func TestHeaderConcurrentAbortion32(t *testing.T) { testHeaderConcurrentAbortion(t, 32) }

func testHeaderConcurrentAbortion(t *testing.T, threads int) {
// Create a simple chain to verify
var (
gspec = &Genesis{Config: params.TestChainConfig}
_, blocks, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 1024, nil)
)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
// Set the number of threads to verify on
old := runtime.GOMAXPROCS(threads)
defer runtime.GOMAXPROCS(old)

// Start the verifications and immediately abort
chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFakeDelayer(time.Millisecond), vm.Config{}, nil, nil)
defer chain.Stop()

abort, results := chain.engine.VerifyHeaders(chain, headers)
close(abort)

// Deplete the results channel
verified := 0
for depleted := false; !depleted; {
select {
case result := <-results:
if result != nil {
t.Errorf("header %d: validation failed: %v", verified, result)
}
verified++
case <-time.After(50 * time.Millisecond):
depleted = true
}
}
// Check that abortion was honored by not processing too many POWs
if verified > 2*threads {
t.Errorf("verification count too large: have %d, want below %d", verified, 2*threads)
}
}

func TestCalcGasLimit(t *testing.T) {
for i, tc := range []struct {
pGasLimit uint64
18 changes: 9 additions & 9 deletions core/txpool/legacypool/legacypool_test.go
@@ -1090,7 +1090,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
if queued != 2 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
}
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
@@ -1455,7 +1455,7 @@ func TestRepricing(t *testing.T) {
if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
- if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil {
+ if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil {
t.Fatalf("failed to add queued transaction: %v", err)
}
if err := validateEvents(events, 5); err != nil {
@@ -1582,7 +1582,7 @@ func TestRepricingDynamicFee(t *testing.T) {
t.Fatalf("failed to add pending transaction: %v", err)
}
tx = dynamicFeeTx(2, 100000, big.NewInt(2), big.NewInt(2), keys[2])
- if err := pool.addRemote(tx); err != nil {
+ if err := pool.addRemoteSync(tx); err != nil {
t.Fatalf("failed to add queued transaction: %v", err)
}
if err := validateEvents(events, 5); err != nil {
@@ -1723,18 +1723,18 @@ func TestUnderpricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Ensure that adding an underpriced transaction on block limit fails
- if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) {
+ if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
// Replace a future transaction with a future transaction
- if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1
+ if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1
t.Fatalf("failed to add well priced transaction: %v", err)
}
// Ensure that adding high priced transactions drops cheap ones, but not own
- if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que -
+ if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que -
t.Fatalf("failed to add well priced transaction: %v", err)
}
- if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2
+ if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2
t.Fatalf("failed to add well priced transaction: %v", err)
}
if err := pool.addRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3
@@ -1915,11 +1915,11 @@ func TestUnderpricingDynamicFee(t *testing.T) {
}

tx = pricedTransaction(1, 100000, big.NewInt(3), keys[1])
- if err := pool.addRemote(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2
+ if err := pool.addRemoteSync(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2
t.Fatalf("failed to add well priced transaction: %v", err)
}
tx = dynamicFeeTx(2, 100000, big.NewInt(4), big.NewInt(1), keys[1])
- if err := pool.addRemote(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3
+ if err := pool.addRemoteSync(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3
t.Fatalf("failed to add well priced transaction: %v", err)
}
pending, queued = pool.Stats()