Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: Close conn in quic listener when wrapping fails #2852

Merged
merged 3 commits into from
Jul 1, 2024
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 35 additions & 0 deletions p2p/test/transport/transport_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,9 @@
"github.com/libp2p/go-libp2p/core/connmgr"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
mocknetwork "github.com/libp2p/go-libp2p/core/network/mocks"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p/core/sec"
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
"github.com/libp2p/go-libp2p/p2p/muxer/yamux"
Expand All @@ -29,8 +31,10 @@
"github.com/libp2p/go-libp2p/p2p/security/noise"
tls "github.com/libp2p/go-libp2p/p2p/security/tls"
libp2pwebrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc"
"go.uber.org/mock/gomock"

"github.com/multiformats/go-multiaddr"

Check failure on line 36 in p2p/test/transport/transport_test.go

View workflow job for this annotation

GitHub Actions / go-check / All

package "github.com/multiformats/go-multiaddr" is being imported more than once (ST1019)
ma "github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/require"
)

Expand Down Expand Up @@ -688,3 +692,34 @@
})
}
}

// TestCloseConnWhenBlocked tests that the server closes the connection when the rcmgr blocks it.
func TestCloseConnWhenBlocked(t *testing.T) {
for _, tc := range transportsToTest {
if tc.Name == "WebRTC" {
continue // WebRTC doesn't have a connection when we block so there's nothing to close
Comment on lines +698 to +699
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we test this? NewStream on the client will still fail, which is what we want. It'll fail with DeadlineExceeded though

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think we should. It's not testing what we think. The only thing it tests is whether the server ignores us and we hit a deadline.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe we could make another tests that just focuses on that behavior for WebRTC though?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It would test that if the resource manager blocks, no connection is established. But we can handle that separately. I am fine with keeping it as is.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

}
t.Run(tc.Name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockRcmgr := mocknetwork.NewMockResourceManager(ctrl)
mockRcmgr.EXPECT().OpenConnection(network.DirInbound, gomock.Any(), gomock.Any()).DoAndReturn(func(network.Direction, bool, ma.Multiaddr) (network.ConnManagementScope, error) {
// Block the connection
return nil, fmt.Errorf("connections blocked")
})
mockRcmgr.EXPECT().Close().AnyTimes()

server := tc.HostGenerator(t, TransportTestCaseOpts{ResourceManager: mockRcmgr})
client := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
defer server.Close()
defer client.Close()

client.Peerstore().AddAddrs(server.ID(), server.Addrs(), peerstore.PermanentAddrTTL)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, err := client.NewStream(ctx, server.ID(), ping.ID)
require.Error(t, err)
require.False(t, errors.Is(err, context.DeadlineExceeded), "expected error to be not be context deadline exceeded")
Comment on lines +719 to +721
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This will succeed if the implementation is using lazy multistream negotiation, right?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In which case the require.Error will fail. I haven't seen that happen, which is probably because we require the identify to happen before returning a new stream (a separate issue). For now I think let's leave this as is, and when we change the Identify/NewStream interaction we can update this test.

})
}
}
15 changes: 9 additions & 6 deletions p2p/transport/quic/listener.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,10 @@ func (l *listener) Accept() (tpt.CapableConn, error) {
if err != nil {
return nil, err
}
c, err := l.setupConn(qconn)
c, err := l.wrapConn(qconn)
if err != nil {
log.Debugf("failed to setup connection: %s", err)
qconn.CloseWithError(1, "")
continue
}
l.transport.addConn(qconn, c)
Expand All @@ -79,7 +81,10 @@ func (l *listener) Accept() (tpt.CapableConn, error) {
}
}

func (l *listener) setupConn(qconn quic.Connection) (*conn, error) {
// wrapConn wraps a QUIC connection into a libp2p [tpt.CapableConn].
// If wrapping fails, the caller is responsible for cleaning up the
// connection.
func (l *listener) wrapConn(qconn quic.Connection) (*conn, error) {
remoteMultiaddr, err := quicreuse.ToQuicMultiaddr(qconn.RemoteAddr(), qconn.ConnectionState().Version)
if err != nil {
return nil, err
Expand All @@ -90,18 +95,16 @@ func (l *listener) setupConn(qconn quic.Connection) (*conn, error) {
log.Debugw("resource manager blocked incoming connection", "addr", qconn.RemoteAddr(), "error", err)
return nil, err
}
c, err := l.setupConnWithScope(qconn, connScope, remoteMultiaddr)
c, err := l.wrapConnWithScope(qconn, connScope, remoteMultiaddr)
if err != nil {
connScope.Done()
qconn.CloseWithError(1, "")
return nil, err
}

return c, nil
}

func (l *listener) setupConnWithScope(qconn quic.Connection, connScope network.ConnManagementScope, remoteMultiaddr ma.Multiaddr) (*conn, error) {

func (l *listener) wrapConnWithScope(qconn quic.Connection, connScope network.ConnManagementScope, remoteMultiaddr ma.Multiaddr) (*conn, error) {
// The tls.Config used to establish this connection already verified the certificate chain.
// Since we don't have any way of knowing which tls.Config was used though,
// we have to re-determine the peer's identity here.
Expand Down
53 changes: 53 additions & 0 deletions p2p/transport/quic/listener_test.go
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
package libp2pquic

import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"errors"
"fmt"
"io"
"net"
Expand All @@ -12,8 +14,11 @@ import (

ic "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/network"
mocknetwork "github.com/libp2p/go-libp2p/core/network/mocks"
tpt "github.com/libp2p/go-libp2p/core/transport"
"github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
"github.com/quic-go/quic-go"
"go.uber.org/mock/gomock"

ma "github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/require"
Expand Down Expand Up @@ -113,3 +118,51 @@ func TestCorrectNumberOfVirtualListeners(t *testing.T) {
ln.Close()
require.Empty(t, tpt.listeners[udpAddr.String()])
}

func TestCleanupConnWhenBlocked(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockRcmgr := mocknetwork.NewMockResourceManager(ctrl)
mockRcmgr.EXPECT().OpenConnection(network.DirInbound, false, gomock.Any()).DoAndReturn(func(network.Direction, bool, ma.Multiaddr) (network.ConnManagementScope, error) {
// Block the connection
return nil, fmt.Errorf("connections blocked")
})

server := newTransport(t, mockRcmgr)
serverTpt := server.(*transport)
defer server.(io.Closer).Close()

localAddrV1 := ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")
ln, err := server.Listen(localAddrV1)
require.NoError(t, err)
defer ln.Close()
go ln.Accept()

client := newTransport(t, nil)
ctx := context.Background()

var quicErr *quic.ApplicationError = &quic.ApplicationError{}
conn, err := client.Dial(ctx, ln.Multiaddr(), serverTpt.localPeer)
if err != nil && errors.As(err, &quicErr) {
// We hit our expected application error
return
}

// No error yet, let's continue using the conn
s, err := conn.OpenStream(ctx)
if err != nil && errors.As(err, &quicErr) {
// We hit our expected application error
return
}

// No error yet, let's continue using the conn
s.SetReadDeadline(time.Now().Add(1 * time.Second))
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We can keep this 10 seconds. It will error before that any way.

b := [1]byte{}
_, err = s.Read(b[:])
if err != nil && errors.As(err, &quicErr) {
// We hit our expected application error
return
}

t.Fatalf("expected application error, got %v", err)
}
Loading