package p2p

import (
	"context"
	"crypto/rand"
	"math/big"
	"net"
	"testing"
	"time"

	ds "github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/sync"
	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
	ma "github.com/multiformats/go-multiaddr"
	"github.com/stretchr/testify/require"
	"golang.org/x/exp/slices"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/rpc"

	"github.com/ethereum-optimism/optimism/op-node/metrics"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum-optimism/optimism/op-service/testlog"
	"github.com/ethereum-optimism/optimism/op-service/testutils"
)

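// TestingConfig returns a p2p Config for tests: static peering only (discovery off),
// no transport security, and an in-memory datastore backing the peerstore.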
func TestingConfig(t *testing.T) *Config {
	p, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	require.NoError(t, err, "failed to generate new p2p priv key")

	return &Config{
		Priv:                (p).(*crypto.Secp256k1PrivateKey),
		DisableP2P:          false,
		NoDiscovery:         true, // we statically peer during most tests.
		ListenIP:            net.IP{127, 0, 0, 1},
		ListenTCPPort:       0, // bind to any available port
		StaticPeers:         nil,
		HostMux:             []libp2p.Option{YamuxC()},
		NoTransportSecurity: true,
		PeersLo:             1,
		PeersHi:             10,
		PeersGrace:          time.Second * 10,
		NAT:                 false,
		UserAgent:           "optimism-testing",
		TimeoutNegotiation:  time.Second * 2,
		TimeoutAccept:       time.Second * 2,
		TimeoutDial:         time.Second * 2,
		Store:               sync.MutexWrap(ds.NewMapDatastore()),
	}
}

// Simplified p2p test, to debug/test basic libp2p functionality
func TestP2PSimple(t *testing.T) {
	confA := TestingConfig(t)
	confB := TestingConfig(t)
	hostA, err := confA.Host(testlog.Logger(t, log.LvlError).New("host", "A"), nil, metrics.NoopMetrics)
	require.NoError(t, err, "failed to launch host A")
	defer hostA.Close()
	hostB, err := confB.Host(testlog.Logger(t, log.LvlError).New("host", "B"), nil, metrics.NoopMetrics)
	require.NoError(t, err, "failed to launch host B")
	defer hostB.Close()
	err = hostA.Connect(context.Background(), peer.AddrInfo{ID: hostB.ID(), Addrs: hostB.Addrs()})
	require.NoError(t, err, "failed to connect to peer B from peer A")
	require.Equal(t, hostB.Network().Connectedness(hostA.ID()), network.Connected)
}

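// mockGossipIn stubs the gossip payload receiver: tests can intercept unsafe L2
// payloads via OnUnsafeL2PayloadFn, and payloads are simply accepted when it is nil.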
type mockGossipIn struct {
	OnUnsafeL2PayloadFn func(ctx context.Context, from peer.ID, msg *eth.ExecutionPayload) error
}

func (m *mockGossipIn) OnUnsafeL2Payload(ctx context.Context, from peer.ID, msg *eth.ExecutionPayload) error {
	if m.OnUnsafeL2PayloadFn != nil {
		return m.OnUnsafeL2PayloadFn(ctx, from, msg)
	}
	return nil
}

// Full setup, using negotiated transport security and muxes
func TestP2PFull(t *testing.T) {
	pA, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	require.NoError(t, err, "failed to generate new p2p priv key")
	pB, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	require.NoError(t, err, "failed to generate new p2p priv key")

	confA := Config{
		Priv:                (pA).(*crypto.Secp256k1PrivateKey),
		DisableP2P:          false,
		NoDiscovery:         true,
		ListenIP:            net.IP{127, 0, 0, 1},
		ListenTCPPort:       0, // bind to any available port
		StaticPeers:         nil,
		HostMux:             []libp2p.Option{YamuxC(), MplexC()},
		HostSecurity:        []libp2p.Option{NoiseC(), TlsC()},
		NoTransportSecurity: false,
		PeersLo:             1,
		PeersHi:             10,
		PeersGrace:          time.Second * 10,
		NAT:                 false,
		UserAgent:           "optimism-testing",
		TimeoutNegotiation:  time.Second * 2,
		TimeoutAccept:       time.Second * 2,
		TimeoutDial:         time.Second * 2,
		Store:               sync.MutexWrap(ds.NewMapDatastore()),
	}
	// copy config A, and change the settings for B
	confB := confA
	confB.Priv = (pB).(*crypto.Secp256k1PrivateKey)
	confB.Store = sync.MutexWrap(ds.NewMapDatastore())
	// TODO: maybe swap the order of sec/mux preferences, to test that negotiation works

	runCfgA := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}}
	runCfgB := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}}

	logA := testlog.Logger(t, log.LvlError).New("host", "A")
	nodeA, err := NewNodeP2P(context.Background(), &rollup.Config{}, logA, &confA, &mockGossipIn{}, nil, runCfgA, metrics.NoopMetrics)
	require.NoError(t, err)
	defer nodeA.Close()

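	// Watch host A for new connections, so the test can wait for B to dial in.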
	conns := make(chan network.Conn, 1)
	hostA := nodeA.Host()
	hostA.Network().Notify(&network.NotifyBundle{
		ConnectedF: func(n network.Network, conn network.Conn) {
			conns <- conn
		}})

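	// Expose node A's p2p API over an in-process RPC server and drive it through the opp2p client.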
	backend := NewP2PAPIBackend(nodeA, logA, nil)
	srv := rpc.NewServer()
	require.NoError(t, srv.RegisterName("opp2p", backend))
	client := rpc.DialInProc(srv)
	p2pClientA := NewClient(client)

	// Set up B to connect statically
	confB.StaticPeers, err = peer.AddrInfoToP2pAddrs(&peer.AddrInfo{ID: hostA.ID(), Addrs: hostA.Addrs()})
	require.NoError(t, err)

	// Add host B's own address as a static peer; it shouldn't connect to itself or cause issues.
	idB, err := peer.IDFromPublicKey(confB.Priv.GetPublic())
	require.NoError(t, err)
	altAddrB, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/12345/p2p/" + idB.String())
	require.NoError(t, err)
	confB.StaticPeers = append(confB.StaticPeers, altAddrB)

	logB := testlog.Logger(t, log.LvlError).New("host", "B")

	nodeB, err := NewNodeP2P(context.Background(), &rollup.Config{}, logB, &confB, &mockGossipIn{}, nil, runCfgB, metrics.NoopMetrics)
	require.NoError(t, err)
	defer nodeB.Close()
	hostB := nodeB.Host()

	require.True(t, nodeB.IsStatic(hostA.ID()), "node A must be static peer of node B")
	require.False(t, nodeB.IsStatic(hostB.ID()), "node B must not be static peer of node B itself")

	select {
	case <-time.After(time.Second):
		t.Fatal("failed to connect new host")
	case c := <-conns:
		require.Equal(t, hostB.ID(), c.RemotePeer())
	}

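	// With A and B connected, exercise the opp2p RPC API against node A.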
	ctx := context.Background()

	selfInfoA, err := p2pClientA.Self(ctx)
	require.NoError(t, err)
	require.Equal(t, selfInfoA.PeerID, hostA.ID())

	_, err = p2pClientA.DiscoveryTable(ctx)
	// rpc does not preserve error type
	require.Equal(t, err.Error(), ErrDisabledDiscovery.Error(), "expecting discv5 to be disabled")

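	// Block and unblock a peer, an IP address, and a subnet, checking that each listing round-trips.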
	require.NoError(t, p2pClientA.BlockPeer(ctx, hostB.ID()))
	blockedPeers, err := p2pClientA.ListBlockedPeers(ctx)
	require.NoError(t, err)
	require.Equal(t, []peer.ID{hostB.ID()}, blockedPeers)
	require.NoError(t, p2pClientA.UnblockPeer(ctx, hostB.ID()))

	require.NoError(t, p2pClientA.BlockAddr(ctx, net.IP{123, 123, 123, 123}))
	blockedIPs, err := p2pClientA.ListBlockedAddrs(ctx)
	require.NoError(t, err)
	require.Len(t, blockedIPs, 1)
	require.Equal(t, net.IP{123, 123, 123, 123}, blockedIPs[0].To4())
	require.NoError(t, p2pClientA.UnblockAddr(ctx, net.IP{123, 123, 123, 123}))

	subnet := &net.IPNet{IP: net.IP{123, 0, 0, 0}.To16(), Mask: net.IPMask{0xff, 0, 0, 0}}
	require.NoError(t, p2pClientA.BlockSubnet(ctx, subnet))
	blockedSubnets, err := p2pClientA.ListBlockedSubnets(ctx)
	require.NoError(t, err)
	require.Len(t, blockedSubnets, 1)
	require.Equal(t, subnet, blockedSubnets[0])
	require.NoError(t, p2pClientA.UnblockSubnet(ctx, subnet))

	// Ask host A for all the peer information it has
	peerDump, err := p2pClientA.Peers(ctx, false)
	require.Nil(t, err)
	require.Contains(t, peerDump.Peers, hostB.ID().String())
	data := peerDump.Peers[hostB.ID().String()]
	require.Equal(t, data.Direction, network.DirInbound)

	stats, err := p2pClientA.PeerStats(ctx)
	require.Nil(t, err)
	require.Equal(t, uint(1), stats.Connected)

	// disconnect
	require.NoError(t, p2pClientA.DisconnectPeer(ctx, hostB.ID()))
	peerDump, err = p2pClientA.Peers(ctx, false)
	require.Nil(t, err)
	data = peerDump.Peers[hostB.ID().String()]
	require.Equal(t, data.Connectedness, network.NotConnected)

	// reconnect
	addrsB, err := peer.AddrInfoToP2pAddrs(&peer.AddrInfo{ID: hostB.ID(), Addrs: hostB.Addrs()})
	require.NoError(t, err)
	require.NoError(t, p2pClientA.ConnectPeer(ctx, addrsB[0].String()))

	require.NoError(t, p2pClientA.ProtectPeer(ctx, hostB.ID()))
	require.NoError(t, p2pClientA.UnprotectPeer(ctx, hostB.ID()))
}

func TestDiscovery(t *testing.T) {
	pA, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	require.NoError(t, err, "failed to generate new p2p priv key")
	pB, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	require.NoError(t, err, "failed to generate new p2p priv key")
	pC, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	require.NoError(t, err, "failed to generate new p2p priv key")

	logA := testlog.Logger(t, log.LvlError).New("host", "A")
	logB := testlog.Logger(t, log.LvlError).New("host", "B")
	logC := testlog.Logger(t, log.LvlError).New("host", "C")

	discDBA, err := enode.OpenDB("") // "" = memory db
	require.NoError(t, err)
	discDBB, err := enode.OpenDB("")
	require.NoError(t, err)
	discDBC, err := enode.OpenDB("")
	require.NoError(t, err)

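	// All three nodes share the same rollup config, so discovery recognizes them as peers of the same L2 chain.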
	rollupCfg := &rollup.Config{L2ChainID: big.NewInt(901)}

	confA := Config{
		Priv:                (pA).(*crypto.Secp256k1PrivateKey),
		DisableP2P:          false,
		NoDiscovery:         false,
		AdvertiseIP:         net.IP{127, 0, 0, 1},
		ListenUDPPort:       0, // bind to any available port
		ListenIP:            net.IP{127, 0, 0, 1},
		ListenTCPPort:       0, // bind to any available port
		StaticPeers:         nil,
		HostMux:             []libp2p.Option{YamuxC(), MplexC()},
		HostSecurity:        []libp2p.Option{NoiseC(), TlsC()},
		NoTransportSecurity: false,
		PeersLo:             1,
		PeersHi:             10,
		PeersGrace:          time.Second * 10,
		NAT:                 false,
		UserAgent:           "optimism-testing",
		TimeoutNegotiation:  time.Second * 2,
		TimeoutAccept:       time.Second * 2,
		TimeoutDial:         time.Second * 2,
		Store:               sync.MutexWrap(ds.NewMapDatastore()),
		DiscoveryDB:         discDBA,
	}
	// copy config A, and change the settings for B
	confB := confA
	confB.Priv = (pB).(*crypto.Secp256k1PrivateKey)
	confB.Store = sync.MutexWrap(ds.NewMapDatastore())
	confB.DiscoveryDB = discDBB

	runCfgA := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}}
	runCfgB := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}}
	runCfgC := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}}

	resourcesCtx, resourcesCancel := context.WithCancel(context.Background())
	defer resourcesCancel()

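	// Start node A first; it serves as the discovery bootnode for both B and C.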
	nodeA, err := NewNodeP2P(context.Background(), rollupCfg, logA, &confA, &mockGossipIn{}, nil, runCfgA, metrics.NoopMetrics)
	require.NoError(t, err)
	defer nodeA.Close()
	hostA := nodeA.Host()
	go nodeA.DiscoveryProcess(resourcesCtx, logA, rollupCfg, 10)

	// Add A as bootnode to B
	confB.Bootnodes = []*enode.Node{nodeA.Dv5Udp().Self()}
	// Copy B config to C, and ensure they have a different priv / peerstore
	confC := confB
	confC.Priv = (pC).(*crypto.Secp256k1PrivateKey)
	confC.Store = sync.MutexWrap(ds.NewMapDatastore())
	confC.DiscoveryDB = discDBC

	// Start B
	nodeB, err := NewNodeP2P(context.Background(), rollupCfg, logB, &confB, &mockGossipIn{}, nil, runCfgB, metrics.NoopMetrics)
	require.NoError(t, err)
	defer nodeB.Close()
	hostB := nodeB.Host()
	go nodeB.DiscoveryProcess(resourcesCtx, logB, rollupCfg, 10)

	// Track connections to B
	connsB := make(chan network.Conn, 2)
	hostB.Network().Notify(&network.NotifyBundle{
		ConnectedF: func(n network.Network, conn network.Conn) {
			log.Info("connection to B", "peer", conn.RemotePeer())
			connsB <- conn
		}})

	// Start C
	nodeC, err := NewNodeP2P(context.Background(), rollupCfg, logC, &confC, &mockGossipIn{}, nil, runCfgC, metrics.NoopMetrics)
	require.NoError(t, err)
	defer nodeC.Close()
	hostC := nodeC.Host()
	go nodeC.DiscoveryProcess(resourcesCtx, logC, rollupCfg, 10)

	// B and C don't know each other yet, but both have A as a bootnode.
	// It should only be a matter of time for them to connect, if they discover each other via A.
	timeout := time.After(time.Second * 60)
	var peersOfB []peer.ID
	// B should be connected to the bootnode (A) it used (it's a valid optimism node to connect to here)
	// C should also be connected, although this one might take more time to discover
	for !slices.Contains(peersOfB, hostA.ID()) || !slices.Contains(peersOfB, hostC.ID()) {
		select {
		case <-timeout:
			var peers []string
			for _, id := range peersOfB {
				peers = append(peers, id.String())
			}
			t.Fatalf("timeout reached - expected host A: %v and host C: %v to be in %v", hostA.ID().String(), hostC.ID().String(), peers)
		case c := <-connsB:
			peersOfB = append(peersOfB, c.RemotePeer())
		}
	}
}

// Most tests should use mocknets instead of using the actual local host network
func TestP2PMocknet(t *testing.T) {
	mnet, err := mocknet.FullMeshConnected(3)
	require.NoError(t, err, "failed to setup mocknet")
	defer mnet.Close()
	hosts := mnet.Hosts()
	hostA, hostB, hostC := hosts[0], hosts[1], hosts[2]
	require.Equal(t, hostA.Network().Connectedness(hostB.ID()), network.Connected)
	require.Equal(t, hostA.Network().Connectedness(hostC.ID()), network.Connected)
	require.Equal(t, hostB.Network().Connectedness(hostC.ID()), network.Connected)
}