Commit 5e14a615 authored by Axel Kingsley, committed by GitHub

Interop: SuperSystem for E2E Tests (#11850)

* op-e2e: interop test setup (work in progress)

* op-e2e: interop test setup

* organization and comment updates

* refactor creation code into WIP system2

* save secrets per L2

* Add SuperSystem Interface ; Add Users and Transactions

* Further Refactoring ; Fix Test

* Add Supervisor

* Add Supervisor Client

* Comment out Proposer

* Add AddL2RPC to Supervisor Client

* Fully link Supervisor and OP Node in E2E Test

* correct RPC call supervisor_checkBlock

* Make EOF acceptable for backend check

* final structure names

* Change unused functions to _ for linter

* fix import order

* Add Github Issue Numbers to TODOs

* tynes comments: add World Resource as configurable

---------
Co-authored-by: protolambda <proto@protolambda.com>
parent f70248a0
package setuputils
import (
"crypto/ecdsa"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-service/endpoint"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
)
func hexPriv(in *ecdsa.PrivateKey) string {
b := e2eutils.EncodePrivKey(in)
return hexutil.Encode(b)
}
func NewTxMgrConfig(l1Addr endpoint.RPC, privKey *ecdsa.PrivateKey) txmgr.CLIConfig {
return txmgr.CLIConfig{
L1RPCURL: l1Addr.RPC(),
PrivateKey: hexPriv(privKey),
NumConfirmations: 1,
SafeAbortNonceTooLowCount: 3,
FeeLimitMultiplier: 5,
ResubmissionTimeout: 3 * time.Second,
ReceiptQueryInterval: 50 * time.Millisecond,
NetworkTimeout: 2 * time.Second,
TxNotInMempoolTimeout: 2 * time.Minute,
}
}
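For reference, later hunks in this diff switch existing op-e2e call sites from package-local newTxMgrConfig helpers to this shared one. A rough sketch of a call site (the wrapper name is hypothetical and it assumes this file's imports):
// exampleTxMgrConfig shows the intended usage: a caller supplies only the L1
// endpoint and the operator's private key, and reuses the shared defaults
// (1 confirmation, 50ms receipt polling, 2 minute mempool timeout) rather than
// redefining them in each test file.
func exampleTxMgrConfig(l1RPC endpoint.RPC, operatorKey *ecdsa.PrivateKey) txmgr.CLIConfig {
	return NewTxMgrConfig(l1RPC, operatorKey)
}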
package interop
import (
"context"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-chain-ops/interopgen"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/stretchr/testify/require"
)
// TestInteropTrivial tests a simple interop scenario
// Chains A and B exist, but no messages are sent between them
// and in fact no event-logs are emitted by either chain at all.
// A transaction is sent from Alice to Bob on Chain A.
// The balance of Bob on Chain A is checked before and after the tx.
// The balance of Bob on Chain B is checked after the tx.
func TestInteropTrivial(t *testing.T) {
recipe := interopgen.InteropDevRecipe{
L1ChainID: 900100,
L2ChainIDs: []uint64{900200, 900201},
GenesisTimestamp: uint64(time.Now().Unix() + 3), // start chain 3 seconds from now
}
worldResources := worldResourcePaths{
foundryArtifacts: "../../packages/contracts-bedrock/forge-artifacts",
sourceMap: "../../packages/contracts-bedrock",
}
// create a super system from the recipe
// and get the L2 IDs for use in the test
s2 := NewSuperSystem(t, &recipe, worldResources)
ids := s2.L2IDs()
// chainA is the first L2 chain
chainA := ids[0]
// chainB is the second L2 chain
chainB := ids[1]
// create two users on all L2 chains
s2.AddUser("Alice")
s2.AddUser("Bob")
bobAddr := s2.Address(chainA, "Bob")
// check the balance of Bob
clientA := s2.L2GethClient(chainA)
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
bobBalance, err := clientA.BalanceAt(ctx, bobAddr, nil)
require.NoError(t, err)
expectedBalance, _ := big.NewInt(0).SetString("10000000000000000000000000", 10)
require.Equal(t, expectedBalance, bobBalance)
// send a tx from Alice to Bob
s2.SendL2Tx(
chainA,
"Alice",
func(l2Opts *op_e2e.TxOpts) {
l2Opts.ToAddr = &bobAddr
l2Opts.Value = big.NewInt(1000000)
l2Opts.GasFeeCap = big.NewInt(1_000_000_000)
l2Opts.GasTipCap = big.NewInt(1_000_000_000)
},
)
// check the balance of Bob after the tx
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
bobBalance, err = clientA.BalanceAt(ctx, bobAddr, nil)
require.NoError(t, err)
expectedBalance, _ = big.NewInt(0).SetString("10000000000000000001000000", 10)
require.Equal(t, expectedBalance, bobBalance)
// check that the balance of Bob on ChainB hasn't changed
bobAddrB := s2.Address(chainB, "Bob")
clientB := s2.L2GethClient(chainB)
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
bobBalance, err = clientB.BalanceAt(ctx, bobAddrB, nil)
require.NoError(t, err)
expectedBalance, _ = big.NewInt(0).SetString("10000000000000000000000000", 10)
require.Equal(t, expectedBalance, bobBalance)
}
package interop
import (
"context"
"crypto/ecdsa"
"math/big"
"os"
"path"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
bss "github.com/ethereum-optimism/optimism/op-batcher/batcher"
batcherFlags "github.com/ethereum-optimism/optimism/op-batcher/flags"
"github.com/ethereum-optimism/optimism/op-chain-ops/devkeys"
"github.com/ethereum-optimism/optimism/op-chain-ops/foundry"
"github.com/ethereum-optimism/optimism/op-chain-ops/interopgen"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/fakebeacon"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/opnode"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/services"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/setuputils"
"github.com/ethereum-optimism/optimism/op-node/node"
"github.com/ethereum-optimism/optimism/op-node/p2p"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum-optimism/optimism/op-service/dial"
"github.com/ethereum-optimism/optimism/op-service/endpoint"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum-optimism/optimism/op-service/oppprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-service/testlog"
supervisorConfig "github.com/ethereum-optimism/optimism/op-supervisor/config"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor"
)
// SuperSystem is an interface for the system (collection of connected resources).
// it provides a way to get the resources for a network by network ID,
// and a way to get the list of network IDs.
// this is useful for testing multiple network backends:
// interopE2ESystem is the default implementation, but a shim to
// kurtosis or another testing framework could be implemented instead
type SuperSystem interface {
// get the supervisor
Supervisor() *supervisor.SupervisorService
// get the supervisor client
SupervisorClient() *sources.SupervisorClient
// get the batcher for a network
Batcher(network string) *bss.BatcherService
// get the proposer for a network
Proposer(network string) *l2os.ProposerService
// get the opnode for a network
OpNode(network string) *opnode.Opnode
// get the geth instance for a network
L2Geth(network string) *geth.GethInstance
// get the L2 geth client for a network
L2GethClient(network string) *ethclient.Client
// get the secret for a network and role
L2OperatorKey(network string, role devkeys.ChainOperatorRole) ecdsa.PrivateKey
// get the list of network IDs
L2IDs() []string
// register a username to an account on all L2s
AddUser(username string)
// get the user key for a user on an L2
UserKey(id, username string) ecdsa.PrivateKey
// send a transaction on an L2 on the given network, from the given user
SendL2Tx(network string, username string, applyTxOpts op_e2e.TxOptsFn) *types.Receipt
// get the address for a user on an L2
Address(network string, username string) common.Address
}
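Because tests are meant to depend only on this interface, helpers can be written against it and reused with any backend. A minimal hypothetical sketch (not part of this commit; assumes this file's imports):
// sendSimpleTransfer sends a plain value transfer between two registered users
// on the given network, touching only SuperSystem methods, so it would work
// unchanged with a non-default backend such as a future kurtosis shim.
func sendSimpleTransfer(s SuperSystem, network, from, to string, amount *big.Int) *types.Receipt {
	toAddr := s.Address(network, to)
	return s.SendL2Tx(network, from, func(opts *op_e2e.TxOpts) {
		opts.ToAddr = &toAddr
		opts.Value = amount
	})
}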
// NewSuperSystem creates a new SuperSystem from a recipe. It creates an interopE2ESystem.
func NewSuperSystem(t *testing.T, recipe *interopgen.InteropDevRecipe, w worldResourcePaths) SuperSystem {
s2 := &interopE2ESystem{recipe: recipe}
s2.prepare(t, w)
return s2
}
// interopE2ESystem implements the SuperSystem interface
// it prepares network resources and provides access to them
// the functionality is broken down into smaller functions so that
// the system can be prepared iteratively if desired
type interopE2ESystem struct {
t *testing.T
recipe *interopgen.InteropDevRecipe
logger log.Logger
hdWallet *devkeys.MnemonicDevKeys
worldDeployment *interopgen.WorldDeployment
worldOutput *interopgen.WorldOutput
beacon *fakebeacon.FakeBeacon
l1 *geth.GethInstance
l2s map[string]l2Set
l2GethClients map[string]*ethclient.Client
supervisor *supervisor.SupervisorService
superClient *sources.SupervisorClient
}
// l2Set is a set of resources for an L2 chain
type l2Set struct {
chainID *big.Int
opNode *opnode.Opnode
l2Geth *geth.GethInstance
proposer *l2os.ProposerService
batcher *bss.BatcherService
operatorKeys map[devkeys.ChainOperatorRole]ecdsa.PrivateKey
userKeys map[string]ecdsa.PrivateKey
}
// prepareHDWallet creates a new HD wallet to derive keys from
func (s *interopE2ESystem) prepareHDWallet() *devkeys.MnemonicDevKeys {
hdWallet, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic)
require.NoError(s.t, err)
return hdWallet
}
type worldResourcePaths struct {
foundryArtifacts string
sourceMap string
}
// prepareWorld creates the world configuration from the recipe and deploys it
func (s *interopE2ESystem) prepareWorld(w worldResourcePaths) (*interopgen.WorldDeployment, *interopgen.WorldOutput) {
// Build the world configuration from the recipe and the HD wallet
worldCfg, err := s.recipe.Build(s.hdWallet)
require.NoError(s.t, err)
// create a logger for the world configuration
logger := s.logger.New("role", "world")
require.NoError(s.t, worldCfg.Check(logger))
// create the foundry artifacts and source map
foundryArtifacts := foundry.OpenArtifactsDir(w.foundryArtifacts)
sourceMap := foundry.NewSourceMapFS(os.DirFS(w.sourceMap))
// deploy the world, using the logger, foundry artifacts, source map, and world configuration
worldDeployment, worldOutput, err := interopgen.Deploy(logger, foundryArtifacts, sourceMap, worldCfg)
require.NoError(s.t, err)
return worldDeployment, worldOutput
}
// prepareL1 creates the L1 chain resources
func (s *interopE2ESystem) prepareL1() (*fakebeacon.FakeBeacon, *geth.GethInstance) {
// Create a fake Beacon node to hold on to blobs created by the L1 miner, and to serve them to L2
genesisTimestampL1 := s.worldOutput.L1.Genesis.Timestamp
blockTimeL1 := uint64(6)
blobPath := s.t.TempDir()
bcn := fakebeacon.NewBeacon(s.logger.New("role", "l1_cl"),
filepath.Join(blobPath, "l1_cl"), genesisTimestampL1, blockTimeL1)
s.t.Cleanup(func() {
_ = bcn.Close()
})
require.NoError(s.t, bcn.Start("127.0.0.1:0"))
beaconApiAddr := bcn.BeaconAddr()
require.NotEmpty(s.t, beaconApiAddr, "beacon API listener must be up")
l1FinalizedDistance := uint64(3)
l1Clock := clock.SystemClock
// Start the L1 chain
l1Geth, err := geth.InitL1(
blockTimeL1,
l1FinalizedDistance,
s.worldOutput.L1.Genesis,
l1Clock,
filepath.Join(blobPath, "l1_el"),
bcn)
require.NoError(s.t, err)
require.NoError(s.t, l1Geth.Node.Start())
s.t.Cleanup(func() {
_ = l1Geth.Close()
})
return bcn, l1Geth
}
// newOperatorKeysForL2 creates the operator keys for an L2 chain
// it uses an L2Output to determine the chain ID and configuration,
// and then makes a key for each operator role [SequencerP2PRole, ProposerRole, BatcherRole]
func (s *interopE2ESystem) newOperatorKeysForL2(l2Out *interopgen.L2Output) map[devkeys.ChainOperatorRole]ecdsa.PrivateKey {
// Create operatorKeys for the L2 chain actors
operatorKeys := map[devkeys.ChainOperatorRole]ecdsa.PrivateKey{}
// create the sequencer P2P secret
seqP2PSecret, err := s.hdWallet.Secret(devkeys.ChainOperatorKey{
ChainID: l2Out.Genesis.Config.ChainID,
Role: devkeys.SequencerP2PRole,
})
require.NoError(s.t, err)
operatorKeys[devkeys.SequencerP2PRole] = *seqP2PSecret
// create the proposer secret
proposerSecret, err := s.hdWallet.Secret(devkeys.ChainOperatorKey{
ChainID: l2Out.Genesis.Config.ChainID,
Role: devkeys.ProposerRole,
})
require.NoError(s.t, err)
operatorKeys[devkeys.ProposerRole] = *proposerSecret
// create the batcher secret
batcherSecret, err := s.hdWallet.Secret(devkeys.ChainOperatorKey{
ChainID: l2Out.Genesis.Config.ChainID,
Role: devkeys.BatcherRole,
})
require.NoError(s.t, err)
operatorKeys[devkeys.BatcherRole] = *batcherSecret
return operatorKeys
}
// newGethForL2 creates a new Geth instance for an L2 chain
func (s *interopE2ESystem) newGethForL2(id string, l2Out *interopgen.L2Output) *geth.GethInstance {
jwtPath := writeDefaultJWT(s.t)
name := "l2-" + id
l2Geth, err := geth.InitL2(name, l2Out.Genesis, jwtPath)
require.NoError(s.t, err)
require.NoError(s.t, l2Geth.Node.Start())
s.t.Cleanup(func() {
_ = l2Geth.Close()
})
return l2Geth
}
// newNodeForL2 creates a new Opnode for an L2 chain
func (s *interopE2ESystem) newNodeForL2(
id string,
l2Out *interopgen.L2Output,
operatorKeys map[devkeys.ChainOperatorRole]ecdsa.PrivateKey,
l2Geth *geth.GethInstance,
) *opnode.Opnode {
logger := s.logger.New("role", "op-node-"+id)
p2pKey := operatorKeys[devkeys.SequencerP2PRole]
nodeCfg := &node.Config{
L1: &node.PreparedL1Endpoint{
Client: client.NewBaseRPCClient(endpoint.DialRPC(
endpoint.PreferAnyRPC,
s.l1.UserRPC(),
mustDial(s.t, logger))),
TrustRPC: false,
RPCProviderKind: sources.RPCKindDebugGeth,
},
L2: &node.L2EndpointConfig{
L2EngineAddr: l2Geth.AuthRPC().RPC(),
L2EngineJWTSecret: testingJWTSecret,
},
Beacon: &node.L1BeaconEndpointConfig{
BeaconAddr: s.beacon.BeaconAddr(),
},
Driver: driver.Config{
SequencerEnabled: true,
},
Rollup: *l2Out.RollupCfg,
P2PSigner: &p2p.PreparedSigner{
Signer: p2p.NewLocalSigner(&p2pKey)},
RPC: node.RPCConfig{
ListenAddr: "127.0.0.1",
ListenPort: 0,
EnableAdmin: true,
},
Supervisor: &node.SupervisorEndpointConfig{
SupervisorAddr: s.supervisor.RPC(),
},
P2P: nil, // disabled P2P setup for now
L1EpochPollInterval: time.Second * 2,
RuntimeConfigReloadInterval: 0,
Tracer: nil,
Sync: sync.Config{
SyncMode: sync.CLSync,
SkipSyncStartCheck: false,
SupportsPostFinalizationELSync: false,
},
ConfigPersistence: node.DisabledConfigPersistence{},
}
opNode, err := opnode.NewOpnode(logger.New("service", "op-node"),
nodeCfg, func(err error) {
s.t.Error(err)
})
require.NoError(s.t, err)
s.t.Cleanup(func() {
ctx, cancel := context.WithCancel(context.Background())
cancel() // force-quit
_ = opNode.Stop(ctx)
})
return opNode
}
// newProposerForL2 creates a new Proposer for an L2 chain
// it is currently unused, as the generated world does not have a DisputeGameFactoryProxy
// TODO(#11888): name this function "newProposerForL2" and use it in the prepareL2s function when the DisputeGameFactoryProxy is available
func (s *interopE2ESystem) _(
id string,
operatorKeys map[devkeys.ChainOperatorRole]ecdsa.PrivateKey,
opNode *opnode.Opnode,
) *l2os.ProposerService {
proposerSecret := operatorKeys[devkeys.ProposerRole]
logger := s.logger.New("role", "proposer"+id)
proposerCLIConfig := &l2os.CLIConfig{
L1EthRpc: s.l1.UserRPC().RPC(),
RollupRpc: opNode.UserRPC().RPC(),
DGFAddress: s.worldDeployment.L2s[id].DisputeGameFactoryProxy.Hex(),
ProposalInterval: 6 * time.Second,
DisputeGameType: 254, // Fast game type
PollInterval: 500 * time.Millisecond,
TxMgrConfig: setuputils.NewTxMgrConfig(s.l1.UserRPC(), &proposerSecret),
AllowNonFinalized: false,
LogConfig: oplog.CLIConfig{
Level: log.LvlInfo,
Format: oplog.FormatText,
},
}
proposer, err := l2os.ProposerServiceFromCLIConfig(
context.Background(),
"0.0.1",
proposerCLIConfig,
logger.New("service", "proposer"))
require.NoError(s.t, err, "must start proposer")
require.NoError(s.t, proposer.Start(context.Background()))
s.t.Cleanup(func() {
ctx, cancel := context.WithCancel(context.Background())
cancel() // force-quit
_ = proposer.Stop(ctx)
})
return proposer
}
// newBatcherForL2 creates a new Batcher for an L2 chain
func (s *interopE2ESystem) newBatcherForL2(
id string,
operatorKeys map[devkeys.ChainOperatorRole]ecdsa.PrivateKey,
l2Geth *geth.GethInstance,
opNode *opnode.Opnode,
) *bss.BatcherService {
batcherSecret := operatorKeys[devkeys.BatcherRole]
logger := s.logger.New("role", "batcher"+id)
batcherCLIConfig := &bss.CLIConfig{
L1EthRpc: s.l1.UserRPC().RPC(),
L2EthRpc: l2Geth.UserRPC().RPC(),
RollupRpc: opNode.UserRPC().RPC(),
MaxPendingTransactions: 1,
MaxChannelDuration: 1,
MaxL1TxSize: 120_000,
TestUseMaxTxSizeForBlobs: false,
TargetNumFrames: 1,
ApproxComprRatio: 0.4,
SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond,
TxMgrConfig: setuputils.NewTxMgrConfig(s.l1.UserRPC(), &batcherSecret),
LogConfig: oplog.CLIConfig{
Level: log.LevelInfo,
Format: oplog.FormatText,
},
Stopped: false,
BatchType: derive.SpanBatchType,
MaxBlocksPerSpanBatch: 10,
DataAvailabilityType: batcherFlags.CalldataType,
CompressionAlgo: derive.Brotli,
}
batcher, err := bss.BatcherServiceFromCLIConfig(
context.Background(), "0.0.1", batcherCLIConfig,
logger.New("service", "batcher"))
require.NoError(s.t, err)
require.NoError(s.t, batcher.Start(context.Background()))
s.t.Cleanup(func() {
ctx, cancel := context.WithCancel(context.Background())
cancel() // force-quit
_ = batcher.Stop(ctx)
})
return batcher
}
// newL2 creates a new L2, starting with the L2Output from the world configuration
// and iterating through the resources needed for the L2.
// it returns an l2Set with the resources for the L2
func (s *interopE2ESystem) newL2(id string, l2Out *interopgen.L2Output) l2Set {
operatorKeys := s.newOperatorKeysForL2(l2Out)
l2Geth := s.newGethForL2(id, l2Out)
opNode := s.newNodeForL2(id, l2Out, operatorKeys, l2Geth)
// TODO(#11886): proposer does not work with the generated world as there is no DisputeGameFactoryProxy
//proposer := s.newProposerForL2(id, operatorKeys, opNode)
batcher := s.newBatcherForL2(id, operatorKeys, l2Geth, opNode)
return l2Set{
chainID: l2Out.Genesis.Config.ChainID,
opNode: opNode,
l2Geth: l2Geth,
proposer: nil,
batcher: batcher,
operatorKeys: operatorKeys,
userKeys: make(map[string]ecdsa.PrivateKey),
}
}
// prepareSupervisor creates a new supervisor for the system
func (s *interopE2ESystem) prepareSupervisor() *supervisor.SupervisorService {
logger := s.logger.New("role", "supervisor")
cfg := supervisorConfig.Config{
MetricsConfig: metrics.CLIConfig{
Enabled: false,
},
PprofConfig: oppprof.CLIConfig{
ListenEnabled: false,
},
LogConfig: oplog.CLIConfig{
Level: log.LevelTrace,
Format: oplog.FormatText,
},
RPC: oprpc.CLIConfig{
ListenAddr: "127.0.0.1",
ListenPort: 0,
EnableAdmin: true,
},
L2RPCs: []string{},
Datadir: path.Join(s.t.TempDir(), "supervisor"),
}
for id := range s.l2s {
cfg.L2RPCs = append(cfg.L2RPCs, s.l2s[id].l2Geth.UserRPC().RPC())
}
// Create the supervisor with the configuration
super, err := supervisor.SupervisorFromConfig(context.Background(), &cfg, logger)
require.NoError(s.t, err)
// Start the supervisor
err = super.Start(context.Background())
require.NoError(s.t, err)
s.t.Cleanup(func() {
ctx, cancel := context.WithCancel(context.Background())
cancel() // force-quit
_ = super.Stop(ctx)
})
return super
}
// SupervisorClient returns the supervisor client for the system, creating it if it doesn't exist
func (s *interopE2ESystem) SupervisorClient() *sources.SupervisorClient {
if s.superClient != nil {
return s.superClient
}
cl, err := client.NewRPC(context.Background(), s.logger, s.supervisor.RPC())
require.NoError(s.t, err, "failed to dial supervisor RPC")
superClient := sources.NewSupervisorClient(cl)
s.superClient = superClient
return superClient
}
// prepare sets up the system for testing.
// components are built iteratively, so that they can be reused or modified,
// but their creation can't be safely skipped or reordered at this time.
func (s *interopE2ESystem) prepare(t *testing.T, w worldResourcePaths) {
s.t = t
s.logger = testlog.Logger(s.t, log.LevelInfo)
s.hdWallet = s.prepareHDWallet()
s.worldDeployment, s.worldOutput = s.prepareWorld(w)
// the supervisor and client are created first so that the L2s can use the supervisor
s.supervisor = s.prepareSupervisor()
s.beacon, s.l1 = s.prepareL1()
s.l2s = s.prepareL2s()
// add the L2 RPCs to the supervisor now that the L2s are created
for _, l2 := range s.l2s {
err := s.SupervisorClient().AddL2RPC(context.Background(), l2.l2Geth.UserRPC().RPC())
require.NoError(s.t, err, "failed to add L2 RPC to supervisor")
}
}
// AddUser adds a user to the system by creating a user key for each L2.
// each user key is stored in the L2's userKeys map.
// because all user maps start empty, a user's index is the same on every L2,
// but if these maps were ever allowed to diverge, the index for a username could diverge as well.
// NOTE: The first 20 accounts are implicitly funded by the Recipe's World Deployment
// see: op-chain-ops/interopgen/recipe.go
// TODO(#11887): specify the funded account quantity in the recipe so SuperSystems know which accounts are funded
func (s *interopE2ESystem) AddUser(username string) {
for id, l2 := range s.l2s {
bigID, _ := big.NewInt(0).SetString(id, 10)
userSecret, _ := s.hdWallet.Secret(
devkeys.ChainUserKey{
ChainID: bigID,
Index: uint64(len(l2.userKeys)),
},
)
l2.userKeys[username] = *userSecret
}
}
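To illustrate the note above, a hypothetical helper (not part of this commit; assumes this file's imports): registering a username once makes it resolvable on every L2, and the derived accounts stay within the implicitly funded range as long as fewer than 20 users have been added.
// fundedUserAddresses registers a user on all L2s and collects the per-chain
// address derived for that username. The derivation index is the same on each
// chain, but the addresses generally differ because the chain ID is part of the
// key path.
func fundedUserAddresses(s SuperSystem, username string) map[string]common.Address {
	s.AddUser(username)
	addrs := make(map[string]common.Address, len(s.L2IDs()))
	for _, id := range s.L2IDs() {
		addrs[id] = s.Address(id, username)
	}
	return addrs
}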
// UserKey returns the user key for a user on an L2
func (s *interopE2ESystem) UserKey(id, username string) ecdsa.PrivateKey {
return s.l2s[id].userKeys[username]
}
// Address returns the address for a user on an L2
func (s *interopE2ESystem) Address(id, username string) common.Address {
secret := s.UserKey(id, username)
require.NotNil(s.t, secret, "no secret found for user %s", username)
return crypto.PubkeyToAddress(secret.PublicKey)
}
// prepareL2s creates the L2s for the system, returning a map of L2s
func (s *interopE2ESystem) prepareL2s() map[string]l2Set {
l2s := make(map[string]l2Set)
for id, l2Out := range s.worldOutput.L2s {
l2s[id] = s.newL2(id, l2Out)
}
return l2s
}
func (s *interopE2ESystem) L2GethClient(id string) *ethclient.Client {
// guard: check if the client already exists and return it in that case
nodeClient, ok := s.l2GethClients[id]
if ok {
return nodeClient
}
// create a new client for the L2 from the L2's geth instance
var ethClient services.EthInstance = s.L2Geth(id)
rpcEndpoint := ethClient.UserRPC()
rpcCl := endpoint.DialRPC(
endpoint.PreferAnyRPC,
rpcEndpoint,
func(v string) *rpc.Client {
logger := testlog.Logger(s.t, log.LevelInfo).New("node", id)
cl, err := dial.DialRPCClientWithTimeout(context.Background(), 30*time.Second, logger, v)
require.NoError(s.t, err, "failed to dial eth node instance %s", id)
return cl
})
nodeClient = ethclient.NewClient(rpcCl)
// register the client so it can be reused
s.addL2GethClient(id, nodeClient)
return nodeClient
}
func (sys *interopE2ESystem) addL2GethClient(name string, client *ethclient.Client) {
if sys.l2GethClients == nil {
sys.l2GethClients = make(map[string]*ethclient.Client)
}
sys.l2GethClients[name] = client
}
// getter functions for L1 entities
func (s *interopE2ESystem) Supervisor() *supervisor.SupervisorService {
return s.supervisor
}
// getter functions for the individual L2s
func (s *interopE2ESystem) Batcher(id string) *bss.BatcherService {
return s.l2s[id].batcher
}
func (s *interopE2ESystem) Proposer(id string) *l2os.ProposerService {
return s.l2s[id].proposer
}
func (s *interopE2ESystem) OpNode(id string) *opnode.Opnode {
return s.l2s[id].opNode
}
func (s *interopE2ESystem) L2Geth(id string) *geth.GethInstance {
return s.l2s[id].l2Geth
}
func (s *interopE2ESystem) L2OperatorKey(id string, role devkeys.ChainOperatorRole) ecdsa.PrivateKey {
return s.l2s[id].operatorKeys[role]
}
// L2IDs returns the list of L2 IDs, which are the keys of the L2s map
func (s *interopE2ESystem) L2IDs() []string {
ids := make([]string, 0, len(s.l2s))
for id := range s.l2s {
ids = append(ids, id)
}
return ids
}
// SendL2Tx sends an L2 transaction to the L2 with the given ID.
// it acts as a wrapper around op-e2e.SendL2TxWithID
// and uses the L2's chain ID, the sender's user key, and the L2's geth client.
func (s *interopE2ESystem) SendL2Tx(
id string,
sender string,
applyTxOpts op_e2e.TxOptsFn,
) *types.Receipt {
senderSecret := s.UserKey(id, sender)
require.NotNil(s.t, senderSecret, "no secret found for sender %s", sender)
return op_e2e.SendL2TxWithID(
s.t,
s.l2s[id].chainID,
s.L2GethClient(id),
&senderSecret,
applyTxOpts)
}
func mustDial(t *testing.T, logger log.Logger) func(v string) *rpc.Client {
return func(v string) *rpc.Client {
cl, err := dial.DialRPCClientWithTimeout(context.Background(), 30*time.Second, logger, v)
require.NoError(t, err, "failed to dial")
return cl
}
}
var testingJWTSecret = [32]byte{123}
func writeDefaultJWT(t testing.TB) string {
// Sadly the geth node config cannot load the JWT secret from memory; it has to be a file
jwtPath := path.Join(t.TempDir(), "jwt_secret")
if err := os.WriteFile(jwtPath, []byte(hexutil.Encode(testingJWTSecret[:])), 0o600); err != nil {
t.Fatalf("failed to prepare jwt file for geth: %v", err)
}
return jwtPath
}
......@@ -18,6 +18,7 @@ import (
con "github.com/ethereum-optimism/optimism/op-conductor/conductor"
"github.com/ethereum-optimism/optimism/op-conductor/consensus"
conrpc "github.com/ethereum-optimism/optimism/op-conductor/rpc"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/setuputils"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
rollupNode "github.com/ethereum-optimism/optimism/op-node/node"
"github.com/ethereum-optimism/optimism/op-node/rollup"
......@@ -291,7 +292,7 @@ func setupBatcher(t *testing.T, sys *System, conductors map[string]*conductor) {
ApproxComprRatio: 0.4,
SubSafetyMargin: 4,
PollInterval: 1 * time.Second,
TxMgrConfig: newTxMgrConfig(sys.EthInstances["l1"].UserRPC().RPC(), sys.Cfg.Secrets.Batcher),
TxMgrConfig: setuputils.NewTxMgrConfig(sys.EthInstances["l1"].UserRPC(), sys.Cfg.Secrets.Batcher),
LogConfig: oplog.CLIConfig{
Level: log.LevelDebug,
Format: oplog.FormatText,
......
......@@ -50,6 +50,7 @@ import (
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/opnode"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/services"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/setuputils"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
rollupNode "github.com/ethereum-optimism/optimism/op-node/node"
"github.com/ethereum-optimism/optimism/op-node/p2p"
......@@ -68,7 +69,6 @@ import (
"github.com/ethereum-optimism/optimism/op-service/predeploys"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
)
const (
......@@ -82,20 +82,6 @@ var (
genesisTime = hexutil.Uint64(0)
)
func newTxMgrConfig(l1Addr string, privKey *ecdsa.PrivateKey) txmgr.CLIConfig {
return txmgr.CLIConfig{
L1RPCURL: l1Addr,
PrivateKey: hexPriv(privKey),
NumConfirmations: 1,
SafeAbortNonceTooLowCount: 3,
FeeLimitMultiplier: 5,
ResubmissionTimeout: 3 * time.Second,
ReceiptQueryInterval: 50 * time.Millisecond,
NetworkTimeout: 2 * time.Second,
TxNotInMempoolTimeout: 2 * time.Minute,
}
}
func DefaultSystemConfig(t testing.TB) SystemConfig {
config.ExternalL2TestParms.SkipIfNecessary(t)
......@@ -789,7 +775,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
ProposalInterval: 6 * time.Second,
DisputeGameType: 254, // Fast game type
PollInterval: 500 * time.Millisecond,
TxMgrConfig: newTxMgrConfig(sys.EthInstances[RoleL1].UserRPC().RPC(), cfg.Secrets.Proposer),
TxMgrConfig: setuputils.NewTxMgrConfig(sys.EthInstances[RoleL1].UserRPC(), cfg.Secrets.Proposer),
AllowNonFinalized: cfg.NonFinalizedProposals,
LogConfig: oplog.CLIConfig{
Level: log.LvlInfo,
......@@ -802,7 +788,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
RollupRpc: sys.RollupNodes[RoleSeq].UserRPC().RPC(),
L2OOAddress: config.L1Deployments.L2OutputOracleProxy.Hex(),
PollInterval: 500 * time.Millisecond,
TxMgrConfig: newTxMgrConfig(sys.EthInstances[RoleL1].UserRPC().RPC(), cfg.Secrets.Proposer),
TxMgrConfig: setuputils.NewTxMgrConfig(sys.EthInstances[RoleL1].UserRPC(), cfg.Secrets.Proposer),
AllowNonFinalized: cfg.NonFinalizedProposals,
LogConfig: oplog.CLIConfig{
Level: log.LvlInfo,
......@@ -865,7 +851,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
ApproxComprRatio: 0.4,
SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond,
TxMgrConfig: newTxMgrConfig(sys.EthInstances[RoleL1].UserRPC().RPC(), cfg.Secrets.Batcher),
TxMgrConfig: setuputils.NewTxMgrConfig(sys.EthInstances[RoleL1].UserRPC(), cfg.Secrets.Batcher),
LogConfig: oplog.CLIConfig{
Level: log.LevelInfo,
Format: oplog.FormatText,
......@@ -984,11 +970,6 @@ func (cfg SystemConfig) L2ChainIDBig() *big.Int {
return new(big.Int).SetUint64(cfg.DeployConfig.L2ChainID)
}
func hexPriv(in *ecdsa.PrivateKey) string {
b := e2eutils.EncodePrivKey(in)
return hexutil.Encode(b)
}
func (sys *System) RollupClient(name string) *sources.RollupClient {
rollupClient, ok := sys.rollupClients[name]
if ok {
......
......@@ -84,11 +84,11 @@ func defaultDepositTxOpts(opts *bind.TransactOpts) *DepositTxOpts {
// The supplied privKey is used to specify the account to send from and the transaction is sent to the supplied l2Client
// Transaction options and expected status can be configured in the applyTxOpts function by modifying the supplied TxOpts
// Will verify that the transaction is included with the expected status on l2Client and any clients added to TxOpts.VerifyClients
func SendL2Tx(t *testing.T, cfg SystemConfig, l2Client *ethclient.Client, privKey *ecdsa.PrivateKey, applyTxOpts TxOptsFn) *types.Receipt {
func SendL2TxWithID(t *testing.T, chainID *big.Int, l2Client *ethclient.Client, privKey *ecdsa.PrivateKey, applyTxOpts TxOptsFn) *types.Receipt {
opts := defaultTxOpts()
applyTxOpts(opts)
tx := types.MustSignNewTx(privKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
ChainID: cfg.L2ChainIDBig(),
tx := types.MustSignNewTx(privKey, types.LatestSignerForChainID(chainID), &types.DynamicFeeTx{
ChainID: chainID,
Nonce: opts.Nonce, // Already have deposit
To: opts.ToAddr,
Value: opts.Value,
......@@ -115,6 +115,10 @@ func SendL2Tx(t *testing.T, cfg SystemConfig, l2Client *ethclient.Client, privKe
return receipt
}
func SendL2Tx(t *testing.T, cfg SystemConfig, l2Client *ethclient.Client, privKey *ecdsa.PrivateKey, applyTxOpts TxOptsFn) *types.Receipt {
return SendL2TxWithID(t, cfg.L2ChainIDBig(), l2Client, privKey, applyTxOpts)
}
type TxOptsFn func(opts *TxOpts)
type TxOpts struct {
......
......@@ -21,10 +21,29 @@ func NewSupervisorClient(client client.RPC) *SupervisorClient {
}
}
func (cl *SupervisorClient) AddL2RPC(
ctx context.Context,
rpc string,
) error {
var result error
err := cl.client.CallContext(
ctx,
&result,
"admin_addL2RPC",
rpc)
if err != nil {
return fmt.Errorf("failed to Add L2 to Supervisor (rpc: %s): %w", rpc, err)
}
return result
}
func (cl *SupervisorClient) CheckBlock(ctx context.Context,
chainID types.ChainID, blockHash common.Hash, blockNumber uint64) (types.SafetyLevel, error) {
var result types.SafetyLevel
err := cl.client.CallContext(ctx, &result, "interop_checkBlock",
err := cl.client.CallContext(
ctx,
&result,
"supervisor_checkBlock",
(*hexutil.U256)(&chainID), blockHash, hexutil.Uint64(blockNumber))
if err != nil {
return types.Unsafe, fmt.Errorf("failed to check Block %s:%d (chain %s): %w", blockHash, blockNumber, chainID, err)
......
......@@ -25,7 +25,6 @@ import (
)
type SupervisorBackend struct {
ctx context.Context
started atomic.Bool
logger log.Logger
m Metrics
......@@ -82,7 +81,7 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg
// it does not expect to be called after the backend has been started
func (su *SupervisorBackend) addFromRPC(ctx context.Context, logger log.Logger, rpc string) error {
// create the rpc client, which yields the chain id
rpcClient, chainID, err := createRpcClient(su.ctx, logger, rpc)
rpcClient, chainID, err := createRpcClient(ctx, logger, rpc)
if err != nil {
return err
}
......@@ -173,9 +172,11 @@ func (su *SupervisorBackend) AddL2RPC(ctx context.Context, rpc string) error {
if err := su.Stop(ctx); err != nil {
return fmt.Errorf("failed to stop backend: %w", err)
}
su.logger.Info("temporarily stopped the backend to add a new L2 RPC", "rpc", rpc)
if err := su.addFromRPC(ctx, su.logger, rpc); err != nil {
return fmt.Errorf("failed to add chain monitor: %w", err)
}
su.logger.Info("added the new L2 RPC, starting supervisor again", "rpc", rpc)
return su.Start(ctx)
}
......@@ -231,7 +232,10 @@ func (su *SupervisorBackend) CheckBlock(chainID *hexutil.U256, blockHash common.
safest := types.CrossUnsafe
// find the last log index in the block
i, err := su.db.LastLogInBlock(types.ChainID(*chainID), uint64(blockNumber))
if err != nil {
// TODO(#11836) checking for EOF as a non-error case is a bit of a code smell
// and could potentially be incorrect if the given block number is intentionally far in the future
if err != nil && !errors.Is(err, io.EOF) {
su.logger.Error("failed to scan block", "err", err)
return types.Invalid, fmt.Errorf("failed to scan block: %w", err)
}
// at this point we have the extent of the block, and we can check if it is safe by various criteria
......
......@@ -49,10 +49,13 @@ func NewChainProcessor(log log.Logger, client BlockByNumberSource, chain types.C
}
func (s *ChainProcessor) OnNewHead(ctx context.Context, head eth.L1BlockRef) {
s.log.Debug("Processing chain", "chain", s.chain, "head", head)
if head.Number <= s.lastBlock.Number {
s.log.Info("head is not newer than last processed block", "head", head, "lastBlock", s.lastBlock)
return
}
for s.lastBlock.Number+1 < head.Number {
s.log.Debug("Filling in skipped block", "chain", s.chain, "lastBlock", s.lastBlock, "head", head)
blockNum := s.lastBlock.Number + 1
nextBlock, err := s.client.L1BlockRefByNumber(ctx, blockNum)
if err != nil {
......
......@@ -164,6 +164,7 @@ func (su *SupervisorService) Start(ctx context.Context) error {
}
su.metrics.RecordUp()
su.log.Info("JSON-RPC Server started", "endpoint", su.rpcServer.Endpoint())
return nil
}
......@@ -171,7 +172,7 @@ func (su *SupervisorService) Stop(ctx context.Context) error {
if !su.closing.CompareAndSwap(false, true) {
return nil // already closing
}
su.log.Info("Stopping JSON-RPC server")
var result error
if su.rpcServer != nil {
if err := su.rpcServer.Stop(); err != nil {
......@@ -193,9 +194,16 @@ func (su *SupervisorService) Stop(ctx context.Context) error {
result = errors.Join(result, fmt.Errorf("failed to stop metrics server: %w", err))
}
}
su.log.Info("JSON-RPC server stopped")
return result
}
func (su *SupervisorService) Stopped() bool {
return su.closing.Load()
}
func (su *SupervisorService) RPC() string {
// the RPC endpoint is assumed to be HTTP
// TODO(#11032): make this flexible for ws if the server supports it
return "http://" + su.rpcServer.Endpoint()
}