Commit eac4e593 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into willc/changesets-one-branch

parents bb54b711 34175ef7
package op_e2e

import (
	"context"
	"os"
	"os/exec"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// BuildOpProgramClient builds the `op-program` client executable and returns the path to the resulting executable
func BuildOpProgramClient(t *testing.T) string {
	t.Log("Building op-program-client")
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	cmd := exec.CommandContext(ctx, "make", "op-program-client")
	cmd.Dir = "../op-program"
	cmd.Stdout = os.Stdout // for debugging
	cmd.Stderr = os.Stderr // for debugging
	require.NoError(t, cmd.Run(), "Failed to build op-program-client")
	t.Log("Built op-program-client successfully")
	return "../op-program/bin/op-program-client"
}
@@ -9,11 +9,9 @@ import (
 	"github.com/ethereum-optimism/optimism/op-node/client"
 	"github.com/ethereum-optimism/optimism/op-node/sources"
 	"github.com/ethereum-optimism/optimism/op-node/testlog"
-	oppcl "github.com/ethereum-optimism/optimism/op-program/client"
 	"github.com/ethereum-optimism/optimism/op-program/client/driver"
 	opp "github.com/ethereum-optimism/optimism/op-program/host"
 	oppconf "github.com/ethereum-optimism/optimism/op-program/host/config"
-	oplog "github.com/ethereum-optimism/optimism/op-service/log"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
@@ -21,18 +19,6 @@ import (
 	"github.com/stretchr/testify/require"
 )

-// bypass the test runnner if running client to execute the fpp directly
-func init() {
-	if !opp.RunningProgramInClient() {
-		return
-	}
-	logger := oplog.NewLogger(oplog.CLIConfig{
-		Level:  "debug",
-		Format: "text",
-	})
-	oppcl.Main(logger)
-}
-
 func TestVerifyL2OutputRoot(t *testing.T) {
 	testVerifyL2OutputRoot(t, false)
 }
@@ -115,7 +101,10 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool) {
 	fppConfig.L1URL = sys.NodeEndpoint("l1")
 	fppConfig.L2URL = sys.NodeEndpoint("sequencer")
 	fppConfig.DataDir = preimageDir
-	fppConfig.Detached = detached
+	if detached {
+		// When running in detached mode we need to compile the client executable since it will be called directly.
+		fppConfig.ExecCmd = BuildOpProgramClient(t)
+	}

 	// Check the FPP confirms the expected output
 	t.Log("Running fault proof in fetching mode")
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"os"

-	cl "github.com/ethereum-optimism/optimism/op-program/client"
 	"github.com/ethereum-optimism/optimism/op-program/client/driver"
 	"github.com/ethereum-optimism/optimism/op-program/host"
 	"github.com/ethereum-optimism/optimism/op-program/host/config"
@@ -37,11 +36,6 @@ var VersionWithMeta = func() string {
 }()

 func main() {
-	if host.RunningProgramInClient() {
-		logger := oplog.NewLogger(oplog.DefaultCLIConfig())
-		cl.Main(logger)
-		panic("Client main should have exited process")
-	}
 	args := os.Args
 	if err := run(args, host.FaultProofProgram); errors.Is(err, driver.ErrClaimNotValid) {
 		log.Crit("Claim is invalid", "err", err)
@@ -220,22 +220,15 @@ func TestL2BlockNumber(t *testing.T) {
 	})
 }

-func TestDetached(t *testing.T) {
-	t.Run("DefaultFalse", func(t *testing.T) {
-		cfg := configForArgs(t, addRequiredArgs())
-		require.False(t, cfg.Detached)
-	})
-	t.Run("Enabled", func(t *testing.T) {
-		cfg := configForArgs(t, addRequiredArgs("--detached"))
-		require.True(t, cfg.Detached)
-	})
-	t.Run("EnabledWithArg", func(t *testing.T) {
-		cfg := configForArgs(t, addRequiredArgs("--detached=true"))
-		require.True(t, cfg.Detached)
-	})
-	t.Run("Disabled", func(t *testing.T) {
-		cfg := configForArgs(t, addRequiredArgs("--detached=false"))
-		require.False(t, cfg.Detached)
-	})
+func TestExec(t *testing.T) {
+	t.Run("DefaultEmpty", func(t *testing.T) {
+		cfg := configForArgs(t, addRequiredArgs())
+		require.Equal(t, "", cfg.ExecCmd)
+	})
+	t.Run("Set", func(t *testing.T) {
+		cmd := "/bin/echo"
+		cfg := configForArgs(t, addRequiredArgs("--exec", cmd))
+		require.Equal(t, cmd, cfg.ExecCmd)
+	})
 }
@@ -49,8 +49,9 @@ type Config struct {
 	L2ClaimBlockNumber uint64
 	// L2ChainConfig is the op-geth chain config for the L2 execution engine
 	L2ChainConfig *params.ChainConfig
-	// Detached indicates that the program runs as a separate process
-	Detached bool
+	// ExecCmd specifies the client program to execute in a separate process.
+	// If unset, the fault proof client is run in the same process.
+	ExecCmd string
 }

 func (c *Config) Check() error {

@@ -148,7 +149,7 @@ func NewConfigFromCLI(ctx *cli.Context) (*Config, error) {
 		L1URL:      ctx.GlobalString(flags.L1NodeAddr.Name),
 		L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
 		L1RPCKind:  sources.RPCProviderKind(ctx.GlobalString(flags.L1RPCProviderKind.Name)),
-		Detached:   ctx.GlobalBool(flags.Detached.Name),
+		ExecCmd:    ctx.String(flags.Exec.Name),
 	}, nil
 }
@@ -81,10 +81,10 @@ var (
 			return &out
 		}(),
 	}
-	Detached = cli.BoolFlag{
-		Name:   "detached",
-		Usage:  "Run the program as a separate process detached from the host",
-		EnvVar: service.PrefixEnvVar(envVarPrefix, "DETACHED"),
+	Exec = cli.StringFlag{
+		Name:   "exec",
+		Usage:  "Run the specified client program as a separate process detached from the host. Default is to run the client program in the host process.",
+		EnvVar: service.PrefixEnvVar(envVarPrefix, "EXEC"),
 	}
 )

@@ -106,7 +106,7 @@ var programFlags = []cli.Flag{
 	L1NodeAddr,
 	L1TrustRPC,
 	L1RPCProviderKind,
-	Detached,
+	Exec,
 }

 func init() {
@@ -27,13 +27,6 @@ type L2Source struct {
 	*sources.DebugClient
 }

-const opProgramChildEnvName = "OP_PROGRAM_CHILD"
-
-func RunningProgramInClient() bool {
-	value, _ := os.LookupEnv(opProgramChildEnvName)
-	return value == "true"
-}
-
 // FaultProofProgram is the programmatic entry-point for the fault proof program
 func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
 	if err := cfg.Check(); err != nil {

@@ -96,8 +89,8 @@ func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
 	routeHints(logger, hHost, hinter)

 	var cmd *exec.Cmd
-	if cfg.Detached {
-		cmd = exec.CommandContext(ctx, os.Args[0])
+	if cfg.ExecCmd != "" {
+		cmd = exec.CommandContext(ctx, cfg.ExecCmd)
 		cmd.ExtraFiles = make([]*os.File, cl.MaxFd-3) // not including stdin, stdout and stderr
 		cmd.ExtraFiles[cl.HClientRFd-3] = hClientRW.Reader()
 		cmd.ExtraFiles[cl.HClientWFd-3] = hClientRW.Writer()

@@ -105,7 +98,6 @@ func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
 		cmd.ExtraFiles[cl.PClientWFd-3] = pClientRW.Writer()
 		cmd.Stdout = os.Stdout // for debugging
 		cmd.Stderr = os.Stderr // for debugging
-		cmd.Env = append(os.Environ(), fmt.Sprintf("%s=true", opProgramChildEnvName))

 		err := cmd.Start()
 		if err != nil {
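A note on the `-3` offsets in the hunk above: Go's `os/exec` exposes `cmd.ExtraFiles[i]` to the child process as file descriptor `3+i`, since descriptors 0-2 are stdin, stdout and stderr, which is why the `cl.*Fd` constants are adjusted by 3 when indexing into `ExtraFiles`. A minimal, self-contained sketch of that mechanism, separate from this change (the `IS_CHILD` variable and pipe name are illustrative, not part of the diff):

package main

import (
	"fmt"
	"io"
	"os"
	"os/exec"
)

func main() {
	if os.Getenv("IS_CHILD") == "1" {
		// In the child, ExtraFiles[0] shows up as file descriptor 3.
		pipe := os.NewFile(3, "host-pipe")
		data, err := io.ReadAll(pipe)
		if err != nil {
			panic(err)
		}
		fmt.Printf("child read: %s\n", data)
		return
	}

	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	// Re-run this binary as the child and hand it the read end of the pipe.
	cmd := exec.Command(os.Args[0])
	cmd.Env = append(os.Environ(), "IS_CHILD=1")
	cmd.ExtraFiles = []*os.File{r} // ExtraFiles[i] becomes fd 3+i in the child
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	r.Close() // the parent no longer needs the read end

	if _, err := w.Write([]byte("hello from the host")); err != nil {
		panic(err)
	}
	w.Close()

	if err := cmd.Wait(); err != nil {
		panic(err)
	}
}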
@@ -591,9 +591,17 @@ func (b *BackendGroup) Forward(ctx context.Context, rpcReqs []*RPCReq, isBatch b
 		return nil, nil
 	}

+	backends := b.Backends
+
+	// When `consensus_aware` is set to `true`, the backend group acts as a load balancer
+	// serving traffic from any backend that agrees in the consensus group
+	if b.Consensus != nil {
+		backends = b.loadBalancedConsensusGroup()
+	}
+
 	rpcRequestsTotal.Inc()

-	for _, back := range b.Backends {
+	for _, back := range backends {
 		res, err := back.Forward(ctx, rpcReqs, isBatch)
 		if errors.Is(err, ErrMethodNotWhitelisted) {
 			return nil, err

@@ -670,6 +678,40 @@ func (b *BackendGroup) ProxyWS(ctx context.Context, clientConn *websocket.Conn,
 	return nil, ErrNoBackends
 }

+func (b *BackendGroup) loadBalancedConsensusGroup() []*Backend {
+	cg := b.Consensus.GetConsensusGroup()
+
+	backendsHealthy := make([]*Backend, 0, len(cg))
+	backendsDegraded := make([]*Backend, 0, len(cg))
+	// separate into healthy, degraded and unhealthy backends
+	for _, be := range cg {
+		// unhealthy are filtered out and not attempted
+		if !be.IsHealthy() {
+			continue
+		}
+		if be.IsDegraded() {
+			backendsDegraded = append(backendsDegraded, be)
+			continue
+		}
+		backendsHealthy = append(backendsHealthy, be)
+	}
+
+	// shuffle both slices
+	r := rand.New(rand.NewSource(time.Now().UnixNano()))
+	r.Shuffle(len(backendsHealthy), func(i, j int) {
+		backendsHealthy[i], backendsHealthy[j] = backendsHealthy[j], backendsHealthy[i]
+	})
+	r.Shuffle(len(backendsDegraded), func(i, j int) {
+		backendsDegraded[i], backendsDegraded[j] = backendsDegraded[j], backendsDegraded[i]
+	})
+
+	// healthy are put into a priority position
+	// degraded backends are used as fallback
+	backendsHealthy = append(backendsHealthy, backendsDegraded...)
+	return backendsHealthy
+}
+
 func calcBackoff(i int) time.Duration {
 	jitter := float64(rand.Int63n(250))
 	ms := math.Min(math.Pow(2, float64(i))*1000+jitter, 3000)
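A side note on `loadBalancedConsensusGroup` above: unhealthy backends are dropped entirely, and the remaining backends are shuffled within their tier so that healthy backends are always tried before degraded ones. A small standalone sketch of the same partition-shuffle-concatenate idea, using a simplified `backend` type rather than proxyd's (illustrative only):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

type backend struct {
	name     string
	healthy  bool
	degraded bool
}

// prioritize drops unhealthy backends and returns the rest with healthy
// backends (shuffled) first and degraded backends (shuffled) as a fallback.
func prioritize(group []backend) []backend {
	healthy := make([]backend, 0, len(group))
	degraded := make([]backend, 0, len(group))
	for _, be := range group {
		switch {
		case !be.healthy:
			continue // never attempted
		case be.degraded:
			degraded = append(degraded, be)
		default:
			healthy = append(healthy, be)
		}
	}

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	r.Shuffle(len(healthy), func(i, j int) { healthy[i], healthy[j] = healthy[j], healthy[i] })
	r.Shuffle(len(degraded), func(i, j int) { degraded[i], degraded[j] = degraded[j], degraded[i] })

	return append(healthy, degraded...)
}

func main() {
	group := []backend{
		{name: "node1", healthy: true},
		{name: "node2", healthy: true, degraded: true},
		{name: "node3", healthy: false},
		{name: "node4", healthy: true},
	}
	for _, be := range prioritize(group) {
		fmt.Println(be.name) // node1/node4 in random order, then node2; node3 is dropped
	}
}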
@@ -3,6 +3,7 @@ package integration_tests
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"net/http"
 	"os"
 	"path"
@@ -47,6 +48,7 @@ func TestConsensus(t *testing.T) {
 	ctx := context.Background()
 	svr, shutdown, err := proxyd.Start(config)
 	require.NoError(t, err)
+	client := NewProxydClient("http://127.0.0.1:8545")
 	defer shutdown()

 	bg := svr.BackendGroups["node"]
@@ -76,7 +78,6 @@ func TestConsensus(t *testing.T) {
 		h2.ResetOverrides()
 		bg.Consensus.Unban()

-		// advance latest on node2 to 0x2
 		h1.AddOverride(&ms.MethodTemplate{
 			Method: "net_peerCount",
 			Block:  "",
@@ -355,6 +356,83 @@ func TestConsensus(t *testing.T) {
 		// should resolve to 0x1, the highest common ancestor
 		require.Equal(t, "0x1", bg.Consensus.GetConsensusBlockNumber().String())
 	})
+
+	t.Run("load balancing should hit both backends", func(t *testing.T) {
+		h1.ResetOverrides()
+		h2.ResetOverrides()
+		bg.Consensus.Unban()
+
+		for _, be := range bg.Backends {
+			bg.Consensus.UpdateBackend(ctx, be)
+		}
+		bg.Consensus.UpdateBackendGroupConsensus(ctx)
+		require.Equal(t, 2, len(bg.Consensus.GetConsensusGroup()))
+
+		node1.Reset()
+		node2.Reset()
+		require.Equal(t, 0, len(node1.Requests()))
+		require.Equal(t, 0, len(node2.Requests()))
+
+		// there is a random component to this test,
+		// since our round-robin implementation shuffles the ordering
+		// to achieve uniform distribution
+		// so we just make 100 requests per backend and expect the number of requests to be somewhat balanced
+		// i.e. each backend should be hit minimally by at least 50% of the requests
+		consensusGroup := bg.Consensus.GetConsensusGroup()
+		numberReqs := len(consensusGroup) * 100
+		for numberReqs > 0 {
+			_, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x1", false})
+			require.NoError(t, err)
+			require.Equal(t, 200, statusCode)
+			numberReqs--
+		}
+
+		msg := fmt.Sprintf("n1 %d, n2 %d", len(node1.Requests()), len(node2.Requests()))
+		require.GreaterOrEqual(t, len(node1.Requests()), 50, msg)
+		require.GreaterOrEqual(t, len(node2.Requests()), 50, msg)
+	})
+
+	t.Run("load balancing should not hit if node is not healthy", func(t *testing.T) {
+		h1.ResetOverrides()
+		h2.ResetOverrides()
+		bg.Consensus.Unban()
+
+		// node1 should not be serving any traffic
+		h1.AddOverride(&ms.MethodTemplate{
+			Method:   "net_peerCount",
+			Block:    "",
+			Response: buildPeerCountResponse(1),
+		})
+
+		for _, be := range bg.Backends {
+			bg.Consensus.UpdateBackend(ctx, be)
+		}
+		bg.Consensus.UpdateBackendGroupConsensus(ctx)
+		require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup()))
+
+		node1.Reset()
+		node2.Reset()
+		require.Equal(t, 0, len(node1.Requests()))
+		require.Equal(t, 0, len(node2.Requests()))
+
+		numberReqs := 10
+		for numberReqs > 0 {
+			_, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x1", false})
+			require.NoError(t, err)
+			require.Equal(t, 200, statusCode)
+			numberReqs--
+		}
+
+		msg := fmt.Sprintf("n1 %d, n2 %d", len(node1.Requests()), len(node2.Requests()))
+		require.Equal(t, len(node1.Requests()), 0, msg)
+		require.Equal(t, len(node2.Requests()), 10, msg)
+	})
 }

 func backend(bg *proxyd.BackendGroup, name string) *proxyd.Backend {
 [server]
-rpc_port = 8080
+rpc_port = 8545

 [backend]
 response_timeout_seconds = 1
+- method: eth_chainId
+  response: >
+    {
+      "jsonrpc": "2.0",
+      "id": 67,
+      "result": "hello",
+    }
 - method: net_peerCount
   response: >
     {
@@ -66,10 +66,10 @@ A deposit has the following fields
   deposited transaction is a contract creation.
 - `uint256 mint`: The ETH value to mint on L2.
 - `uint256 value`: The ETH value to send to the recipient account.
-- `bytes data`: The input data.
+- `uint64 gas`: The gas limit for the L2 transaction.
 - `bool isSystemTx`: If true, the transaction does not interact with the L2 block gas pool.
   - Note: boolean is disabled (enforced to be `false`) starting from the Regolith upgrade.
-- `uint64 gasLimit`: The gasLimit for the L2 transaction.
+- `bytes data`: The calldata.

 In contrast to [EIP-155] transactions, this transaction type:
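For orientation, the reordered field list above (as far as it is visible in this hunk) maps onto a payload struct roughly like the following. This is an illustrative sketch of the spec's field order only, not the canonical op-geth transaction type; the struct and field names are made up for this example:

package spec

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

// depositFields mirrors the deposit fields listed above, in the updated order.
type depositFields struct {
	To         *common.Address // nil when the deposited transaction is a contract creation
	Mint       *big.Int        // uint256 mint: the ETH value to mint on L2
	Value      *big.Int        // uint256 value: the ETH value to send to the recipient account
	Gas        uint64          // uint64 gas: the gas limit for the L2 transaction
	IsSystemTx bool            // bool isSystemTx: enforced to be false starting from the Regolith upgrade
	Data       []byte          // bytes data: the calldata
}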