Commit eac4e593 authored by mergify[bot] and committed by GitHub

Merge branch 'develop' into willc/changesets-one-branch

parents bb54b711 34175ef7
package op_e2e
import (
"context"
"os"
"os/exec"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// BuildOpProgramClient builds the `op-program` client executable and returns the path to the resulting executable
func BuildOpProgramClient(t *testing.T) string {
t.Log("Building op-program-client")
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
cmd := exec.CommandContext(ctx, "make", "op-program-client")
cmd.Dir = "../op-program"
cmd.Stdout = os.Stdout // for debugging
cmd.Stderr = os.Stderr // for debugging
require.NoError(t, cmd.Run(), "Failed to build op-program-client")
t.Log("Built op-program-client successfully")
return "../op-program/bin/op-program-client"
}
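
A hedged sketch of how the returned path might be consumed in a test; the test name and the stat-based assertions are illustrative assumptions, not part of the original suite.

```go
package op_e2e

import (
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestBuildOpProgramClientProducesBinary is a hypothetical example: build the
// client via the helper above, then confirm the returned path points at an
// executable file rather than a directory.
func TestBuildOpProgramClientProducesBinary(t *testing.T) {
	path := BuildOpProgramClient(t)

	info, err := os.Stat(path)
	require.NoError(t, err, "expected the built client binary to exist")
	require.False(t, info.IsDir(), "expected a file, not a directory")
	// On Unix-like systems at least one execute bit should be set.
	require.NotZero(t, info.Mode()&0o111)
}
```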
......@@ -9,11 +9,9 @@ import (
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
oppcl "github.com/ethereum-optimism/optimism/op-program/client"
"github.com/ethereum-optimism/optimism/op-program/client/driver"
opp "github.com/ethereum-optimism/optimism/op-program/host"
oppconf "github.com/ethereum-optimism/optimism/op-program/host/config"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
......@@ -21,18 +19,6 @@ import (
"github.com/stretchr/testify/require"
)
// bypass the test runner when running as the client so the fpp executes directly
func init() {
if !opp.RunningProgramInClient() {
return
}
logger := oplog.NewLogger(oplog.CLIConfig{
Level: "debug",
Format: "text",
})
oppcl.Main(logger)
}
func TestVerifyL2OutputRoot(t *testing.T) {
testVerifyL2OutputRoot(t, false)
}
......@@ -115,7 +101,10 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool) {
fppConfig.L1URL = sys.NodeEndpoint("l1")
fppConfig.L2URL = sys.NodeEndpoint("sequencer")
fppConfig.DataDir = preimageDir
fppConfig.Detached = detached
if detached {
// When running in detached mode we need to compile the client executable since it will be called directly.
fppConfig.ExecCmd = BuildOpProgramClient(t)
}
// Check the FPP confirms the expected output
t.Log("Running fault proof in fetching mode")
......
......@@ -5,7 +5,6 @@ import (
"fmt"
"os"
cl "github.com/ethereum-optimism/optimism/op-program/client"
"github.com/ethereum-optimism/optimism/op-program/client/driver"
"github.com/ethereum-optimism/optimism/op-program/host"
"github.com/ethereum-optimism/optimism/op-program/host/config"
......@@ -37,11 +36,6 @@ var VersionWithMeta = func() string {
}()
func main() {
if host.RunningProgramInClient() {
logger := oplog.NewLogger(oplog.DefaultCLIConfig())
cl.Main(logger)
panic("Client main should have exited process")
}
args := os.Args
if err := run(args, host.FaultProofProgram); errors.Is(err, driver.ErrClaimNotValid) {
log.Crit("Claim is invalid", "err", err)
......
......@@ -220,22 +220,15 @@ func TestL2BlockNumber(t *testing.T) {
})
}
func TestDetached(t *testing.T) {
t.Run("DefaultFalse", func(t *testing.T) {
func TestExec(t *testing.T) {
t.Run("DefaultEmpty", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs())
require.False(t, cfg.Detached)
})
t.Run("Enabled", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs("--detached"))
require.True(t, cfg.Detached)
require.Equal(t, "", cfg.ExecCmd)
})
t.Run("EnabledWithArg", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs("--detached=true"))
require.True(t, cfg.Detached)
})
t.Run("Disabled", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs("--detached=false"))
require.False(t, cfg.Detached)
t.Run("Set", func(t *testing.T) {
cmd := "/bin/echo"
cfg := configForArgs(t, addRequiredArgs("--exec", cmd))
require.Equal(t, cmd, cfg.ExecCmd)
})
}
......
......@@ -49,8 +49,9 @@ type Config struct {
L2ClaimBlockNumber uint64
// L2ChainConfig is the op-geth chain config for the L2 execution engine
L2ChainConfig *params.ChainConfig
// Detached indicates that the program runs as a separate process
Detached bool
// ExecCmd specifies the client program to execute in a separate process.
// If unset, the fault proof client is run in the same process.
ExecCmd string
}
func (c *Config) Check() error {
......@@ -148,7 +149,7 @@ func NewConfigFromCLI(ctx *cli.Context) (*Config, error) {
L1URL: ctx.GlobalString(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(ctx.GlobalString(flags.L1RPCProviderKind.Name)),
Detached: ctx.GlobalBool(flags.Detached.Name),
ExecCmd: ctx.String(flags.Exec.Name),
}, nil
}
......
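
To illustrate the documented `ExecCmd` semantics (an empty string means the fault proof client runs in-process, anything else is launched as a separate process), here is a minimal validation sketch; the `validateExecCmd` helper and its checks are assumptions, not part of the host's actual `Check()` logic.

```go
package config

import (
	"fmt"
	"os"
)

// validateExecCmd is a hypothetical helper: an empty ExecCmd is valid and
// means "run the fault proof client in-process"; a non-empty value must
// resolve to an existing executable file before the host tries to spawn it.
func validateExecCmd(path string) error {
	if path == "" {
		return nil
	}
	info, err := os.Stat(path)
	if err != nil {
		return fmt.Errorf("exec command %q not found: %w", path, err)
	}
	if info.IsDir() || info.Mode()&0o111 == 0 {
		return fmt.Errorf("exec command %q is not an executable file", path)
	}
	return nil
}
```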
......@@ -81,10 +81,10 @@ var (
return &out
}(),
}
Detached = cli.BoolFlag{
Name: "detached",
Usage: "Run the program as a separate process detached from the host",
EnvVar: service.PrefixEnvVar(envVarPrefix, "DETACHED"),
Exec = cli.StringFlag{
Name: "exec",
Usage: "Run the specified client program as a separate process detached from the host. Default is to run the client program in the host process.",
EnvVar: service.PrefixEnvVar(envVarPrefix, "EXEC"),
}
)
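
For context on how a flag like `Exec` above is wired up, a self-contained urfave/cli v1 sketch; the app name, env var name, and action are illustrative assumptions, not op-program's actual CLI setup.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli"
)

func main() {
	// Define a string flag analogous to --exec: settable on the command line
	// or via an environment variable, empty by default.
	app := cli.NewApp()
	app.Name = "example"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:   "exec",
			Usage:  "Run the specified client program as a separate process",
			EnvVar: "EXAMPLE_EXEC", // hypothetical name; op-program derives its own prefix
		},
	}
	app.Action = func(ctx *cli.Context) error {
		// An empty string means the flag was not set and the env var is unset.
		fmt.Println("exec:", ctx.String("exec"))
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```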
......@@ -106,7 +106,7 @@ var programFlags = []cli.Flag{
L1NodeAddr,
L1TrustRPC,
L1RPCProviderKind,
Detached,
Exec,
}
func init() {
......
......@@ -27,13 +27,6 @@ type L2Source struct {
*sources.DebugClient
}
const opProgramChildEnvName = "OP_PROGRAM_CHILD"
func RunningProgramInClient() bool {
value, _ := os.LookupEnv(opProgramChildEnvName)
return value == "true"
}
// FaultProofProgram is the programmatic entry-point for the fault proof program
func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
if err := cfg.Check(); err != nil {
......@@ -96,8 +89,8 @@ func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
routeHints(logger, hHost, hinter)
var cmd *exec.Cmd
if cfg.Detached {
cmd = exec.CommandContext(ctx, os.Args[0])
if cfg.ExecCmd != "" {
cmd = exec.CommandContext(ctx, cfg.ExecCmd)
cmd.ExtraFiles = make([]*os.File, cl.MaxFd-3) // not including stdin, stdout and stderr
cmd.ExtraFiles[cl.HClientRFd-3] = hClientRW.Reader()
cmd.ExtraFiles[cl.HClientWFd-3] = hClientRW.Writer()
......@@ -105,7 +98,6 @@ func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
cmd.ExtraFiles[cl.PClientWFd-3] = pClientRW.Writer()
cmd.Stdout = os.Stdout // for debugging
cmd.Stderr = os.Stderr // for debugging
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=true", opProgramChildEnvName))
err := cmd.Start()
if err != nil {
......
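
For context on the other side of this handoff: each entry in `cmd.ExtraFiles` is inherited by the child as file descriptor 3+i, so the spawned client can wrap those descriptors with `os.NewFile`. A hedged sketch follows; the fd constants and names are placeholders, not op-program's actual client constants.

```go
package main

import (
	"fmt"
	"os"
)

// Hypothetical fd layout mirroring the host's ExtraFiles slice:
// ExtraFiles[i] shows up in the child as file descriptor 3+i,
// after stdin (0), stdout (1) and stderr (2).
const (
	hintReaderFd     = 3
	hintWriterFd     = 4
	preimageReaderFd = 5
	preimageWriterFd = 6
)

func main() {
	// os.NewFile wraps an inherited descriptor without validating it, so a
	// wrong layout only surfaces as an error on the first read or write.
	hintR := os.NewFile(uintptr(hintReaderFd), "hint-reader")
	hintW := os.NewFile(uintptr(hintWriterFd), "hint-writer")
	preR := os.NewFile(uintptr(preimageReaderFd), "preimage-reader")
	preW := os.NewFile(uintptr(preimageWriterFd), "preimage-writer")
	defer hintR.Close()
	defer hintW.Close()
	defer preR.Close()
	defer preW.Close()

	fmt.Println("hint and pre-image channels wired up from inherited fds")
}
```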
......@@ -591,9 +591,17 @@ func (b *BackendGroup) Forward(ctx context.Context, rpcReqs []*RPCReq, isBatch b
return nil, nil
}
backends := b.Backends
// When `consensus_aware` is set to `true`, the backend group acts as a load balancer
// serving traffic from any backend that is part of the consensus group
if b.Consensus != nil {
backends = b.loadBalancedConsensusGroup()
}
rpcRequestsTotal.Inc()
for _, back := range b.Backends {
for _, back := range backends {
res, err := back.Forward(ctx, rpcReqs, isBatch)
if errors.Is(err, ErrMethodNotWhitelisted) {
return nil, err
......@@ -670,6 +678,40 @@ func (b *BackendGroup) ProxyWS(ctx context.Context, clientConn *websocket.Conn,
return nil, ErrNoBackends
}
func (b *BackendGroup) loadBalancedConsensusGroup() []*Backend {
cg := b.Consensus.GetConsensusGroup()
backendsHealthy := make([]*Backend, 0, len(cg))
backendsDegraded := make([]*Backend, 0, len(cg))
// separate into healthy, degraded and unhealthy backends
for _, be := range cg {
// unhealthy are filtered out and not attempted
if !be.IsHealthy() {
continue
}
if be.IsDegraded() {
backendsDegraded = append(backendsDegraded, be)
continue
}
backendsHealthy = append(backendsHealthy, be)
}
// shuffle both slices
r := rand.New(rand.NewSource(time.Now().UnixNano()))
r.Shuffle(len(backendsHealthy), func(i, j int) {
backendsHealthy[i], backendsHealthy[j] = backendsHealthy[j], backendsHealthy[i]
})
r.Shuffle(len(backendsDegraded), func(i, j int) {
backendsDegraded[i], backendsDegraded[j] = backendsDegraded[j], backendsDegraded[i]
})
// healthy backends are tried first
// degraded backends are used as a fallback
backendsHealthy = append(backendsHealthy, backendsDegraded...)
return backendsHealthy
}
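
A self-contained sketch of the same priority idea with plain strings, to make the ordering property explicit: each tier is shuffled independently, then concatenated so the fallback tier is only reached after the primary one. The `prioritize` name is illustrative, not part of proxyd.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// prioritize shuffles each tier independently, then concatenates them so
// every entry in the primary tier is tried before any fallback entry,
// mirroring how healthy backends are preferred over degraded ones.
func prioritize(primary, fallback []string) []string {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	r.Shuffle(len(primary), func(i, j int) { primary[i], primary[j] = primary[j], primary[i] })
	r.Shuffle(len(fallback), func(i, j int) { fallback[i], fallback[j] = fallback[j], fallback[i] })
	return append(primary, fallback...)
}

func main() {
	healthy := []string{"node1", "node2", "node3"}
	degraded := []string{"node4"}
	// node4 is always printed last; the healthy nodes appear in random order.
	fmt.Println(prioritize(healthy, degraded))
}
```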
func calcBackoff(i int) time.Duration {
jitter := float64(rand.Int63n(250))
ms := math.Min(math.Pow(2, float64(i))*1000+jitter, 3000)
......
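
To make the backoff curve concrete, a small demo evaluating the same formula for the first few retries, assuming the truncated function returns the computed value as milliseconds: roughly 1 second, then 2 seconds, then the 3 second cap, with up to 250ms of jitter until the cap dominates.

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
)

func main() {
	// Same formula as calcBackoff: exponential growth in milliseconds with up
	// to 250ms of jitter, capped at 3 seconds. From the third attempt onward
	// the cap dominates and the delay stays at 3000ms.
	for i := 0; i < 5; i++ {
		jitter := float64(rand.Int63n(250))
		ms := math.Min(math.Pow(2, float64(i))*1000+jitter, 3000)
		fmt.Printf("attempt %d: backoff ~%dms\n", i, int(ms))
	}
}
```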
......@@ -3,6 +3,7 @@ package integration_tests
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"path"
......@@ -47,6 +48,7 @@ func TestConsensus(t *testing.T) {
ctx := context.Background()
svr, shutdown, err := proxyd.Start(config)
require.NoError(t, err)
client := NewProxydClient("http://127.0.0.1:8545")
defer shutdown()
bg := svr.BackendGroups["node"]
......@@ -76,7 +78,6 @@ func TestConsensus(t *testing.T) {
h2.ResetOverrides()
bg.Consensus.Unban()
// advance latest on node2 to 0x2
h1.AddOverride(&ms.MethodTemplate{
Method: "net_peerCount",
Block: "",
......@@ -355,6 +356,83 @@ func TestConsensus(t *testing.T) {
// should resolve to 0x1, the highest common ancestor
require.Equal(t, "0x1", bg.Consensus.GetConsensusBlockNumber().String())
})
t.Run("load balancing should hit both backends", func(t *testing.T) {
h1.ResetOverrides()
h2.ResetOverrides()
bg.Consensus.Unban()
for _, be := range bg.Backends {
bg.Consensus.UpdateBackend(ctx, be)
}
bg.Consensus.UpdateBackendGroupConsensus(ctx)
require.Equal(t, 2, len(bg.Consensus.GetConsensusGroup()))
node1.Reset()
node2.Reset()
require.Equal(t, 0, len(node1.Requests()))
require.Equal(t, 0, len(node2.Requests()))
// there is a random component to this test,
// since the load balancer shuffles the backend ordering
// to achieve a uniform distribution,
// so we make 100 requests per backend in the consensus group and expect
// each backend to receive at least 50 of them (half of its expected share)
consensusGroup := bg.Consensus.GetConsensusGroup()
numberReqs := len(consensusGroup) * 100
for numberReqs > 0 {
_, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x1", false})
require.NoError(t, err)
require.Equal(t, 200, statusCode)
numberReqs--
}
msg := fmt.Sprintf("n1 %d, n2 %d", len(node1.Requests()), len(node2.Requests()))
require.GreaterOrEqual(t, len(node1.Requests()), 50, msg)
require.GreaterOrEqual(t, len(node2.Requests()), 50, msg)
})
t.Run("load balancing should not hit if node is not healthy", func(t *testing.T) {
h1.ResetOverrides()
h2.ResetOverrides()
bg.Consensus.Unban()
// node1 should not be serving any traffic
h1.AddOverride(&ms.MethodTemplate{
Method: "net_peerCount",
Block: "",
Response: buildPeerCountResponse(1),
})
for _, be := range bg.Backends {
bg.Consensus.UpdateBackend(ctx, be)
}
bg.Consensus.UpdateBackendGroupConsensus(ctx)
require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup()))
node1.Reset()
node2.Reset()
require.Equal(t, 0, len(node1.Requests()))
require.Equal(t, 0, len(node2.Requests()))
numberReqs := 10
for numberReqs > 0 {
_, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x1", false})
require.NoError(t, err)
require.Equal(t, 200, statusCode)
numberReqs--
}
msg := fmt.Sprintf("n1 %d, n2 %d", len(node1.Requests()), len(node2.Requests()))
require.Equal(t, len(node1.Requests()), 0, msg)
require.Equal(t, len(node2.Requests()), 10, msg)
})
}
func backend(bg *proxyd.BackendGroup, name string) *proxyd.Backend {
......
[server]
rpc_port = 8080
rpc_port = 8545
[backend]
response_timeout_seconds = 1
......
- method: eth_chainId
response: >
{
"jsonrpc": "2.0",
"id": 67,
"result": "hello",
}
- method: net_peerCount
response: >
{
......
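
As an aside, a hedged sketch of how a canned JSON-RPC response like the template above could be served from a Go test using net/http/httptest; this is not proxyd's actual mock-server implementation, and the fixed id and result are illustrative.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Always answer with a fixed JSON-RPC body regardless of the request,
	// similar in spirit to a method/response template in the mock config.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"jsonrpc":"2.0","id":67,"result":"hello"}`)
	}))
	defer srv.Close()

	resp, err := http.Post(srv.URL, "application/json", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // {"jsonrpc":"2.0","id":67,"result":"hello"}
}
```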
......@@ -66,10 +66,10 @@ A deposit has the following fields
deposited transaction is a contract creation.
- `uint256 mint`: The ETH value to mint on L2.
- `uint256 value`: The ETH value to send to the recipient account.
- `bytes data`: The input data.
- `uint64 gas`: The gas limit for the L2 transaction.
- `bool isSystemTx`: If true, the transaction does not interact with the L2 block gas pool.
- Note: boolean is disabled (enforced to be `false`) starting from the Regolith upgrade.
- `uint64 gasLimit`: The gasLimit for the L2 transaction.
- `bytes data`: The calldata.
In contrast to [EIP-155] transactions, this transaction type:
......
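
For reference, a hedged Go sketch of the deposited-transaction fields listed above as a struct; the struct name, field ordering, and the omission of fields elided from this excerpt are assumptions, and this is not op-geth's canonical DepositTx type.

```go
package main

import (
	"fmt"
	"math/big"
)

// depositTxFields mirrors the field list above for illustration only.
type depositTxFields struct {
	Mint       *big.Int // ETH value to mint on L2
	Value      *big.Int // ETH value to send to the recipient account
	GasLimit   uint64   // gas limit for the L2 transaction
	IsSystemTx bool     // enforced to be false from the Regolith upgrade onward
	Data       []byte   // calldata
}

func main() {
	d := depositTxFields{
		Mint:     big.NewInt(0),
		Value:    big.NewInt(1),
		GasLimit: 21_000,
	}
	fmt.Printf("%+v\n", d)
}
```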