Commit 87dd5a47 authored by Evan Richard, committed by GitHub

Merge pull request #8431 from ethereum-optimism/evan/configurable_max_rpc

Add config flag to set max number of concurrent L1 RPC requests.
parents 4b08e088 0d7d2242
......@@ -814,6 +814,7 @@ func configureL1(rollupNodeCfg *rollupNode.Config, l1Node EthInstance) {
RateLimit: 0,
BatchSize: 20,
HttpPollInterval: time.Millisecond * 100,
MaxConcurrency: 10,
}
}
......
......@@ -100,6 +100,12 @@ var (
Required: false,
Hidden: true,
}
L1RPCMaxConcurrency = &cli.IntFlag{
Name: "l1.max-concurrency",
Usage: "Maximum number of concurrent RPC requests to make to the L1 RPC provider.",
EnvVars: prefixEnvVars("L1_MAX_CONCURRENCY"),
Value: 10,
}
L1RPCRateLimit = &cli.Float64Flag{
Name: "l1.rpc-rate-limit",
Usage: "Optional self-imposed global rate-limit on L1 RPC requests, specified in requests / second. Disabled if set to 0.",
......@@ -295,6 +301,7 @@ var optionalFlags = []cli.Flag{
L1RPCProviderKind,
L1RPCRateLimit,
L1RPCMaxBatchSize,
L1RPCMaxConcurrency,
L1HTTPPollInterval,
L2EngineJWTSecret,
VerifierL1Confs,
......
......@@ -151,6 +151,9 @@ type L1EndpointConfig struct {
// BatchSize specifies the maximum batch-size, which also applies as L1 rate-limit burst amount (if set).
BatchSize int
// MaxConcurrency specifies the maximum number of concurrent requests to the L1 RPC.
MaxConcurrency int
// HttpPollInterval specifies the interval between polling for the latest L1 block,
// when the RPC is detected to be an HTTP type.
// It is recommended to use websockets or IPC for efficient following of the changing block.
......@@ -167,6 +170,9 @@ func (cfg *L1EndpointConfig) Check() error {
if cfg.RateLimit < 0 {
return fmt.Errorf("rate limit cannot be negative")
}
if cfg.MaxConcurrency < 1 {
return fmt.Errorf("max concurrent requests cannot be less than 1, was %d", cfg.MaxConcurrency)
}
return nil
}
......@@ -185,6 +191,7 @@ func (cfg *L1EndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCf
}
rpcCfg := sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind)
rpcCfg.MaxRequestsPerBatch = cfg.BatchSize
rpcCfg.MaxConcurrentRequests = cfg.MaxConcurrency
return l1Node, rpcCfg, nil
}
......
......@@ -129,6 +129,7 @@ func NewL1EndpointConfig(ctx *cli.Context) *node.L1EndpointConfig {
RateLimit: ctx.Float64(flags.L1RPCRateLimit.Name),
BatchSize: ctx.Int(flags.L1RPCMaxBatchSize.Name),
HttpPollInterval: ctx.Duration(flags.L1HTTPPollInterval.Name),
MaxConcurrency: ctx.Int(flags.L1RPCMaxConcurrency.Name),
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment