Commit 65f9aaec authored by mergify[bot], committed by GitHub

Merge branch 'develop' into feat/p2p-metrics

parents 913d77a6 149d4f01
---
'@eth-optimism/proxyd': minor
---
Fixed JSON-RPC 2.0 specification compliance by adding the optional data field on an RPCError
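For reference, a minimal sketch of the error shape this change targets (an assumed struct, not necessarily proxyd's exact type): JSON-RPC 2.0 allows an optional `data` member on the error object, which is simply omitted from the serialized JSON when empty.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// RPCError is a minimal sketch of a JSON-RPC 2.0 error object; proxyd's real
// type may differ. The optional "data" member carries backend-provided detail
// and is dropped from the JSON when empty.
type RPCError struct {
	Code    int         `json:"code"`
	Message string      `json:"message"`
	Data    interface{} `json:"data,omitempty"`
}

func main() {
	b, _ := json.Marshal(RPCError{Code: -32000, Message: "execution reverted", Data: "insufficient funds"})
	fmt.Println(string(b)) // {"code":-32000,"message":"execution reverted","data":"insufficient funds"}
}
```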
---
'@eth-optimism/contracts-bedrock': patch
---
Use uint64 for arithmetic in XDM's baseGas
---
'@eth-optimism/sdk': patch
---
Adds contract addresses for the Bedrock Alpha testnet
---
'@eth-optimism/contracts-bedrock': patch
'@eth-optimism/sdk': patch
---
Rename the event emitted in the L2ToL1MessagePasser
---
'@eth-optimism/contracts-periphery': patch
---
Goerli nft bridge deployment
---
'@eth-optimism/l2geth': patch
---
add --rpc.evmtimeout flag to configure timeout for eth_call
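As a rough illustration of what such a flag controls (a hypothetical helper, not l2geth's actual implementation), the timeout is the wall-clock budget for a single eth_call before execution is aborted:

```go
package main

import (
	"context"
	"errors"
	"time"
)

// callWithEVMTimeout is a hypothetical helper showing what a flag like
// --rpc.evmtimeout governs: how long one eth_call may run before it is cut off.
func callWithEVMTimeout(parent context.Context, timeout time.Duration, run func(context.Context) ([]byte, error)) ([]byte, error) {
	ctx, cancel := context.WithTimeout(parent, timeout)
	defer cancel()

	type result struct {
		out []byte
		err error
	}
	done := make(chan result, 1)
	go func() {
		out, err := run(ctx) // the EVM call is expected to watch ctx and stop when cancelled
		done <- result{out, err}
	}()

	select {
	case <-ctx.Done():
		return nil, errors.New("eth_call timed out")
	case res := <-done:
		return res.out, res.err
	}
}

func main() {
	out, err := callWithEVMTimeout(context.Background(), 2*time.Second, func(ctx context.Context) ([]byte, error) {
		// stand-in for EVM execution
		select {
		case <-time.After(10 * time.Millisecond):
			return []byte{0x01}, nil
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	})
	_, _ = out, err
}
```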
---
'@eth-optimism/integration-tests': patch
'@eth-optimism/contracts-periphery': patch
---
Fix erc721 factory to match erc21 factory
---
'@eth-optimism/indexer': minor
---
Bedrock support
---
'@eth-optimism/proxyd': minor
---
Adds new Redis rate limiter
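A minimal fixed-window sketch of the idea, assuming a local Redis and the go-redis client (proxyd's actual limiter is more elaborate): the counters live in Redis so that multiple proxyd instances share one rate limit.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

// redisRateLimiter is a minimal fixed-window sketch of a Redis-backed rate
// limiter, shown only to illustrate sharing counters across instances.
type redisRateLimiter struct {
	rdb    *redis.Client
	limit  int64
	window time.Duration
}

// Allow increments the per-key counter for the current window and reports
// whether the caller is still under the limit.
func (r *redisRateLimiter) Allow(ctx context.Context, key string) (bool, error) {
	windowKey := fmt.Sprintf("rl:%s:%d", key, time.Now().Unix()/int64(r.window.Seconds()))
	n, err := r.rdb.Incr(ctx, windowKey).Result()
	if err != nil {
		return false, err
	}
	if n == 1 {
		// first hit in this window: let the counter expire with the window
		r.rdb.Expire(ctx, windowKey, r.window)
	}
	return n <= r.limit, nil
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed local Redis
	lim := &redisRateLimiter{rdb: rdb, limit: 10, window: time.Second}
	ok, err := lim.Allow(context.Background(), "client-1.2.3.4")
	fmt.Println(ok, err)
}
```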
---
"@eth-optimism/contracts-periphery": patch
---
mainnet nft bridge deployments
---
'@eth-optimism/contracts-bedrock': patch
---
Removes an unnecessary initializer parameter in the L2OO
---
'@eth-optimism/endpoint-monitor': major
---
Initial release of endpoint monitor
---
'@eth-optimism/ci-builder': patch
---
Pin slither version to 0.9.0
# @eth-optimism/endpoint-monitor
## 1.0.0
### Major Changes
- a10c2b49: Initial release of endpoint monitor
{
"name": "@eth-optimism/endpoint-monitor",
"version": "0.0.0",
"version": "1.0.0",
"private": true,
"dependencies": {}
}
# @eth-optimism/indexer
## 0.3.0
### Minor Changes
- 19e581d8: Bedrock support
## 0.2.0
### Minor Changes
......
{
"name": "@eth-optimism/indexer",
"version": "0.2.0",
"version": "0.3.0",
"private": true,
"license": "MIT"
}
# @eth-optimism/integration-tests
## 0.5.21
### Patch Changes
- a3242d4f: Fix erc721 factory to match erc21 factory
## 0.5.20
### Patch Changes
......
{
"private": true,
"name": "@eth-optimism/integration-tests",
"version": "0.5.20",
"version": "0.5.21",
"description": "[Optimism] Integration tests",
"scripts": {
"lint": "yarn lint:fix && yarn lint:check",
......@@ -30,9 +30,9 @@
"devDependencies": {
"@babel/eslint-parser": "^7.5.4",
"@eth-optimism/contracts": "^0.5.37",
"@eth-optimism/contracts-periphery": "^1.0.1",
"@eth-optimism/contracts-periphery": "^1.0.2",
"@eth-optimism/core-utils": "0.10.1",
"@eth-optimism/sdk": "1.6.6",
"@eth-optimism/sdk": "1.6.7",
"@ethersproject/abstract-provider": "^5.7.0",
"@ethersproject/providers": "^5.7.0",
"@ethersproject/transactions": "^5.7.0",
......
# Changelog
## 0.5.25
### Patch Changes
- 89f1abfa: add --rpc.evmtimeout flag to configure timeout for eth_call
## 0.5.24
### Patch Changes
......
{
"name": "@eth-optimism/l2geth",
"version": "0.5.24",
"version": "0.5.25",
"private": true,
"devDependencies": {}
}
......@@ -10,6 +10,7 @@ const (
DevOptimismMintableERC20Factory = "0x6900000000000000000000000000000000000004"
DevAddressManager = "0x6900000000000000000000000000000000000005"
DevProxyAdmin = "0x6900000000000000000000000000000000000006"
DevWETH9 = "0x6900000000000000000000000000000000000007"
)
var (
......@@ -20,6 +21,7 @@ var (
DevOptimismMintableERC20FactoryAddr = common.HexToAddress(DevOptimismMintableERC20Factory)
DevAddressManagerAddr = common.HexToAddress(DevAddressManager)
DevProxyAdminAddr = common.HexToAddress(DevProxyAdmin)
DevWETH9Addr = common.HexToAddress(DevWETH9)
DevPredeploys = make(map[string]*common.Address)
)
......@@ -32,4 +34,5 @@ func init() {
DevPredeploys["OptimismMintableERC20Factory"] = &DevOptimismMintableERC20FactoryAddr
DevPredeploys["AddressManager"] = &DevAddressManagerAddr
DevPredeploys["Admin"] = &DevProxyAdminAddr
DevPredeploys["WETH9"] = &DevWETH9Addr
}
......@@ -165,6 +165,16 @@ func BuildL1DeveloperGenesis(config *DeployConfig) (*core.Genesis, error) {
for name, proxyAddr := range predeploys.DevPredeploys {
memDB.SetState(*proxyAddr, ImplementationSlot, depsByName[name].Address.Hash())
// Special case for WETH since it was not designed to be behind a proxy
if name == "WETH9" {
name, _ := state.EncodeStringValue("Wrapped Ether", 0)
symbol, _ := state.EncodeStringValue("WETH", 0)
decimals, _ := state.EncodeUintValue(18, 0)
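// WETH9 declares name, symbol and decimals as its first three state variables,
// so they occupy storage slots 0, 1 and 2; with no proxy or initializer for
// WETH9, the values are written into those slots directly.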
memDB.SetState(*proxyAddr, common.Hash{}, name)
memDB.SetState(*proxyAddr, common.Hash{31: 0x01}, symbol)
memDB.SetState(*proxyAddr, common.Hash{31: 0x02}, decimals)
}
}
stateDB, err := backend.Blockchain().State()
......@@ -183,6 +193,7 @@ func BuildL1DeveloperGenesis(config *DeployConfig) (*core.Genesis, error) {
memDB.CreateAccount(depAddr)
memDB.SetCode(depAddr, dep.Bytecode)
for iter.Next() {
_, data, _, err := rlp.Split(iter.Value)
if err != nil {
......@@ -250,6 +261,9 @@ func deployL1Contracts(config *DeployConfig, backend *backends.SimulatedBackend)
common.Address{19: 0x01},
},
},
{
Name: "WETH9",
},
}...)
return deployer.Deploy(backend, constructors, l1Deployer)
}
......@@ -308,6 +322,11 @@ func l1Deployer(backend *backends.SimulatedBackend, opts *bind.TransactOpts, dep
backend,
common.Address{},
)
case "WETH9":
_, tx, _, err = bindings.DeployWETH9(
opts,
backend,
)
default:
if strings.HasSuffix(deployment.Name, "Proxy") {
_, tx, _, err = bindings.DeployProxy(opts, backend, deployer.TestAddress)
......
......@@ -92,6 +92,18 @@ func TestBuildL1DeveloperGenesis(t *testing.T) {
require.NoError(t, err)
require.Equal(t, predeploys.DevL1StandardBridgeAddr, bridgeAddr)
weth9, err := bindings.NewWETH9(predeploys.DevWETH9Addr, sim)
require.NoError(t, err)
decimals, err := weth9.Decimals(callOpts)
require.NoError(t, err)
require.Equal(t, uint8(18), decimals)
symbol, err := weth9.Symbol(callOpts)
require.NoError(t, err)
require.Equal(t, "WETH", symbol)
name, err := weth9.Name(callOpts)
require.NoError(t, err)
require.Equal(t, "Wrapped Ether", name)
// test that we can do deposits, etc.
priv, err := crypto.HexToECDSA("ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80")
require.NoError(t, err)
......
......@@ -106,6 +106,7 @@ func (s *L1Miner) ActL1IncludeTx(from common.Address) Action {
return
}
s.pendingIndices[from] = i + 1 // won't retry the tx
s.l1BuildingState.Prepare(tx.Hash(), len(s.l1Transactions))
receipt, err := core.ApplyTransaction(s.l1Cfg.Config, s.l1Chain, &s.l1BuildingHeader.Coinbase,
s.l1GasPool, s.l1BuildingState, s.l1BuildingHeader, tx, &s.l1BuildingHeader.GasUsed, *s.l1Chain.GetVMConfig())
if err != nil {
......
package actions
import (
"bytes"
"context"
"crypto/ecdsa"
"io"
"math/big"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
)
type SyncStatusAPI interface {
SyncStatus(ctx context.Context) (*eth.SyncStatus, error)
}
type BlocksAPI interface {
BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error)
}
type L1TxAPI interface {
PendingNonceAt(ctx context.Context, account common.Address) (uint64, error)
HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
SendTransaction(ctx context.Context, tx *types.Transaction) error
}
type BatcherCfg struct {
// Limit the size of txs
MinL1TxSize uint64
MaxL1TxSize uint64
BatcherKey *ecdsa.PrivateKey
}
// L2Batcher buffers and submits L2 batches to L1.
//
// TODO: note the batcher shares little logic/state with actual op-batcher,
// tests should only use this actor to build batch contents for rollup node actors to consume,
// until the op-batcher is refactored and can be covered better.
type L2Batcher struct {
log log.Logger
rollupCfg *rollup.Config
syncStatusAPI SyncStatusAPI
l2 BlocksAPI
l1 L1TxAPI
l1Signer types.Signer
l2ChannelOut *derive.ChannelOut
l2Submitting bool // when the channel out is being submitted, and not safe to write to without resetting
l2BufferedBlock eth.BlockID
l2SubmittedBlock eth.BlockID
l2BatcherCfg *BatcherCfg
}
func NewL2Batcher(log log.Logger, rollupCfg *rollup.Config, batcherCfg *BatcherCfg, api SyncStatusAPI, l1 L1TxAPI, l2 BlocksAPI) *L2Batcher {
return &L2Batcher{
log: log,
rollupCfg: rollupCfg,
syncStatusAPI: api,
l1: l1,
l2: l2,
l2BatcherCfg: batcherCfg,
l1Signer: types.LatestSignerForChainID(rollupCfg.L1ChainID),
}
}
// SubmittingData indicates if the actor is submitting buffer data.
// All data must be submitted before it can safely continue buffering more L2 blocks.
func (s *L2Batcher) SubmittingData() bool {
return s.l2Submitting
}
// ActL2BatchBuffer adds the next L2 block to the batch buffer.
// If the buffer is being submitted, the buffer is wiped.
func (s *L2Batcher) ActL2BatchBuffer(t Testing) {
if s.l2Submitting { // break ongoing submitting work if necessary
s.l2ChannelOut = nil
s.l2Submitting = false
}
syncStatus, err := s.syncStatusAPI.SyncStatus(t.Ctx())
require.NoError(t, err, "no sync status error")
// If we just started, start at safe-head
if s.l2SubmittedBlock == (eth.BlockID{}) {
s.log.Info("Starting batch-submitter work at safe-head", "safe", syncStatus.SafeL2)
s.l2SubmittedBlock = syncStatus.SafeL2.ID()
s.l2BufferedBlock = syncStatus.SafeL2.ID()
s.l2ChannelOut = nil
}
// If it's lagging behind, catch it up.
if s.l2SubmittedBlock.Number < syncStatus.SafeL2.Number {
s.log.Warn("last submitted block lagged behind L2 safe head: batch submission will continue from the safe head now", "last", s.l2SubmittedBlock, "safe", syncStatus.SafeL2)
s.l2SubmittedBlock = syncStatus.SafeL2.ID()
s.l2BufferedBlock = syncStatus.SafeL2.ID()
s.l2ChannelOut = nil
}
// Create channel if we don't have one yet
if s.l2ChannelOut == nil {
ch, err := derive.NewChannelOut()
require.NoError(t, err, "failed to create channel")
s.l2ChannelOut = ch
}
// Add the next unsafe block to the channel
if s.l2BufferedBlock.Number >= syncStatus.UnsafeL2.Number {
return
}
block, err := s.l2.BlockByNumber(t.Ctx(), big.NewInt(int64(s.l2BufferedBlock.Number+1)))
require.NoError(t, err, "need l2 block %d from sync status", s.l2SubmittedBlock.Number+1)
if block.ParentHash() != s.l2BufferedBlock.Hash {
s.log.Error("detected a reorg in L2 chain vs previous submitted information, resetting to safe head now", "safe_head", syncStatus.SafeL2)
s.l2SubmittedBlock = syncStatus.SafeL2.ID()
s.l2BufferedBlock = syncStatus.SafeL2.ID()
s.l2ChannelOut = nil
}
if err := s.l2ChannelOut.AddBlock(block); err != nil { // should always succeed
t.Fatalf("failed to add block to channel: %v", err)
}
}
func (s *L2Batcher) ActL2ChannelClose(t Testing) {
// Don't run this action if there's no data to submit
if s.l2ChannelOut == nil {
t.InvalidAction("need to buffer data first, cannot batch submit with empty buffer")
return
}
require.NoError(t, s.l2ChannelOut.Close(), "must close channel before submitting it")
}
// ActL2BatchSubmit constructs a batch tx from previous buffered L2 blocks, and submits it to L1
func (s *L2Batcher) ActL2BatchSubmit(t Testing) {
// Don't run this action if there's no data to submit
if s.l2ChannelOut == nil {
t.InvalidAction("need to buffer data first, cannot batch submit with empty buffer")
return
}
// Collect the output frame
data := new(bytes.Buffer)
data.WriteByte(derive.DerivationVersion0)
// subtract one, to account for the version byte
if err := s.l2ChannelOut.OutputFrame(data, s.l2BatcherCfg.MaxL1TxSize-1); err == io.EOF {
s.l2Submitting = false
// there may still be some data to submit
} else if err != nil {
s.l2Submitting = false
t.Fatalf("failed to output channel data to frame: %v", err)
}
nonce, err := s.l1.PendingNonceAt(t.Ctx(), s.rollupCfg.BatchSenderAddress)
require.NoError(t, err, "need batcher nonce")
gasTipCap := big.NewInt(2 * params.GWei)
pendingHeader, err := s.l1.HeaderByNumber(t.Ctx(), big.NewInt(-1))
require.NoError(t, err, "need l1 pending header for gas price estimation")
gasFeeCap := new(big.Int).Add(gasTipCap, new(big.Int).Mul(pendingHeader.BaseFee, big.NewInt(2)))
rawTx := &types.DynamicFeeTx{
ChainID: s.rollupCfg.L1ChainID,
Nonce: nonce,
To: &s.rollupCfg.BatchInboxAddress,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Data: data.Bytes(),
}
gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true)
require.NoError(t, err, "need to compute intrinsic gas")
rawTx.Gas = gas
tx, err := types.SignNewTx(s.l2BatcherCfg.BatcherKey, s.l1Signer, rawTx)
require.NoError(t, err, "need to sign tx")
err = s.l1.SendTransaction(t.Ctx(), tx)
require.NoError(t, err, "need to send tx")
}
package actions
import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
)
func TestBatcher(gt *testing.T) {
t := NewDefaultTesting(gt)
p := &e2eutils.TestParams{
MaxSequencerDrift: 20, // larger than L1 block time we simulate in this test (12)
SequencerWindowSize: 24,
ChannelTimeout: 20,
}
dp := e2eutils.MakeDeployParams(t, p)
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg))
rollupSeqCl := sequencer.RollupClient()
batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient())
// Alice makes a L2 tx
cl := seqEngine.EthClient()
n, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
require.NoError(t, err)
signer := types.LatestSigner(sd.L2Cfg.Config)
tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
})
require.NoError(gt, cl.SendTransaction(t.Ctx(), tx))
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
// Make L2 block
sequencer.ActL2StartBlock(t)
seqEngine.ActL2IncludeTx(dp.Addresses.Alice)(t)
sequencer.ActL2EndBlock(t)
// batch submit to L1
batcher.ActL2BatchBuffer(t)
batcher.ActL2ChannelClose(t)
batcher.ActL2BatchSubmit(t)
// confirm batch on L1
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t)
bl := miner.l1Chain.CurrentBlock()
log.Info("bl", "txs", len(bl.Transactions()))
// Now make enough L1 blocks that the verifier will have to derive a L2 block
for i := uint64(1); i < sd.RollupCfg.SeqWindowSize; i++ {
miner.ActL1StartBlock(12)(t)
miner.ActL1EndBlock(t)
}
// sync verifier from L1 batch in otherwise empty sequence window
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
require.Equal(t, uint64(1), verifier.SyncStatus().SafeL2.L1Origin.Number)
// check that the tx from alice made it into the L2 chain
verifCl := verifEngine.EthClient()
vTx, isPending, err := verifCl.TransactionByHash(t.Ctx(), tx.Hash())
require.NoError(t, err)
require.False(t, isPending)
require.NotNil(t, vTx)
}
......@@ -174,6 +174,7 @@ func (e *L2Engine) ActL2IncludeTx(from common.Address) Action {
return
}
e.pendingIndices[from] = i + 1 // won't retry the tx
e.l2BuildingState.Prepare(tx.Hash(), len(e.l2Transactions))
receipt, err := core.ApplyTransaction(e.l2Cfg.Config, e.l2Chain, &e.l2BuildingHeader.Coinbase,
e.l2GasPool, e.l2BuildingState, e.l2BuildingHeader, tx, &e.l2BuildingHeader.GasUsed, *e.l2Chain.GetVMConfig())
if err != nil {
......
package actions
import (
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/withdrawals"
)
func TestCrossLayerUser(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug)
miner, seqEngine, seq := setupSequencerTest(t, sd, log)
// need to start derivation before we can make L2 blocks
seq.ActL2PipelineFull(t)
l1Cl := miner.EthClient()
l2Cl := seqEngine.EthClient()
withdrawalsCl := &withdrawals.Client{} // TODO: need a rollup node actor to wrap for output root proof RPC
addresses := e2eutils.CollectAddresses(sd, dp)
l1UserEnv := &BasicUserEnv[*L1Bindings]{
EthCl: l1Cl,
Signer: types.LatestSigner(sd.L1Cfg.Config),
AddressCorpora: addresses,
Bindings: NewL1Bindings(t, l1Cl, &sd.DeploymentsL1),
}
l2UserEnv := &BasicUserEnv[*L2Bindings]{
EthCl: l2Cl,
Signer: types.LatestSigner(sd.L2Cfg.Config),
AddressCorpora: addresses,
Bindings: NewL2Bindings(t, l2Cl, withdrawalsCl),
}
alice := NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(1234)))
alice.L1.SetUserEnv(l1UserEnv)
alice.L2.SetUserEnv(l2UserEnv)
// regular L2 tx, in new L2 block
alice.L2.ActResetTxOpts(t)
alice.L2.ActSetTxToAddr(&dp.Addresses.Bob)(t)
alice.L2.ActMakeTx(t)
seq.ActL2StartBlock(t)
seqEngine.ActL2IncludeTx(alice.Address())(t)
seq.ActL2EndBlock(t)
alice.L2.ActCheckReceiptStatusOfLastTx(true)(t)
// regular L1 tx, in new L1 block
alice.L1.ActResetTxOpts(t)
alice.L1.ActSetTxToAddr(&dp.Addresses.Bob)(t)
alice.L1.ActMakeTx(t)
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(alice.Address())(t)
miner.ActL1EndBlock(t)
alice.L1.ActCheckReceiptStatusOfLastTx(true)(t)
// regular Deposit, in new L1 block
alice.ActDeposit(t)
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(alice.Address())(t)
miner.ActL1EndBlock(t)
seq.ActL1HeadSignal(t)
// sync sequencer build enough blocks to adopt latest L1 origin
for seq.SyncStatus().UnsafeL2.L1Origin.Number < miner.l1Chain.CurrentBlock().NumberU64() {
seq.ActL2StartBlock(t)
seq.ActL2EndBlock(t)
}
// Now that the L2 chain adopted the latest L1 block, check that we processed the deposit
alice.ActCheckDepositStatus(true, true)(t)
}
......@@ -43,7 +43,7 @@ func NewRPC(ctx context.Context, lgr log.Logger, addr string, opts ...rpc.Client
func DialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string, opts ...rpc.ClientOption) (*rpc.Client, error) {
bOff := backoff.Exponential()
var ret *rpc.Client
err := backoff.Do(10, bOff, func() error {
err := backoff.DoCtx(ctx, 10, bOff, func() error {
client, err := rpc.DialOptions(ctx, addr, opts...)
if err != nil {
if client == nil {
......
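The switch from backoff.Do to backoff.DoCtx makes the retry loop respect the caller's context. A minimal sketch of that behaviour (assumed helper name, not the actual op-service backoff package):

```go
package main

import (
	"context"
	"errors"
	"time"
)

// doCtx is a sketch of a context-aware retry loop with exponential backoff:
// unlike a plain Do, it stops waiting as soon as the caller's context is done.
func doCtx(ctx context.Context, maxAttempts int, base time.Duration, op func() error) error {
	var err error
	for i := 0; i < maxAttempts; i++ {
		if err = op(); err == nil {
			return nil
		}
		wait := base << uint(i) // 1x, 2x, 4x, ... the base delay
		select {
		case <-ctx.Done():
			return ctx.Err() // give up immediately on cancellation or deadline
		case <-time.After(wait):
		}
	}
	return err
}

func main() {
	err := doCtx(context.Background(), 3, 100*time.Millisecond, func() error {
		return errors.New("dial failed") // stand-in for rpc.DialOptions
	})
	_ = err
}
```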
......@@ -4,8 +4,11 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
var _ BlockInfo = (&types.Block{})
type BlockInfo interface {
Hash() common.Hash
ParentHash() common.Hash
......@@ -16,7 +19,6 @@ type BlockInfo interface {
// MixDigest field, reused for randomness after The Merge (Bellatrix hardfork)
MixDigest() common.Hash
BaseFee() *big.Int
ID() BlockID
ReceiptHash() common.Hash
}
......@@ -28,3 +30,15 @@ func InfoToL1BlockRef(info BlockInfo) L1BlockRef {
Time: info.Time(),
}
}
type NumberAndHash interface {
Hash() common.Hash
NumberU64() uint64
}
func ToBlockID(b NumberAndHash) BlockID {
return BlockID{
Hash: b.Hash(),
Number: b.NumberU64(),
}
}
......@@ -59,7 +59,11 @@ func (bq *BatchQueue) Origin() eth.L1BlockRef {
}
func (bq *BatchQueue) NextBatch(ctx context.Context, safeL2Head eth.L2BlockRef) (*BatchData, error) {
originBehind := bq.origin.Number < safeL2Head.L1Origin.Number
// Note: We use the origin that we will have to determine if it's behind. This is important
// because it's the future origin that gets saved into the l1Blocks array.
// We always update the origin of this stage if it is not the same so after the update code
// runs, this is consistent.
originBehind := bq.prev.Origin().Number < safeL2Head.L1Origin.Number
// Advance origin if needed
// Note: The entire pipeline has the same origin
......
......@@ -405,7 +405,7 @@ func (eq *EngineQueue) forceNextSafeAttributes(ctx context.Context) error {
// ResetStep Walks the L2 chain backwards until it finds an L2 block whose L1 origin is canonical.
// The unsafe head is set to the head of the L2 chain, unless the existing safe head is not canonical.
func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef) error {
result, err := sync.FindL2Heads(ctx, eq.cfg, eq.l1Fetcher, eq.engine)
result, err := sync.FindL2Heads(ctx, eq.cfg, eq.l1Fetcher, eq.engine, eq.log)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to find the L2 Heads to start from: %w", err))
}
......
......@@ -91,6 +91,7 @@ func (d *Sequencer) CompleteBuildingBlock(ctx context.Context) (*eth.ExecutionPa
if err != nil {
return nil, fmt.Errorf("failed to complete building on top of L2 chain %s, error (%d): %w", d.buildingOnto.HeadBlockHash, errTyp, err)
}
d.buildingID = eth.PayloadID{}
return payload, nil
}
......@@ -103,7 +104,6 @@ func (d *Sequencer) CreateNewBlock(ctx context.Context, l2Head eth.L2BlockRef, l
if err != nil {
return l2Head, nil, err
}
d.buildingID = eth.PayloadID{}
// Generate an L2 block ref from the payload.
ref, err := derive.PayloadToBlockRef(payload, &d.config.Genesis)
......
......@@ -266,7 +266,7 @@ func (s *Driver) eventLoop() {
s.log.Warn("not creating block, node is deriving new l2 data", "head_l1", l1Head)
break
}
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
ctx, cancel := context.WithTimeout(ctx, 20*time.Minute)
err := s.createNewL2Block(ctx)
cancel()
if err != nil {
......@@ -309,9 +309,7 @@ func (s *Driver) eventLoop() {
s.metrics.SetDerivationIdle(false)
s.idleDerivation = false
s.log.Debug("Derivation process step", "onto_origin", s.derivation.Origin(), "attempts", stepAttempts)
stepCtx, cancel := context.WithTimeout(ctx, time.Second*10) // TODO pick a timeout for executing a single step
err := s.derivation.Step(stepCtx)
cancel()
err := s.derivation.Step(context.Background())
stepAttempts += 1 // count as attempt by default. We reset to 0 if we are making healthy progress.
if err == io.EOF {
s.log.Debug("Derivation process went idle", "progress", s.derivation.Origin())
......
......@@ -32,6 +32,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
type L1Chain interface {
......@@ -101,7 +102,7 @@ func currentHeads(ctx context.Context, cfg *rollup.Config, l2 L2Chain) (*FindHea
// Plausible: meaning that the blockhash of the L2 block's L1 origin
// (as reported in the L1 Attributes deposit within the L2 block) is not canonical at another height in the L1 chain,
// and the same holds for all its ancestors.
func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain) (result *FindHeadsResult, err error) {
func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain, lgr log.Logger) (result *FindHeadsResult, err error) {
// Fetch current L2 forkchoice state
result, err = currentHeads(ctx, cfg, l2)
if err != nil {
......@@ -137,6 +138,8 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain
ahead = notFound
}
lgr.Trace("walking sync start", "number", n.Number)
// Don't walk past genesis. If we were at the L2 genesis, but could not find its L1 origin,
// the L2 chain is building on the wrong L1 branch.
if n.Number == cfg.Genesis.L2.Number {
......
......@@ -73,7 +73,9 @@ func (c *syncStartTestCase) Run(t *testing.T) {
Genesis: genesis,
SeqWindowSize: c.SeqWindowSize,
}
result, err := FindL2Heads(context.Background(), cfg, chain, chain)
lgr := log.New()
lgr.SetHandler(log.DiscardHandler())
result, err := FindL2Heads(context.Background(), cfg, chain, chain, lgr)
if c.ExpectedErr != nil {
require.ErrorIs(t, err, c.ExpectedErr, "expected error")
return
......
......@@ -251,7 +251,7 @@ func (s *EthClient) FetchReceipts(ctx context.Context, blockHash common.Hash) (e
for i := 0; i < len(txs); i++ {
txHashes[i] = txs[i].Hash()
}
fetcher = NewReceiptsFetcher(info.ID(), info.ReceiptHash(), txHashes, s.client.BatchCallContext, s.maxBatchSize)
fetcher = NewReceiptsFetcher(eth.ToBlockID(info), info.ReceiptHash(), txHashes, s.client.BatchCallContext, s.maxBatchSize)
s.receiptsCache.Add(blockHash, fetcher)
}
// Fetch all receipts
......
# @eth-optimism/ci-builder
## 0.3.3
### Patch Changes
- 3f485627: Pin slither version to 0.9.0
## 0.3.2
### Patch Changes
......
{
"name": "@eth-optimism/ci-builder",
"version": "0.3.2",
"version": "0.3.3",
"scripts": {},
"license": "MIT",
"dependencies": {}
......
# @eth-optimism/actor-tests
## 0.0.9
### Patch Changes
- Updated dependencies [35a7bb5e]
- Updated dependencies [b40913b1]
- Updated dependencies [a5e715c3]
- Updated dependencies [d18b8aa3]
- @eth-optimism/contracts-bedrock@0.8.1
- @eth-optimism/sdk@1.6.7
## 0.0.8
### Patch Changes
......
{
"name": "@eth-optimism/actor-tests",
"version": "0.0.8",
"version": "0.0.9",
"description": "A library and suite of tests to stress test Optimism Bedrock.",
"license": "MIT",
"author": "",
......@@ -18,9 +18,9 @@
"test:coverage": "yarn test"
},
"dependencies": {
"@eth-optimism/contracts-bedrock": "0.8.0",
"@eth-optimism/contracts-bedrock": "0.8.1",
"@eth-optimism/core-utils": "^0.10.1",
"@eth-optimism/sdk": "^1.6.6",
"@eth-optimism/sdk": "^1.6.7",
"@types/chai": "^4.2.18",
"@types/chai-as-promised": "^7.1.4",
"async-mutex": "^0.3.2",
......
# @eth-optimism/contracts-bedrock
## 0.8.1
### Patch Changes
- 35a7bb5e: Use uint64 for arithmetic in XDM's baseGas
- a5e715c3: Rename the event emitted in the L2ToL1MessagePasser
- d18b8aa3: Removes an unnecessary initializer parameter in the L2OO
## 0.8.0
### Minor Changes
......
......@@ -7,7 +7,7 @@ import '@eth-optimism/hardhat-deploy-config'
const upgradeABIs = {
L2OutputOracleProxy: async (deployConfig) => [
'initialize(bytes32,uint256,address,address)',
'initialize(bytes32,uint256,address)',
[
deployConfig.l2OutputOracleGenesisL2Output,
deployConfig.l2OutputOracleProposer,
......
{
"name": "@eth-optimism/contracts-bedrock",
"version": "0.8.0",
"version": "0.8.1",
"description": "Contracts for Optimism Specs",
"main": "dist/index",
"types": "dist/index",
......
# @eth-optimism/contracts-periphery
## 1.0.2
### Patch Changes
- e81a6ff5: Goerli nft bridge deployment
- a3242d4f: Fix erc721 factory to match erc21 factory
- ffa5297e: mainnet nft bridge deployments
## 1.0.1
### Patch Changes
......
{
"name": "@eth-optimism/contracts-periphery",
"version": "1.0.1",
"version": "1.0.2",
"description": "[Optimism] External (out-of-protocol) L1 and L2 smart contracts for Optimism",
"main": "dist/index",
"types": "dist/index",
......@@ -55,7 +55,7 @@
"devDependencies": {
"@defi-wonderland/smock": "^2.0.7",
"@eth-optimism/contracts": "^0.5.37",
"@eth-optimism/contracts-bedrock": "^0.8.0",
"@eth-optimism/contracts-bedrock": "^0.8.1",
"@eth-optimism/core-utils": "^0.10.1",
"@eth-optimism/hardhat-deploy-config": "^0.2.4",
"@ethersproject/hardware-wallets": "^5.7.0",
......
# @eth-optimism/drippie-mon
## 0.3.18
### Patch Changes
- Updated dependencies [b40913b1]
- Updated dependencies [a5e715c3]
- Updated dependencies [e81a6ff5]
- Updated dependencies [a3242d4f]
- Updated dependencies [ffa5297e]
- @eth-optimism/sdk@1.6.7
- @eth-optimism/contracts-periphery@1.0.2
## 0.3.17
### Patch Changes
......
{
"private": true,
"name": "@eth-optimism/drippie-mon",
"version": "0.3.17",
"version": "0.3.18",
"description": "[Optimism] Service for monitoring Drippie instances",
"main": "dist/index",
"types": "dist/index",
......@@ -33,9 +33,9 @@
},
"dependencies": {
"@eth-optimism/common-ts": "0.6.6",
"@eth-optimism/contracts-periphery": "1.0.1",
"@eth-optimism/contracts-periphery": "1.0.2",
"@eth-optimism/core-utils": "0.10.1",
"@eth-optimism/sdk": "1.6.6",
"@eth-optimism/sdk": "1.6.7",
"ethers": "^5.7.0"
},
"devDependencies": {
......
......@@ -29,7 +29,7 @@
"devDependencies": {
"@eth-optimism/contracts": "0.5.37",
"@eth-optimism/core-utils": "0.10.1",
"@eth-optimism/sdk": "1.6.6",
"@eth-optimism/sdk": "1.6.7",
"@ethersproject/abstract-provider": "^5.7.0",
"chai-as-promised": "^7.1.1",
"chai": "^4.3.4",
......
# @eth-optimism/message-relayer
## 0.5.17
### Patch Changes
- Updated dependencies [b40913b1]
- Updated dependencies [a5e715c3]
- @eth-optimism/sdk@1.6.7
## 0.5.16
### Patch Changes
......
{
"private": true,
"name": "@eth-optimism/message-relayer",
"version": "0.5.16",
"version": "0.5.17",
"description": "[Optimism] Service for automatically relaying L2 to L1 transactions",
"main": "dist/index",
"types": "dist/index",
......@@ -33,7 +33,7 @@
"dependencies": {
"@eth-optimism/common-ts": "0.6.6",
"@eth-optimism/core-utils": "0.10.1",
"@eth-optimism/sdk": "1.6.6",
"@eth-optimism/sdk": "1.6.7",
"ethers": "^5.7.0"
},
"devDependencies": {
......
# @eth-optimism/sdk
## 1.6.7
### Patch Changes
- b40913b1: Adds contract addresses for the Bedrock Alpha testnet
- a5e715c3: Rename the event emitted in the L2ToL1MessagePasser
- Updated dependencies [35a7bb5e]
- Updated dependencies [a5e715c3]
- Updated dependencies [d18b8aa3]
- @eth-optimism/contracts-bedrock@0.8.1
## 1.6.6
### Patch Changes
......
{
"name": "@eth-optimism/sdk",
"version": "1.6.6",
"version": "1.6.7",
"description": "[Optimism] Tools for working with Optimism",
"main": "dist/index",
"types": "dist/index",
......@@ -50,7 +50,7 @@
"dependencies": {
"@eth-optimism/contracts": "0.5.37",
"@eth-optimism/core-utils": "0.10.1",
"@eth-optimism/contracts-bedrock": "0.8.0",
"@eth-optimism/contracts-bedrock": "0.8.1",
"lodash": "^4.17.21",
"merkletreejs": "^0.2.27",
"rlp": "^2.2.7"
......
# @eth-optimism/proxyd
## 3.12.0
### Minor Changes
- e9f2c701: Allow disabling backend rate limiter
- ca45a85e: Support pattern matching in exempt origins/user agents
- f4faa44c: adds server.log_level config
## 3.11.0
### Minor Changes
- b3c5eeec: Fixed JSON-RPC 2.0 specification compliance by adding the optional data field on an RPCError
- 01ae6625: Adds new Redis rate limiter
## 3.10.2
### Patch Changes
......
......@@ -5,6 +5,7 @@ import (
"crypto/rand"
"encoding/hex"
"fmt"
"math"
"sync"
"time"
......@@ -256,5 +257,30 @@ func randStr(l int) string {
return hex.EncodeToString(b)
}
type ServerRateLimiter struct {
type NoopBackendRateLimiter struct{}
var noopBackendRateLimiter = &NoopBackendRateLimiter{}
func (n *NoopBackendRateLimiter) IsBackendOnline(name string) (bool, error) {
return true, nil
}
func (n *NoopBackendRateLimiter) SetBackendOffline(name string, duration time.Duration) error {
return nil
}
func (n *NoopBackendRateLimiter) IncBackendRPS(name string) (int, error) {
return math.MaxInt, nil
}
func (n *NoopBackendRateLimiter) IncBackendWSConns(name string, max int) (bool, error) {
return true, nil
}
func (n *NoopBackendRateLimiter) DecBackendWSConns(name string) error {
return nil
}
func (n *NoopBackendRateLimiter) FlushBackendWSConns(names []string) error {
return nil
}
......@@ -37,6 +37,21 @@ func main() {
log.Crit("error reading config file", "err", err)
}
// update log level from config
logLevel, err := log.LvlFromString(config.Server.LogLevel)
if err != nil {
logLevel = log.LvlInfo
if config.Server.LogLevel != "" {
log.Warn("invalid server.log_level set: " + config.Server.LogLevel)
}
}
log.Root().SetHandler(
log.LvlFilterHandler(
logLevel,
log.StreamHandler(os.Stdout, log.JSONFormat()),
),
)
shutdown, err := proxyd.Start(config)
if err != nil {
log.Crit("error starting proxyd", "err", err)
......
......@@ -14,6 +14,7 @@ type ServerConfig struct {
WSPort int `toml:"ws_port"`
MaxBodySizeBytes int64 `toml:"max_body_size_bytes"`
MaxConcurrentRPCs int64 `toml:"max_concurrent_rpcs"`
LogLevel string `toml:"log_level"`
// TimeoutSeconds specifies the maximum time spent serving an HTTP request. Note that isn't used for websocket connections
TimeoutSeconds int `toml:"timeout_seconds"`
......@@ -42,6 +43,7 @@ type MetricsConfig struct {
type RateLimitConfig struct {
UseRedis bool `toml:"use_redis"`
EnableBackendRateLimiter bool `toml:"enable_backend_rate_limiter"`
BaseRate int `toml:"base_rate"`
BaseInterval TOMLDuration `toml:"base_interval"`
ExemptOrigins []string `toml:"exempt_origins"`
......
......@@ -19,6 +19,8 @@ ws_port = 8085
# Maximum client body size, in bytes, that the server will accept.
max_body_size_bytes = 10485760
max_concurrent_rpcs = 1000
# Server log level
log_level = "info"
[redis]
# URL to a Redis instance.
......
......@@ -16,3 +16,6 @@ backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
[rate_limit]
enable_backend_rate_limiter = true
\ No newline at end of file
......@@ -20,3 +20,6 @@ backends = ["bad", "good"]
[rpc_method_mappings]
eth_chainId = "main"
[rate_limit]
enable_backend_rate_limiter = true
\ No newline at end of file
......@@ -26,3 +26,6 @@ backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
[rate_limit]
enable_backend_rate_limiter = true
\ No newline at end of file
{
"name": "@eth-optimism/proxyd",
"version": "3.10.2",
"version": "3.12.0",
"private": true,
"dependencies": {}
}
......@@ -53,11 +53,15 @@ func Start(config *Config) (func(), error) {
var lim BackendRateLimiter
var err error
if redisClient == nil {
if config.RateLimit.EnableBackendRateLimiter {
if redisClient != nil {
lim = NewRedisRateLimiter(redisClient)
} else {
log.Warn("redis is not configured, using local rate limiter")
lim = NewLocalBackendRateLimiter()
}
} else {
lim = NewRedisRateLimiter(redisClient)
lim = noopBackendRateLimiter
}
// While modifying shared globals is a bad practice, the alternative
......
......@@ -8,6 +8,7 @@ import (
"io"
"math"
"net/http"
"regexp"
"strconv"
"strings"
"sync"
......@@ -49,8 +50,8 @@ type Server struct {
upgrader *websocket.Upgrader
mainLim FrontendRateLimiter
overrideLims map[string]FrontendRateLimiter
limExemptOrigins map[string]bool
limExemptUserAgents map[string]bool
limExemptOrigins []*regexp.Regexp
limExemptUserAgents []*regexp.Regexp
rpcServer *http.Server
wsServer *http.Server
cache RPCCache
......@@ -104,15 +105,23 @@ func NewServer(
}
var mainLim FrontendRateLimiter
limExemptOrigins := make(map[string]bool)
limExemptUserAgents := make(map[string]bool)
limExemptOrigins := make([]*regexp.Regexp, 0)
limExemptUserAgents := make([]*regexp.Regexp, 0)
if rateLimitConfig.BaseRate > 0 {
mainLim = limiterFactory(time.Duration(rateLimitConfig.BaseInterval), rateLimitConfig.BaseRate, "main")
for _, origin := range rateLimitConfig.ExemptOrigins {
limExemptOrigins[strings.ToLower(origin)] = true
pattern, err := regexp.Compile(origin)
if err != nil {
return nil, err
}
limExemptOrigins = append(limExemptOrigins, pattern)
}
for _, agent := range rateLimitConfig.ExemptUserAgents {
limExemptUserAgents[strings.ToLower(agent)] = true
pattern, err := regexp.Compile(agent)
if err != nil {
return nil, err
}
limExemptUserAgents = append(limExemptUserAgents, pattern)
}
} else {
mainLim = NoopFrontendRateLimiter
......@@ -548,11 +557,22 @@ func (s *Server) populateContext(w http.ResponseWriter, r *http.Request) context
}
func (s *Server) isUnlimitedOrigin(origin string) bool {
return s.limExemptOrigins[strings.ToLower(origin)]
for _, pat := range s.limExemptOrigins {
if pat.MatchString(origin) {
return true
}
}
return false
}
func (s *Server) isUnlimitedUserAgent(origin string) bool {
return s.limExemptUserAgents[strings.ToLower(origin)]
for _, pat := range s.limExemptUserAgents {
if pat.MatchString(origin) {
return true
}
}
return false
}
func setCacheHeader(w http.ResponseWriter, cached bool) {
......