Commit 33ceeb19 authored by Adrian Sutton

Merge branch 'develop' into aj/geth-1.13.5

parents 2c966cdf caa9d27e
@@ -80,13 +80,13 @@ func (m *MIPSEVM) Step(t *testing.T, stepWitness *StepWitness) []byte {
     if stepWitness.HasPreimage() {
         t.Logf("reading preimage key %x at offset %d", stepWitness.PreimageKey, stepWitness.PreimageOffset)
-        poInput, err := encodePreimageOracleInput(t, stepWitness, 0)
+        poInput, err := encodePreimageOracleInput(t, stepWitness, LocalContext{})
         require.NoError(t, err, "encode preimage oracle input")
         _, leftOverGas, err := m.env.Call(vm.AccountRef(sender), m.addrs.Oracle, poInput, startingGas, big.NewInt(0))
         require.NoErrorf(t, err, "evm should not fail, took %d gas", startingGas-leftOverGas)
     }
-    input := encodeStepInput(t, stepWitness, 0)
+    input := encodeStepInput(t, stepWitness, LocalContext{})
     ret, leftOverGas, err := m.env.Call(vm.AccountRef(sender), m.addrs.MIPS, input, startingGas, big.NewInt(0))
     require.NoError(t, err, "evm should not fail")
     require.Len(t, ret, 32, "expecting 32-byte state hash")
@@ -109,7 +109,7 @@ func encodeStepInput(t *testing.T, wit *StepWitness, localContext LocalContext)
     mipsAbi, err := bindings.MIPSMetaData.GetAbi()
     require.NoError(t, err)
-    input, err := mipsAbi.Pack("step", wit.State, wit.MemProof, new(big.Int).SetUint64(uint64(localContext)))
+    input, err := mipsAbi.Pack("step", wit.State, wit.MemProof, localContext)
     require.NoError(t, err)
     return input
 }
@@ -132,7 +132,7 @@ func encodePreimageOracleInput(t *testing.T, wit *StepWitness, localContext Loca
     copy(tmp[:], preimagePart)
     input, err := preimageAbi.Pack("loadLocalData",
         new(big.Int).SetBytes(wit.PreimageKey[1:]),
-        new(big.Int).SetUint64(uint64(localContext)),
+        localContext,
         tmp,
         new(big.Int).SetUint64(uint64(len(preimagePart))),
         new(big.Int).SetUint64(uint64(wit.PreimageOffset)),
@@ -292,7 +292,7 @@ func TestEVMFault(t *testing.T) {
         State:    initialState.EncodeWitness(),
         MemProof: insnProof[:],
     }
-    input := encodeStepInput(t, stepWitness, 0)
+    input := encodeStepInput(t, stepWitness, LocalContext{})
     startingGas := uint64(30_000_000)
     _, _, err := env.Call(vm.AccountRef(sender), addrs.MIPS, input, startingGas, big.NewInt(0))
...
 package mipsevm

-type LocalContext uint64
+import "github.com/ethereum/go-ethereum/common"
+
+type LocalContext common.Hash

 type StepWitness struct {
     // encoded state witness
...
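For reference, the `LocalContext` change above means the value is now ABI-encoded as a full 32-byte word rather than a `uint256` built from a `uint64` (assuming the contract-side `step`/`loadLocalData` signatures, not shown here, now take `bytes32`). A minimal, illustrative sketch of that packing difference, not part of this commit:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical single-argument encodings for the old and new local context.
	uint256Type, _ := abi.NewType("uint256", "", nil)
	bytes32Type, _ := abi.NewType("bytes32", "", nil)

	// Old style: a uint64 widened into a uint256.
	oldPacked, _ := abi.Arguments{{Type: uint256Type}}.Pack(new(big.Int).SetUint64(42))

	// New style: a common.Hash packed directly as bytes32.
	localContext := common.HexToHash("0x2a")
	newPacked, _ := abi.Arguments{{Type: bytes32Type}}.Pack([32]byte(localContext))

	// Both occupy one 32-byte word, but the new form is no longer limited to 64-bit values.
	fmt.Printf("old: %x\nnew: %x\n", oldPacked, newPacked)
}
```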
@@ -14,6 +14,7 @@ COPY ./indexer /app/indexer
 COPY ./op-bindings /app/op-bindings
 COPY ./op-service /app/op-service
 COPY ./op-node /app/op-node
+COPY ./op-chain-ops /app/op-chain-ops

 WORKDIR /app/indexer
...
@@ -33,7 +33,7 @@
     "url": "https://github.com/ethereum-optimism/optimism.git"
   },
   "devDependencies": {
-    "tsup": "^7.2.0",
+    "tsup": "^8.0.1",
     "vitest": "^0.34.4"
   }
 }
@@ -39,6 +39,11 @@ type L2BridgeMessage struct {
     TransactionWithdrawalHash common.Hash `gorm:"serializer:bytes"`
 }

+type L2BridgeMessageVersionedMessageHash struct {
+    MessageHash   common.Hash `gorm:"primaryKey;serializer:bytes"`
+    V1MessageHash common.Hash `gorm:"serializer:bytes"`
+}
+
 type BridgeMessagesView interface {
     L1BridgeMessage(common.Hash) (*L1BridgeMessage, error)
     L1BridgeMessageWithFilter(BridgeMessage) (*L1BridgeMessage, error)
@@ -55,6 +60,8 @@ type BridgeMessagesDB interface {
     StoreL2BridgeMessages([]L2BridgeMessage) error
     MarkRelayedL2BridgeMessage(common.Hash, uuid.UUID) error
+
+    StoreL2BridgeMessageV1MessageHash(common.Hash, common.Hash) error
 }

 /**
@@ -134,8 +141,37 @@ func (db bridgeMessagesDB) StoreL2BridgeMessages(messages []L2BridgeMessage) err
     return result.Error
 }

+func (db bridgeMessagesDB) StoreL2BridgeMessageV1MessageHash(msgHash, v1MsgHash common.Hash) error {
+    if msgHash == v1MsgHash {
+        return fmt.Errorf("message hash is equal to the v1 message: %s", msgHash)
+    }
+
+    deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "message_hash"}}, DoNothing: true})
+    result := deduped.Create(&L2BridgeMessageVersionedMessageHash{MessageHash: msgHash, V1MessageHash: v1MsgHash})
+    if result.Error == nil && int(result.RowsAffected) < 1 {
+        db.log.Warn("ignored L2 bridge v1 message hash duplicates")
+    }
+
+    return result.Error
+}
+
 func (db bridgeMessagesDB) L2BridgeMessage(msgHash common.Hash) (*L2BridgeMessage, error) {
-    return db.L2BridgeMessageWithFilter(BridgeMessage{MessageHash: msgHash})
+    message, err := db.L2BridgeMessageWithFilter(BridgeMessage{MessageHash: msgHash})
+    if message != nil || err != nil {
+        return message, err
+    }
+
+    // check if this is a v1 hash of an older message
+    versioned := L2BridgeMessageVersionedMessageHash{V1MessageHash: msgHash}
+    result := db.gorm.Where(&versioned).Take(&versioned)
+    if result.Error != nil {
+        if errors.Is(result.Error, gorm.ErrRecordNotFound) {
+            return nil, nil
+        }
+        return nil, result.Error
+    }
+
+    return db.L2BridgeMessageWithFilter(BridgeMessage{MessageHash: versioned.MessageHash})
 }

 func (db bridgeMessagesDB) L2BridgeMessageWithFilter(filter BridgeMessage) (*L2BridgeMessage, error) {
...
@@ -4,12 +4,11 @@ import (
     "errors"
     "math/big"

-    "github.com/ethereum-optimism/optimism/indexer/processors/contracts"
     "github.com/ethereum-optimism/optimism/op-bindings/bindings"
+    "github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/crypto"
 )

 type CrossDomainMessengerSentMessage struct {
@@ -40,7 +39,17 @@ func ParseCrossDomainMessage(sentMessageReceipt *types.Receipt) (CrossDomainMess
             if err != nil {
                 return CrossDomainMessengerSentMessage{}, err
             }
-            msgHash, err := CrossDomainMessengerSentMessageHash(sentMessage, sentMessageExtension.Value)
+
+            msg := crossdomain.NewCrossDomainMessage(
+                sentMessage.MessageNonce,
+                sentMessage.Sender,
+                sentMessage.Target,
+                sentMessageExtension.Value,
+                sentMessage.GasLimit,
+                sentMessage.Message,
+            )
+
+            msgHash, err := msg.Hash()
             if err != nil {
                 return CrossDomainMessengerSentMessage{}, err
             }
@@ -51,17 +60,3 @@ func ParseCrossDomainMessage(sentMessageReceipt *types.Receipt) (CrossDomainMess
     return CrossDomainMessengerSentMessage{}, errors.New("missing SentMessage receipts")
 }
-
-func CrossDomainMessengerSentMessageHash(sentMessage *bindings.CrossDomainMessengerSentMessage, value *big.Int) (common.Hash, error) {
-    abi, err := bindings.CrossDomainMessengerMetaData.GetAbi()
-    if err != nil {
-        return common.Hash{}, err
-    }
-
-    calldata, err := contracts.CrossDomainMessageCalldata(abi, sentMessage, value)
-    if err != nil {
-        return common.Hash{}, err
-    }
-
-    return crypto.Keccak256Hash(calldata), nil
-}
@@ -176,6 +176,22 @@ CREATE INDEX IF NOT EXISTS l2_bridge_messages_timestamp ON l2_bridge_messages(ti
 CREATE INDEX IF NOT EXISTS l2_bridge_messages_transaction_withdrawal_hash ON l2_bridge_messages(transaction_withdrawal_hash);
 CREATE INDEX IF NOT EXISTS l2_bridge_messages_from_address ON l2_bridge_messages(from_address);

+/**
+ * Since the CDM uses the latest versioned message hash when emitting the `RelayedMessage` event, we need
+ * to keep track of all future versions of message hashes so that legacy messages can be queried
+ * for when they are relayed on L1.
+ *
+ * As the CDM is updated with new versions, we either need a better way to correlate messages between
+ * chains (such as adding the message nonce to the RelayedMessage event) or must continue to add columns
+ * to this table and migrate unrelayed messages so that finalization logic can switch between the varying
+ * versioned message hashes.
+ */
+CREATE TABLE IF NOT EXISTS l2_bridge_message_versioned_message_hashes(
+    message_hash VARCHAR PRIMARY KEY NOT NULL UNIQUE REFERENCES l2_bridge_messages(message_hash),
+
+    -- only filled in if `message_hash` is for a v0 message
+    v1_message_hash VARCHAR UNIQUE
+);
+
 -- StandardBridge
 CREATE TABLE IF NOT EXISTS l1_bridge_deposits (
     transaction_source_hash VARCHAR PRIMARY KEY REFERENCES l1_transaction_deposits(source_hash) ON DELETE CASCADE,
...
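For reference, the new table correlates the two hash identities a single legacy message ends up with: the v0 hash that keys the stored L2 bridge message and the v1 hash emitted by the post-bedrock messenger in `RelayedMessage`. A hedged sketch (not part of this commit) using the `crossdomain` helpers the indexer now relies on, with hypothetical message fields:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical legacy (version 0) message fields.
	nonce := big.NewInt(100_000)
	sender := common.HexToAddress("0x1111111111111111111111111111111111111111")
	target := common.HexToAddress("0x2222222222222222222222222222222222222222")
	data := []byte{0x01, 0x02}

	// v0 hash: how the legacy message itself is identified.
	v0Hash, err := crossdomain.HashCrossDomainMessageV0(target, sender, data, nonce)
	if err != nil {
		panic(err)
	}

	// v1 hash: what a post-bedrock RelayedMessage event would reference
	// (zero value and zero gas limit, mirroring legacyBridgeMessageV1MessageHash).
	v1Hash, err := crossdomain.HashCrossDomainMessageV1(nonce, sender, target, new(big.Int), new(big.Int), data)
	if err != nil {
		panic(err)
	}

	// Both hashes refer to the same logical message; the indexer stores the pair.
	fmt.Println("v0:", v0Hash, "v1:", v1Hash)
}
```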
@@ -256,7 +256,7 @@ func (b *BridgeProcessor) processInitiatedL2Events() error {
             legacyBridgeLog := l2BridgeLog.New("mode", "legacy", "from_block_number", legacyFromL2Height, "to_block_number", legacyToL2Height)
             legacyBridgeLog.Info("scanning for initiated bridge events")
-            if err := bridge.LegacyL2ProcessInitiatedBridgeEvents(legacyBridgeLog, tx, b.metrics, b.chainConfig.L2Contracts, legacyFromL2Height, legacyToL2Height); err != nil {
+            if err := bridge.LegacyL2ProcessInitiatedBridgeEvents(legacyBridgeLog, tx, b.metrics, b.chainConfig.Preset, b.chainConfig.L2Contracts, legacyFromL2Height, legacyToL2Height); err != nil {
                 return err
             } else if legacyToL2Height.Cmp(toL2Height) == 0 {
                 return nil // a-ok! Entire range was legacy blocks
...
@@ -12,6 +12,8 @@ import (
     "github.com/ethereum-optimism/optimism/indexer/database"
     "github.com/ethereum-optimism/optimism/indexer/node"
     "github.com/ethereum-optimism/optimism/indexer/processors/contracts"
+    "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
+    "github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
 )

 // Legacy Bridge Initiation
@@ -145,7 +147,7 @@ func LegacyL1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri
 //  1. L2CrossDomainMessenger - The LegacyMessagePasser contract cannot be used as entrypoint to bridge transactions from L2. The protocol
 //     only allows the L2CrossDomainMessenger as the sole sender when relaying a bridged message.
 //  2. L2StandardBridge
-func LegacyL2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L2Metricer, l2Contracts config.L2Contracts, fromHeight, toHeight *big.Int) error {
+func LegacyL2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L2Metricer, preset int, l2Contracts config.L2Contracts, fromHeight, toHeight *big.Int) error {
     // (1) L2CrossDomainMessenger
     crossDomainSentMessages, err := contracts.CrossDomainMessengerSentMessageEvents("l2", l2Contracts.L2CrossDomainMessenger, db, fromHeight, toHeight)
     if err != nil {
@@ -168,22 +170,38 @@ func LegacyL2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri
         sentMessage := crossDomainSentMessages[i]
         withdrawnWEI = new(big.Int).Add(withdrawnWEI, sentMessage.BridgeMessage.Tx.Amount)

-        // We re-use the L2CrossDomainMessenger message hash as the withdrawal hash to remain consistent in the schema.
+        // Since these messages can be relayed in bedrock, we use the migrated withdrawal hash
+        // and also store the v1 version of the message hash so that the bedrock L1 finalization
+        // processor works as expected.
+        v1MessageHash, err := legacyBridgeMessageV1MessageHash(&sentMessage.BridgeMessage)
+        if err != nil {
+            return fmt.Errorf("failed to compute versioned message hash: %w", err)
+        }
+        if err := db.BridgeMessages.StoreL2BridgeMessageV1MessageHash(sentMessage.BridgeMessage.MessageHash, v1MessageHash); err != nil {
+            return err
+        }
+
+        withdrawalHash, err := legacyBridgeMessageWithdrawalHash(preset, &sentMessage.BridgeMessage)
+        if err != nil {
+            return fmt.Errorf("failed to construct migrated withdrawal hash: %w", err)
+        }
+
         transactionWithdrawals[i] = database.L2TransactionWithdrawal{
-            WithdrawalHash:       sentMessage.BridgeMessage.MessageHash,
+            WithdrawalHash:       withdrawalHash,
             InitiatedL2EventGUID: sentMessage.Event.GUID,
             Nonce:                sentMessage.BridgeMessage.Nonce,
             GasLimit:             sentMessage.BridgeMessage.GasLimit,
             Tx: database.Transaction{
                 FromAddress: sentMessage.BridgeMessage.Tx.FromAddress,
                 ToAddress:   sentMessage.BridgeMessage.Tx.ToAddress,
-                Amount:      big.NewInt(0),
+                Amount:      bigint.Zero,
                 Data:        sentMessage.BridgeMessage.Tx.Data,
                 Timestamp:   sentMessage.Event.Timestamp,
             },
         }

-        sentMessages[logKey{sentMessage.Event.BlockHash, sentMessage.Event.LogIndex}] = sentMessageEvent{&sentMessage, sentMessage.BridgeMessage.MessageHash}
+        sentMessages[logKey{sentMessage.Event.BlockHash, sentMessage.Event.LogIndex}] = sentMessageEvent{&sentMessage, withdrawalHash}
         bridgeMessages[i] = database.L2BridgeMessage{
             TransactionWithdrawalHash: sentMessage.BridgeMessage.MessageHash,
             BridgeMessage:             sentMessage.BridgeMessage,
@@ -297,16 +315,17 @@ func LegacyL1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metri
             }
         }

-        // Mark the associated tx withdrawal as proven/finalized with the same event. The message hash is also the transaction withdrawal hash
-        if err := db.BridgeTransactions.MarkL2TransactionWithdrawalProvenEvent(relayedMessage.MessageHash, relayedMessage.Event.GUID); err != nil {
+        if err := db.BridgeMessages.MarkRelayedL2BridgeMessage(relayedMessage.MessageHash, relayedMessage.Event.GUID); err != nil {
+            return fmt.Errorf("failed to relay cross domain message. tx_hash = %s: %w", relayedMessage.Event.TransactionHash, err)
+        }
+
+        // Mark the associated tx withdrawal as proven/finalized with the same event.
+        if err := db.BridgeTransactions.MarkL2TransactionWithdrawalProvenEvent(message.TransactionWithdrawalHash, relayedMessage.Event.GUID); err != nil {
             return fmt.Errorf("failed to mark withdrawal as proven. tx_hash = %s: %w", relayedMessage.Event.TransactionHash, err)
         }
-        if err := db.BridgeTransactions.MarkL2TransactionWithdrawalFinalizedEvent(relayedMessage.MessageHash, relayedMessage.Event.GUID, true); err != nil {
+        if err := db.BridgeTransactions.MarkL2TransactionWithdrawalFinalizedEvent(message.TransactionWithdrawalHash, relayedMessage.Event.GUID, true); err != nil {
             return fmt.Errorf("failed to mark withdrawal as finalized. tx_hash = %s: %w", relayedMessage.Event.TransactionHash, err)
         }
-        if err := db.BridgeMessages.MarkRelayedL2BridgeMessage(relayedMessage.MessageHash, relayedMessage.Event.GUID); err != nil {
-            return fmt.Errorf("failed to relay cross domain message. tx_hash = %s: %w", relayedMessage.Event.TransactionHash, err)
-        }
     }
+
     if len(crossDomainRelayedMessages) > 0 {
         metrics.RecordL1ProvenWithdrawals(len(crossDomainRelayedMessages))
@@ -370,3 +389,27 @@ func LegacyL2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metri
     // a-ok!
     return nil
 }
+
+// Utils
+
+func legacyBridgeMessageWithdrawalHash(preset int, msg *database.BridgeMessage) (common.Hash, error) {
+    l1Cdm := config.Presets[preset].ChainConfig.L1Contracts.L1CrossDomainMessengerProxy
+    legacyWithdrawal := crossdomain.NewLegacyWithdrawal(predeploys.L2CrossDomainMessengerAddr, msg.Tx.ToAddress, msg.Tx.FromAddress, msg.Tx.Data, msg.Nonce)
+    migratedWithdrawal, err := crossdomain.MigrateWithdrawal(legacyWithdrawal, &l1Cdm, big.NewInt(int64(preset)))
+    if err != nil {
+        return common.Hash{}, err
+    }
+
+    return migratedWithdrawal.Hash()
+}
+
+func legacyBridgeMessageV1MessageHash(msg *database.BridgeMessage) (common.Hash, error) {
+    legacyWithdrawal := crossdomain.NewLegacyWithdrawal(predeploys.L2CrossDomainMessengerAddr, msg.Tx.ToAddress, msg.Tx.FromAddress, msg.Tx.Data, msg.Nonce)
+    value, err := legacyWithdrawal.Value()
+    if err != nil {
+        return common.Hash{}, fmt.Errorf("failed to extract ETH value from legacy bridge message: %w", err)
+    }
+
+    // Note: GasLimit is always zero. Only the GasLimit for the withdrawal transaction was migrated
+    return crossdomain.HashCrossDomainMessageV1(msg.Nonce, msg.Tx.FromAddress, msg.Tx.ToAddress, value, new(big.Int), msg.Tx.Data)
+}
package bridge

import (
    "testing"

    "github.com/ethereum-optimism/optimism/indexer/bigint"
    "github.com/ethereum-optimism/optimism/indexer/database"
    "github.com/ethereum-optimism/optimism/indexer/processors/contracts"
    "github.com/ethereum-optimism/optimism/op-bindings/bindings"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/stretchr/testify/require"
)

func TestLegacyWithdrawalAndMessageHash(t *testing.T) {
    // Pre-Bedrock OP-Goerli withdrawal that was proven post-bedrock
    //  - L1 proven withdrawal tx: 0xa8853a3532f40052385602c66512e438bc1e3736d3cb7abde359f5b9377441c7
    value := bigint.Zero
    expectedWithdrawalHash := common.HexToHash("0xae99d25df3e38730f6ee6588733417e20a131923b84870be6aedb4f863b6302d")

    // Ensure the L2 tx which correlates with the above proven withdrawal results in the same computed withdrawal hash
    //  - L2 withdrawal tx: 0x254d9c28add020404142f840ed794cea51f86c0f0a737e3e7bdd7e1e4550962e
    abi, err := bindings.CrossDomainMessengerMetaData.GetAbi()
    require.NoError(t, err)
    var sentMessage bindings.CrossDomainMessengerSentMessage
    sentMessageEvent := abi.Events["SentMessage"]
logData := common.FromHex("0x0000000000000000000000004200000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000186a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e4a9f9e67500000000000000000000000007865c6e87b9f70255377e024ace6630c1eaa37f0000000000000000000000003b8e53b3ab8e01fb57d0c9e893bc4d655aa67d84000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000000000000000000000000000000000000000271000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
    require.NoError(t, contracts.UnpackLog(&sentMessage, &types.Log{Data: logData, Topics: []common.Hash{sentMessageEvent.ID, common.HexToHash("0x000000000000000000000000636af16bf2f682dd3109e60102b8e1a089fedaa8")}}, sentMessageEvent.Name, abi))

    // timestamp and message hash are filled-in fields; they are not core to the event
    msg := database.BridgeMessage{
        Nonce:    sentMessage.MessageNonce,
        GasLimit: sentMessage.GasLimit,
        Tx:       database.Transaction{FromAddress: sentMessage.Sender, ToAddress: sentMessage.Target, Amount: value, Data: sentMessage.Message},
    }
    hash, err := legacyBridgeMessageWithdrawalHash(420, &msg)
    require.NoError(t, err)
    require.Equal(t, expectedWithdrawalHash, hash)

    // Ensure the relayed message hash (v1) matches
    expectedMessageHash := common.HexToHash("0xcb16ecc1967f5d7aed909349a4351d28fbb396429ef7faf1c9d2a670e3ca906f")
    v1MessageHash, err := legacyBridgeMessageV1MessageHash(&msg)
    require.NoError(t, err)
    require.Equal(t, expectedMessageHash, v1MessageHash)
}
@@ -6,17 +6,18 @@ import (
     "github.com/ethereum/go-ethereum/accounts/abi"
     "github.com/ethereum/go-ethereum/common"
-    "github.com/ethereum/go-ethereum/crypto"

+    "github.com/ethereum-optimism/optimism/indexer/bigint"
     "github.com/ethereum-optimism/optimism/indexer/database"
     "github.com/ethereum-optimism/optimism/op-bindings/bindings"
+    "github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
 )

 var (
     // Standard ABI types copied from golang ABI tests
-    uint256Type, _ = abi.NewType("uint256", "", nil)
-    bytesType, _   = abi.NewType("bytes", "", nil)
     addressType, _ = abi.NewType("address", "", nil)
+    bytesType, _   = abi.NewType("bytes", "", nil)
+    uint256Type, _ = abi.NewType("uint256", "", nil)

     CrossDomainMessengerLegacyRelayMessageEncoding = abi.NewMethod(
         "relayMessage",
@@ -39,8 +40,8 @@ var (
 type CrossDomainMessengerSentMessageEvent struct {
     Event           *database.ContractEvent
-    MessageCalldata []byte
     BridgeMessage   database.BridgeMessage
+    Version         uint16
 }

 type CrossDomainMessengerRelayedMessageEvent struct {
@@ -85,32 +86,46 @@ func CrossDomainMessengerSentMessageEvents(chainSelector string, contractAddress
             return nil, err
         }

-        version, _ := DecodeVersionedNonce(sentMessage.MessageNonce)
+        _, versionBig := crossdomain.DecodeVersionedNonce(sentMessage.MessageNonce)
+        version := uint16(versionBig.Uint64())
         if i < numVersionZeroMessages && version != 0 {
-            return nil, fmt.Errorf("expected version zero nonce. nonce %d tx_hash %s", sentMessage.MessageNonce, sentMessage.Raw.TxHash)
+            return nil, fmt.Errorf("expected version zero nonce: nonce %d, tx_hash %s", sentMessage.MessageNonce, sentMessage.Raw.TxHash)
         }

-        // In version zero, to value is bridged through the cross domain messenger.
-        value := big.NewInt(0)
-        if version > 0 {
+        value := bigint.Zero
+        var messageHash common.Hash
+        switch version {
+        case 0:
+            messageHash, err = crossdomain.HashCrossDomainMessageV0(sentMessage.Target, sentMessage.Sender, sentMessage.Message, sentMessage.MessageNonce)
+            if err != nil {
+                return nil, err
+            }
+        case 1:
             sentMessageExtension := bindings.CrossDomainMessengerSentMessageExtension1{Raw: *sentMessageExtensionEvents[i].RLPLog}
             err = UnpackLog(&sentMessageExtension, sentMessageExtensionEvents[i].RLPLog, sentMessageExtensionEventAbi.Name, crossDomainMessengerAbi)
             if err != nil {
                 return nil, err
             }
-            value = sentMessageExtension.Value
-        }

-        messageCalldata, err := CrossDomainMessageCalldata(crossDomainMessengerAbi, &sentMessage, value)
-        if err != nil {
-            return nil, err
-        }
+            value = sentMessageExtension.Value
+            messageHash, err = crossdomain.HashCrossDomainMessageV1(sentMessage.MessageNonce, sentMessage.Sender, sentMessage.Target, value, sentMessage.GasLimit, sentMessage.Message)
+            if err != nil {
+                return nil, err
+            }
+        default:
+            // NOTE: We explicitly fail here since the presence of a new version means finalization
+            // logic needs to be updated to ensure L1 finalization can run from genesis and handle
+            // the changing version formats. This is meant to be a serving indicator of this.
+            return nil, fmt.Errorf("expected cross domain version 0 or version 1: %d", version)
+        }

         crossDomainSentMessages[i] = CrossDomainMessengerSentMessageEvent{
             Event:           &sentMessageEvents[i],
-            MessageCalldata: messageCalldata,
+            Version:         version,
             BridgeMessage: database.BridgeMessage{
-                MessageHash:          crypto.Keccak256Hash(messageCalldata),
+                MessageHash:          messageHash,
                 Nonce:                sentMessage.MessageNonce,
                 SentMessageEventGUID: sentMessageEvents[i].GUID,
                 GasLimit:             sentMessage.GasLimit,
@@ -157,26 +172,3 @@ func CrossDomainMessengerRelayedMessageEvents(chainSelector string, contractAddr
     return crossDomainRelayedMessages, nil
 }
-
-// Replica of `Encoding.sol#encodeCrossDomainMessage` solidity implementation
-func CrossDomainMessageCalldata(abi *abi.ABI, sentMsg *bindings.CrossDomainMessengerSentMessage, value *big.Int) ([]byte, error) {
-    version, _ := DecodeVersionedNonce(sentMsg.MessageNonce)
-    switch version {
-    case 0:
-        // Legacy Message
-        inputBytes, err := CrossDomainMessengerLegacyRelayMessageEncoding.Inputs.Pack(sentMsg.Target, sentMsg.Sender, sentMsg.Message, sentMsg.MessageNonce)
-        if err != nil {
-            return nil, err
-        }
-        return append(CrossDomainMessengerLegacyRelayMessageEncoding.ID, inputBytes...), nil
-    case 1:
-        // Current Message
-        msgBytes, err := abi.Pack("relayMessage", sentMsg.MessageNonce, sentMsg.Sender, sentMsg.Target, value, sentMsg.GasLimit, sentMsg.Message)
-        if err != nil {
-            return nil, err
-        }
-        return msgBytes, nil
-    }
-
-    return nil, fmt.Errorf("unsupported cross domain messenger version: %d", version)
-}
 package contracts

 import (
-    "encoding/binary"
     "errors"
     "fmt"
-    "math/big"

     "github.com/ethereum/go-ethereum/accounts/abi"
     "github.com/ethereum/go-ethereum/core/types"
 )

-// DecodeVersionNonce is an re-implementation of Encoding.sol#decodeVersionedNonce.
-// If the nonce is greater than 32 bytes (solidity uint256), bytes [32:] are ignored
-func DecodeVersionedNonce(nonce *big.Int) (uint16, *big.Int) {
-    nonceBytes := nonce.Bytes()
-    nonceByteLen := len(nonceBytes)
-    if nonceByteLen < 30 {
-        // version is 0x0000
-        return 0, nonce
-    } else if nonceByteLen == 31 {
-        // version is 0x00[01..ff]
-        return uint16(nonceBytes[0]), new(big.Int).SetBytes(nonceBytes[1:])
-    } else {
-        // fully specified
-        version := binary.BigEndian.Uint16(nonceBytes[:2])
-        return version, new(big.Int).SetBytes(nonceBytes[2:])
-    }
-}
-
 func UnpackLog(out interface{}, log *types.Log, name string, contractAbi *abi.ABI) error {
     eventAbi, ok := contractAbi.Events[name]
     if !ok {
...
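For reference, the local helper above is dropped in favour of `crossdomain.DecodeVersionedNonce`, which (as used earlier in this commit) returns the nonce first and the version second. A minimal sketch of the underlying encoding, assuming the Encoding.sol layout of a 16-bit version stored in the top bits of a uint256:

```go
package main

import (
	"fmt"
	"math/big"
)

// decodeVersionedNonce splits a versioned nonce into (nonce, version), assuming
// the version occupies the top 16 bits and the nonce the low 240 bits.
func decodeVersionedNonce(versioned *big.Int) (*big.Int, uint16) {
	version := uint16(new(big.Int).Rsh(versioned, 240).Uint64())
	mask := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 240), big.NewInt(1))
	return new(big.Int).And(versioned, mask), version
}

func main() {
	// A version-1 nonce: (1 << 240) | 7
	versioned := new(big.Int).Or(new(big.Int).Lsh(big.NewInt(1), 240), big.NewInt(7))
	nonce, version := decodeVersionedNonce(versioned)
	fmt.Println(nonce, version) // 7 1
}
```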
@@ -312,6 +312,10 @@ func (bs *BatcherService) Stop(ctx context.Context) error {
             result = errors.Join(result, fmt.Errorf("failed to close balance metricer: %w", err))
         }
     }
+
+    if bs.TxManager != nil {
+        bs.TxManager.Close()
+    }
+
     if bs.metricsSrv != nil {
         if err := bs.metricsSrv.Stop(ctx); err != nil {
             result = errors.Join(result, fmt.Errorf("failed to stop metrics server: %w", err))
...
This diff is collapsed.
@@ -13,7 +13,7 @@ const AlphabetVMStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":
 var AlphabetVMStorageLayout = new(solc.StorageLayout)
var AlphabetVMDeployedBin = "0x608060405234801561001057600080fd5b50600436106100365760003560e01c80637dc0d1d01461003b578063836e7b3214610085575b600080fd5b60005461005b9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b610098610093366004610213565b6100a6565b60405190815260200161007c565b600080600060087f0000000000000000000000000000000000000000000000000000000000000000901b600889896040516100e2929190610287565b6040518091039020901b03610108576000915061010187890189610297565b9050610127565b610114878901896102b0565b90925090508161012381610301565b9250505b81610133826001610339565b604080516020810193909352820152606001604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f01000000000000000000000000000000000000000000000000000000000000001798975050505050505050565b60008083601f8401126101dc57600080fd5b50813567ffffffffffffffff8111156101f457600080fd5b60208301915083602082850101111561020c57600080fd5b9250929050565b60008060008060006060868803121561022b57600080fd5b853567ffffffffffffffff8082111561024357600080fd5b61024f89838a016101ca565b9097509550602088013591508082111561026857600080fd5b50610275888289016101ca565b96999598509660400135949350505050565b8183823760009101908152919050565b6000602082840312156102a957600080fd5b5035919050565b600080604083850312156102c357600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610332576103326102d2565b5060010190565b6000821982111561034c5761034c6102d2565b50019056fea164736f6c634300080f000a" var AlphabetVMDeployedBin = "0x608060405234801561001057600080fd5b50600436106100365760003560e01c80637dc0d1d01461003b578063e14ced3214610085575b600080fd5b60005461005b9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b610098610093366004610213565b6100a6565b60405190815260200161007c565b600080600060087f0000000000000000000000000000000000000000000000000000000000000000901b600889896040516100e2929190610287565b6040518091039020901b03610108576000915061010187890189610297565b9050610127565b610114878901896102b0565b90925090508161012381610301565b9250505b81610133826001610339565b604080516020810193909352820152606001604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f01000000000000000000000000000000000000000000000000000000000000001798975050505050505050565b60008083601f8401126101dc57600080fd5b50813567ffffffffffffffff8111156101f457600080fd5b60208301915083602082850101111561020c57600080fd5b9250929050565b60008060008060006060868803121561022b57600080fd5b853567ffffffffffffffff8082111561024357600080fd5b61024f89838a016101ca565b9097509550602088013591508082111561026857600080fd5b50610275888289016101ca565b96999598509660400135949350505050565b8183823760009101908152919050565b6000602082840312156102a957600080fd5b5035919050565b600080604083850312156102c357600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610332576103326102d2565b5060010190565b6000821982111561034c5761034c6102d2565b50019056fea164736f6c634300080f000a"
 func init() {
     if err := json.Unmarshal([]byte(AlphabetVMStorageLayoutJSON), AlphabetVMStorageLayout); err != nil {
...
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
@@ -111,7 +111,7 @@ type DeployConfig struct {
     L2GenesisRegolithTimeOffset *hexutil.Uint64 `json:"l2GenesisRegolithTimeOffset,omitempty"`
     // L2GenesisCanyonTimeOffset is the number of seconds after genesis block that Canyon hard fork activates.
     // Set it to 0 to activate at genesis. Nil to disable Canyon.
-    L2GenesisCanyonTimeOffset *hexutil.Uint64 `json:"L2GenesisCanyonTimeOffset,omitempty"`
+    L2GenesisCanyonTimeOffset *hexutil.Uint64 `json:"l2GenesisCanyonTimeOffset,omitempty"`
     // L2GenesisSpanBatchTimeOffset is the number of seconds after genesis block that Span Batch hard fork activates.
     // Set it to 0 to activate at genesis. Nil to disable SpanBatch.
     L2GenesisSpanBatchTimeOffset *hexutil.Uint64 `json:"l2GenesisSpanBatchTimeOffset,omitempty"`
...
@@ -2,22 +2,19 @@ package op_challenger
 import (
     "context"
-    "fmt"
+
+    "github.com/ethereum/go-ethereum/log"

     "github.com/ethereum-optimism/optimism/op-challenger/config"
     "github.com/ethereum-optimism/optimism/op-challenger/game"
-    "github.com/ethereum/go-ethereum/log"
+    "github.com/ethereum-optimism/optimism/op-service/cliapp"
 )

-// Main is the programmatic entry-point for running op-challenger
-func Main(ctx context.Context, logger log.Logger, cfg *config.Config) error {
+// Main is the programmatic entry-point for running op-challenger with a given configuration.
+func Main(ctx context.Context, logger log.Logger, cfg *config.Config) (cliapp.Lifecycle, error) {
     if err := cfg.Check(); err != nil {
-        return err
+        return nil, err
     }
-    service, err := game.NewService(ctx, logger, cfg)
-    if err != nil {
-        return fmt.Errorf("failed to create the fault service: %w", err)
-    }
-
-    return service.MonitorGame(ctx)
+    srv, err := game.NewService(ctx, logger, cfg)
+    return srv, err
 }
@@ -12,6 +12,7 @@ import (
 func TestMainShouldReturnErrorWhenConfigInvalid(t *testing.T) {
     cfg := &config.Config{}
-    err := Main(context.Background(), testlog.Logger(t, log.LvlInfo), cfg)
+    app, err := Main(context.Background(), testlog.Logger(t, log.LvlInfo), cfg)
     require.ErrorIs(t, err, cfg.Check())
+    require.Nil(t, app)
 }
@@ -4,16 +4,18 @@ import (
     "context"
     "os"

-    op_challenger "github.com/ethereum-optimism/optimism/op-challenger"
-    opservice "github.com/ethereum-optimism/optimism/op-service"
-    "github.com/ethereum/go-ethereum/log"
     "github.com/urfave/cli/v2"

+    "github.com/ethereum/go-ethereum/log"
+
+    challenger "github.com/ethereum-optimism/optimism/op-challenger"
     "github.com/ethereum-optimism/optimism/op-challenger/config"
     "github.com/ethereum-optimism/optimism/op-challenger/flags"
     "github.com/ethereum-optimism/optimism/op-challenger/version"
+    opservice "github.com/ethereum-optimism/optimism/op-service"
     "github.com/ethereum-optimism/optimism/op-service/cliapp"
     oplog "github.com/ethereum-optimism/optimism/op-service/log"
+    "github.com/ethereum-optimism/optimism/op-service/opio"
 )

 var (
@@ -26,14 +28,15 @@ var VersionWithMeta = opservice.FormatVersion(version.Version, GitCommit, GitDat
 func main() {
     args := os.Args
-    if err := run(args, op_challenger.Main); err != nil {
+    ctx := opio.WithInterruptBlocker(context.Background())
+    if err := run(ctx, args, challenger.Main); err != nil {
         log.Crit("Application failed", "err", err)
     }
 }

-type ConfigAction func(ctx context.Context, log log.Logger, config *config.Config) error
+type ConfiguredLifecycle func(ctx context.Context, log log.Logger, config *config.Config) (cliapp.Lifecycle, error)

-func run(args []string, action ConfigAction) error {
+func run(ctx context.Context, args []string, action ConfiguredLifecycle) error {
     oplog.SetupDefaults()
     app := cli.NewApp()
@@ -42,20 +45,20 @@ func run(args []string, action ConfigAction) error {
     app.Name = "op-challenger"
     app.Usage = "Challenge outputs"
     app.Description = "Ensures that on chain outputs are correct."
-    app.Action = func(ctx *cli.Context) error {
+    app.Action = cliapp.LifecycleCmd(func(ctx *cli.Context, close context.CancelCauseFunc) (cliapp.Lifecycle, error) {
         logger, err := setupLogging(ctx)
         if err != nil {
-            return err
+            return nil, err
         }
         logger.Info("Starting op-challenger", "version", VersionWithMeta)

         cfg, err := flags.NewConfigFromCLI(ctx)
         if err != nil {
-            return err
+            return nil, err
         }
         return action(ctx.Context, logger, cfg)
-    }
-    return app.Run(args)
+    })
+    return app.RunContext(ctx, args)
 }

 func setupLogging(ctx *cli.Context) (log.Logger, error) {
...
@@ -2,15 +2,19 @@ package main
 import (
     "context"
+    "errors"
     "fmt"
     "testing"
     "time"

-    "github.com/ethereum-optimism/optimism/op-challenger/config"
-    "github.com/ethereum-optimism/optimism/op-service/txmgr"
+    "github.com/stretchr/testify/require"
+
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/log"
-    "github.com/stretchr/testify/require"
+
+    "github.com/ethereum-optimism/optimism/op-challenger/config"
+    "github.com/ethereum-optimism/optimism/op-service/cliapp"
+    "github.com/ethereum-optimism/optimism/op-service/txmgr"
 )

 var (
@@ -36,7 +40,7 @@ func TestLogLevel(t *testing.T) {
     for _, lvl := range []string{"trace", "debug", "info", "error", "crit"} {
         lvl := lvl
         t.Run("AcceptValid_"+lvl, func(t *testing.T) {
-            logger, _, err := runWithArgs(addRequiredArgs(config.TraceTypeAlphabet, "--log.level", lvl))
+            logger, _, err := dryRunWithArgs(addRequiredArgs(config.TraceTypeAlphabet, "--log.level", lvl))
             require.NoError(t, err)
             require.NotNil(t, logger)
         })
@@ -431,25 +435,29 @@ func TestCannonL2Genesis(t *testing.T) {
 }

 func verifyArgsInvalid(t *testing.T, messageContains string, cliArgs []string) {
-    _, _, err := runWithArgs(cliArgs)
+    _, _, err := dryRunWithArgs(cliArgs)
     require.ErrorContains(t, err, messageContains)
 }

 func configForArgs(t *testing.T, cliArgs []string) config.Config {
-    _, cfg, err := runWithArgs(cliArgs)
+    _, cfg, err := dryRunWithArgs(cliArgs)
     require.NoError(t, err)
     return cfg
 }

-func runWithArgs(cliArgs []string) (log.Logger, config.Config, error) {
+func dryRunWithArgs(cliArgs []string) (log.Logger, config.Config, error) {
     cfg := new(config.Config)
     var logger log.Logger
     fullArgs := append([]string{"op-challenger"}, cliArgs...)
-    err := run(fullArgs, func(ctx context.Context, log log.Logger, config *config.Config) error {
+    testErr := errors.New("dry-run")
+    err := run(context.Background(), fullArgs, func(ctx context.Context, log log.Logger, config *config.Config) (cliapp.Lifecycle, error) {
         logger = log
         cfg = config
-        return nil
+        return nil, testErr
     })
+    if errors.Is(err, testErr) { // expected error
+        err = nil
+    }
     return logger, *cfg, err
 }
...
@@ -232,7 +232,7 @@ func (f *FaultDisputeGameContract) addLocalDataTx(data *types.PreimageOracleData
     call := f.contract.Call(
         methodAddLocalData,
         data.GetIdent(),
-        new(big.Int).SetBytes(data.LocalContext.Bytes()),
+        data.LocalContext,
         new(big.Int).SetUint64(uint64(data.OracleOffset)),
     )
     return call.ToTxCandidate()
...
@@ -260,7 +260,7 @@ func TestUpdateOracleTx(t *testing.T) {
     }
     stubRpc.SetResponse(fdgAddr, methodAddLocalData, batching.BlockLatest, []interface{}{
         data.GetIdent(),
-        new(big.Int).SetBytes(data.LocalContext.Bytes()),
+        data.LocalContext,
         new(big.Int).SetUint64(uint64(data.OracleOffset)),
     }, nil)
     tx, err := game.UpdateOracleTx(context.Background(), data)
...
@@ -65,7 +65,7 @@ func registerOutputCannon(
     if err != nil {
         return nil, nil, err
     }
-    accessor, err := outputs.NewOutputCannonTraceAccessor(ctx, logger, cfg.RollupRpc, gameDepth, agreed.L2BlockNumber.Uint64(), disputed.L2BlockNumber.Uint64())
+    accessor, err := outputs.NewOutputCannonTraceAccessor(ctx, logger, m, cfg, contract, dir, gameDepth, agreed.L2BlockNumber.Uint64(), disputed.L2BlockNumber.Uint64())
     if err != nil {
         return nil, nil, err
     }
...
@@ -232,6 +232,9 @@ func (m *mockTxManager) From() common.Address {
     return m.from
 }

+func (m *mockTxManager) Close() {
+}
+
 type mockContract struct {
     calls     int
     callFails bool
...
@@ -23,21 +23,29 @@ type L2DataSource interface {
     HeaderByNumber(context.Context, *big.Int) (*ethtypes.Header, error)
 }

-type GameInputsSource interface {
+type L1HeadSource interface {
     GetL1Head(ctx context.Context) (common.Hash, error)
+}
+
+type GameInputsSource interface {
+    L1HeadSource
     GetProposals(ctx context.Context) (agreed contracts.Proposal, disputed contracts.Proposal, err error)
 }

 func fetchLocalInputs(ctx context.Context, caller GameInputsSource, l2Client L2DataSource) (LocalGameInputs, error) {
-    l1Head, err := caller.GetL1Head(ctx)
+    agreedOutput, claimedOutput, err := caller.GetProposals(ctx)
     if err != nil {
-        return LocalGameInputs{}, fmt.Errorf("fetch L1 head: %w", err)
+        return LocalGameInputs{}, fmt.Errorf("fetch proposals: %w", err)
     }

-    agreedOutput, claimedOutput, err := caller.GetProposals(ctx)
+    return fetchLocalInputsFromProposals(ctx, caller, l2Client, agreedOutput, claimedOutput)
+}
+
+func fetchLocalInputsFromProposals(ctx context.Context, caller L1HeadSource, l2Client L2DataSource, agreedOutput contracts.Proposal, claimedOutput contracts.Proposal) (LocalGameInputs, error) {
+    l1Head, err := caller.GetL1Head(ctx)
     if err != nil {
-        return LocalGameInputs{}, fmt.Errorf("fetch proposals: %w", err)
+        return LocalGameInputs{}, fmt.Errorf("fetch L1 head: %w", err)
     }

     agreedHeader, err := l2Client.HeaderByNumber(ctx, agreedOutput.L2BlockNumber)
     if err != nil {
         return LocalGameInputs{}, fmt.Errorf("fetch L2 block header %v: %w", agreedOutput.L2BlockNumber, err)
...
@@ -42,6 +42,36 @@ func TestFetchLocalInputs(t *testing.T) {
     require.Equal(t, contract.disputed.L2BlockNumber, inputs.L2BlockNumber)
 }

+func TestFetchLocalInputsFromProposals(t *testing.T) {
+    ctx := context.Background()
+    agreed := contracts.Proposal{
+        L2BlockNumber: big.NewInt(2222),
+        OutputRoot:    common.Hash{0xdd},
+    }
+    claimed := contracts.Proposal{
+        L2BlockNumber: big.NewInt(3333),
+        OutputRoot:    common.Hash{0xee},
+    }
+    contract := &mockGameInputsSource{
+        l1Head: common.Hash{0xcc},
+    }
+    l2Client := &mockL2DataSource{
+        chainID: big.NewInt(88422),
+        header: ethtypes.Header{
+            Number: agreed.L2BlockNumber,
+        },
+    }
+
+    inputs, err := fetchLocalInputsFromProposals(ctx, contract, l2Client, agreed, claimed)
+    require.NoError(t, err)
+
+    require.Equal(t, contract.l1Head, inputs.L1Head)
+    require.Equal(t, l2Client.header.Hash(), inputs.L2Head)
+    require.EqualValues(t, agreed.OutputRoot, inputs.L2OutputRoot)
+    require.EqualValues(t, claimed.OutputRoot, inputs.L2Claim)
+    require.Equal(t, claimed.L2BlockNumber, inputs.L2BlockNumber)
+}
+
 type mockGameInputsSource struct {
     l1Head   common.Hash
     starting contracts.Proposal
...
@@ -69,6 +69,30 @@ func NewTraceProvider(ctx context.Context, logger log.Logger, m CannonMetricer,
     return NewTraceProviderFromInputs(logger, m, cfg, localContext, localInputs, dir, gameDepth), nil
 }

+func NewTraceProviderFromProposals(
+    ctx context.Context,
+    logger log.Logger,
+    m CannonMetricer,
+    cfg *config.Config,
+    gameContract *contracts.FaultDisputeGameContract,
+    localContext common.Hash,
+    agreed contracts.Proposal,
+    claimed contracts.Proposal,
+    dir string,
+    gameDepth uint64,
+) (*CannonTraceProvider, error) {
+    l2Client, err := ethclient.DialContext(ctx, cfg.CannonL2)
+    if err != nil {
+        return nil, fmt.Errorf("dial l2 client %v: %w", cfg.CannonL2, err)
+    }
+    defer l2Client.Close() // Not needed after fetching the inputs
+    localInputs, err := fetchLocalInputsFromProposals(ctx, gameContract, l2Client, agreed, claimed)
+    if err != nil {
+        return nil, fmt.Errorf("fetch local game inputs: %w", err)
+    }
+    return NewTraceProviderFromInputs(logger, m, cfg, localContext, localInputs, dir, gameDepth), nil
+}
+
 func NewTraceProviderFromInputs(logger log.Logger, m CannonMetricer, cfg *config.Config, localContext common.Hash, localInputs LocalGameInputs, dir string, gameDepth uint64) *CannonTraceProvider {
     return &CannonTraceProvider{
         logger: logger,
...
@@ -2,28 +2,50 @@ package outputs
 import (
     "context"
-    "errors"
+    "fmt"
+    "path/filepath"

+    "github.com/ethereum-optimism/optimism/op-challenger/config"
     "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts"
     "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace"
+    "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/cannon"
     "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/split"
     "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
+    "github.com/ethereum-optimism/optimism/op-challenger/metrics"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/log"
 )

-func NewOutputCannonTraceAccessor(ctx context.Context, logger log.Logger, rollupRpc string, gameDepth uint64, prestateBlock uint64, poststateBlock uint64) (*trace.Accessor, error) {
-    topDepth := gameDepth / 2 // TODO(client-pod#43): Load this from the contract
-    outputProvider, err := NewTraceProvider(ctx, logger, rollupRpc, topDepth, prestateBlock, poststateBlock)
+func NewOutputCannonTraceAccessor(
+    ctx context.Context,
+    logger log.Logger,
+    m metrics.Metricer,
+    cfg *config.Config,
+    contract *contracts.FaultDisputeGameContract,
+    dir string,
+    gameDepth uint64,
+    prestateBlock uint64,
+    poststateBlock uint64,
+) (*trace.Accessor, error) {
+    // TODO(client-pod#43): Load depths from the contract
+    topDepth := gameDepth / 2
+    bottomDepth := gameDepth - topDepth
+    outputProvider, err := NewTraceProvider(ctx, logger, cfg.RollupRpc, topDepth, prestateBlock, poststateBlock)
     if err != nil {
         return nil, err
     }

     cannonCreator := func(ctx context.Context, localContext common.Hash, agreed contracts.Proposal, claimed contracts.Proposal) (types.TraceProvider, error) {
-        // TODO(client-pod#43): Actually create the cannon trace provider for the trace between the given claims.
-        return nil, errors.New("not implemented")
+        logger := logger.New("pre", agreed.OutputRoot, "post", claimed.OutputRoot, "localContext", localContext)
+        subdir := filepath.Join(dir, localContext.Hex())
+        provider, err := cannon.NewTraceProviderFromProposals(ctx, logger, m, cfg, contract, localContext, agreed, claimed, subdir, bottomDepth)
+        if err != nil {
+            return nil, fmt.Errorf("failed to create cannon trace provider: %w", err)
+        }
+        return provider, nil
     }

-    selector := split.NewSplitProviderSelector(outputProvider, int(topDepth), OutputRootSplitAdapter(outputProvider, cannonCreator))
+    cache := NewProviderCache(m, "output_cannon_provider", cannonCreator)
+    selector := split.NewSplitProviderSelector(outputProvider, int(topDepth), OutputRootSplitAdapter(outputProvider, cache.GetOrCreate))
     return trace.NewAccessor(selector), nil
 }
package outputs
import (
"context"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum-optimism/optimism/op-service/sources/caching"
"github.com/ethereum/go-ethereum/common"
)
type ProviderCache struct {
cache *caching.LRUCache[common.Hash, types.TraceProvider]
creator ProposalTraceProviderCreator
}
func (c *ProviderCache) GetOrCreate(ctx context.Context, localContext common.Hash, agreed contracts.Proposal, claimed contracts.Proposal) (types.TraceProvider, error) {
provider, ok := c.cache.Get(localContext)
if ok {
return provider, nil
}
provider, err := c.creator(ctx, localContext, agreed, claimed)
if err != nil {
return nil, err
}
c.cache.Add(localContext, provider)
return provider, nil
}
func NewProviderCache(m caching.Metrics, metricsLabel string, creator ProposalTraceProviderCreator) *ProviderCache {
cache := caching.NewLRUCache[common.Hash, types.TraceProvider](m, metricsLabel, 100)
return &ProviderCache{
cache: cache,
creator: creator,
}
}
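A note on the cache above: GetOrCreate keys providers by the local context hash and skips caching on error, so a failed creation is retried on the next call. A minimal usage sketch in Go, assuming the same op-challenger packages shown in this diff (the function name and example values are illustrative only):

func exampleProviderCache(ctx context.Context) (types.TraceProvider, error) {
	// Creator the cache falls back to on a miss. A trivial alphabet provider
	// stands in for the real cannon provider here.
	creator := func(ctx context.Context, localContext common.Hash, agreed contracts.Proposal, claimed contracts.Proposal) (types.TraceProvider, error) {
		return alphabet.NewTraceProvider("abcdef", 6), nil
	}
	cache := NewProviderCache(metrics.NoopMetrics, "example_provider", creator)
	// First call creates and caches; later calls with the same local context
	// return the same instance. Errors from the creator are never cached.
	return cache.GetOrCreate(ctx, common.Hash{0x01}, contracts.Proposal{}, contracts.Proposal{})
}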
package outputs
import (
"context"
"errors"
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/alphabet"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum-optimism/optimism/op-challenger/metrics"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
func TestProviderCache(t *testing.T) {
agreed := contracts.Proposal{
L2BlockNumber: big.NewInt(34),
OutputRoot: common.Hash{0xaa},
}
claimed := contracts.Proposal{
L2BlockNumber: big.NewInt(35),
OutputRoot: common.Hash{0xcc},
}
var createdProvider types.TraceProvider
creator := func(ctx context.Context, localContext common.Hash, agreed contracts.Proposal, claimed contracts.Proposal) (types.TraceProvider, error) {
createdProvider = alphabet.NewTraceProvider("abcdef", 6)
return createdProvider, nil
}
localContext1 := common.Hash{0xdd}
localContext2 := common.Hash{0xee}
cache := NewProviderCache(metrics.NoopMetrics, "test", creator)
// Create on first call
provider1, err := cache.GetOrCreate(context.Background(), localContext1, agreed, claimed)
require.NoError(t, err)
require.Same(t, createdProvider, provider1, "should return created trace provider")
// Return the cached provider on subsequent calls.
createdProvider = nil
cached, err := cache.GetOrCreate(context.Background(), localContext1, agreed, claimed)
require.NoError(t, err)
require.Same(t, provider1, cached, "should return exactly the same instance from cache")
require.Nil(t, createdProvider)
// Create a new provider when the local context is different
createdProvider = nil
otherProvider, err := cache.GetOrCreate(context.Background(), localContext2, agreed, claimed)
require.NoError(t, err)
require.Same(t, otherProvider, createdProvider, "should return newly created trace provider")
require.NotSame(t, otherProvider, provider1, "should not use cached provider for different local context")
}
func TestProviderCache_DoNotCacheErrors(t *testing.T) {
callCount := 0
providerErr := errors.New("boom")
creator := func(ctx context.Context, localContext common.Hash, agreed contracts.Proposal, claimed contracts.Proposal) (types.TraceProvider, error) {
callCount++
return nil, providerErr
}
localContext1 := common.Hash{0xdd}
cache := NewProviderCache(metrics.NoopMetrics, "test", creator)
provider, err := cache.GetOrCreate(context.Background(), localContext1, contracts.Proposal{}, contracts.Proposal{})
require.Nil(t, provider)
require.ErrorIs(t, err, providerErr)
require.Equal(t, 1, callCount)
// Should call the creator again on the second attempt
provider, err = cache.GetOrCreate(context.Background(), localContext1, contracts.Proposal{}, contracts.Proposal{})
require.Nil(t, provider)
require.ErrorIs(t, err, providerErr)
require.Equal(t, 2, callCount)
}
...@@ -4,6 +4,7 @@ import ( ...@@ -4,6 +4,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"sync"
"time" "time"
"github.com/ethereum-optimism/optimism/op-challenger/game/scheduler" "github.com/ethereum-optimism/optimism/op-challenger/game/scheduler"
...@@ -39,6 +40,7 @@ type gameMonitor struct { ...@@ -39,6 +40,7 @@ type gameMonitor struct {
allowedGames []common.Address allowedGames []common.Address
l1HeadsSub ethereum.Subscription l1HeadsSub ethereum.Subscription
l1Source *headSource l1Source *headSource
runState sync.Mutex
} }
type MinimalSubscriber interface { type MinimalSubscriber interface {
...@@ -126,8 +128,10 @@ func (m *gameMonitor) onNewL1Head(ctx context.Context, sig eth.L1BlockRef) { ...@@ -126,8 +128,10 @@ func (m *gameMonitor) onNewL1Head(ctx context.Context, sig eth.L1BlockRef) {
} }
} }
func (m *gameMonitor) resubscribeFunction(ctx context.Context) event.ResubscribeErrFunc { func (m *gameMonitor) resubscribeFunction() event.ResubscribeErrFunc {
return func(innerCtx context.Context, err error) (event.Subscription, error) { // The ctx is cancelled as soon as the subscription is returned,
// but is only used to create the subscription, and does not affect the returned subscription.
return func(ctx context.Context, err error) (event.Subscription, error) {
if err != nil { if err != nil {
m.logger.Warn("resubscribing after failed L1 subscription", "err", err) m.logger.Warn("resubscribing after failed L1 subscription", "err", err)
} }
...@@ -135,18 +139,21 @@ func (m *gameMonitor) resubscribeFunction(ctx context.Context) event.Resubscribe ...@@ -135,18 +139,21 @@ func (m *gameMonitor) resubscribeFunction(ctx context.Context) event.Resubscribe
} }
} }
func (m *gameMonitor) MonitorGames(ctx context.Context) error { func (m *gameMonitor) StartMonitoring() {
m.l1HeadsSub = event.ResubscribeErr(time.Second*10, m.resubscribeFunction(ctx)) m.runState.Lock()
for { defer m.runState.Unlock()
select { if m.l1HeadsSub != nil {
case <-ctx.Done(): return // already started
m.l1HeadsSub.Unsubscribe()
return nil
case err, ok := <-m.l1HeadsSub.Err():
if !ok {
return err
}
m.logger.Error("L1 subscription error", "err", err)
} }
m.l1HeadsSub = event.ResubscribeErr(time.Second*10, m.resubscribeFunction())
}
func (m *gameMonitor) StopMonitoring() {
m.runState.Lock()
defer m.runState.Unlock()
if m.l1HeadsSub == nil {
return // already stopped
} }
m.l1HeadsSub.Unsubscribe()
m.l1HeadsSub = nil
} }
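With the blocking MonitorGames loop replaced by the non-blocking StartMonitoring/StopMonitoring pair above, the caller owns the lifecycle, and both calls are idempotent under the runState mutex. A hedged sketch of how a service wrapper might drive it (the wrapper type is illustrative, not the actual op-challenger service wiring):

type monitorLifecycleExample struct {
	monitor *gameMonitor
}

func (s *monitorLifecycleExample) Start(_ context.Context) error {
	s.monitor.StartMonitoring() // non-blocking; safe to call more than once
	return nil
}

func (s *monitorLifecycleExample) Stop(_ context.Context) error {
	s.monitor.StopMonitoring() // safe to call even if monitoring never started
	return nil
}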
...@@ -84,8 +84,9 @@ func TestMonitorGames(t *testing.T) { ...@@ -84,8 +84,9 @@ func TestMonitorGames(t *testing.T) {
cancel() cancel()
}() }()
err := monitor.MonitorGames(ctx) monitor.StartMonitoring()
require.NoError(t, err) <-ctx.Done()
monitor.StopMonitoring()
require.Len(t, sched.scheduled, 1) require.Len(t, sched.scheduled, 1)
require.Equal(t, []common.Address{addr1, addr2}, sched.scheduled[0]) require.Equal(t, []common.Address{addr1, addr2}, sched.scheduled[0])
}) })
...@@ -129,8 +130,9 @@ func TestMonitorGames(t *testing.T) { ...@@ -129,8 +130,9 @@ func TestMonitorGames(t *testing.T) {
cancel() cancel()
}() }()
err := monitor.MonitorGames(ctx) monitor.StartMonitoring()
require.NoError(t, err) <-ctx.Done()
monitor.StopMonitoring()
require.NotEmpty(t, sched.scheduled) // We might get more than one update scheduled. require.NotEmpty(t, sched.scheduled) // We might get more than one update scheduled.
require.Equal(t, []common.Address{addr1, addr2}, sched.scheduled[0]) require.Equal(t, []common.Address{addr1, addr2}, sched.scheduled[0])
}) })
......
This diff is collapsed.
package metrics package metrics
import ( import (
"context" "io"
"github.com/ethereum-optimism/optimism/op-service/sources/caching"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
...@@ -19,9 +20,14 @@ type Metricer interface { ...@@ -19,9 +20,14 @@ type Metricer interface {
RecordInfo(version string) RecordInfo(version string)
RecordUp() RecordUp()
StartBalanceMetrics(l log.Logger, client *ethclient.Client, account common.Address) io.Closer
// Record Tx metrics // Record Tx metrics
txmetrics.TxMetricer txmetrics.TxMetricer
// Record cache metrics
caching.Metrics
RecordGameStep() RecordGameStep()
RecordGameMove() RecordGameMove()
RecordCannonExecutionTime(t float64) RecordCannonExecutionTime(t float64)
...@@ -44,6 +50,8 @@ type Metrics struct { ...@@ -44,6 +50,8 @@ type Metrics struct {
txmetrics.TxMetrics txmetrics.TxMetrics
*opmetrics.CacheMetrics
info prometheus.GaugeVec info prometheus.GaugeVec
up prometheus.Gauge up prometheus.Gauge
...@@ -71,6 +79,8 @@ func NewMetrics() *Metrics { ...@@ -71,6 +79,8 @@ func NewMetrics() *Metrics {
TxMetrics: txmetrics.MakeTxMetrics(Namespace, factory), TxMetrics: txmetrics.MakeTxMetrics(Namespace, factory),
CacheMetrics: opmetrics.NewCacheMetrics(factory, Namespace, "provider_cache", "Provider cache"),
info: *factory.NewGaugeVec(prometheus.GaugeOpts{ info: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: Namespace, Namespace: Namespace,
Name: "info", Name: "info",
...@@ -128,17 +138,11 @@ func (m *Metrics) Start(host string, port int) (*httputil.HTTPServer, error) { ...@@ -128,17 +138,11 @@ func (m *Metrics) Start(host string, port int) (*httputil.HTTPServer, error) {
} }
func (m *Metrics) StartBalanceMetrics( func (m *Metrics) StartBalanceMetrics(
ctx context.Context,
l log.Logger, l log.Logger,
client *ethclient.Client, client *ethclient.Client,
account common.Address, account common.Address,
) { ) io.Closer {
// TODO(7684): util was refactored to close, but ctx is still being used by caller for shutdown return opmetrics.LaunchBalanceMetrics(l, m.registry, m.ns, client, account)
balanceMetric := opmetrics.LaunchBalanceMetrics(l, m.registry, m.ns, client, account)
go func() {
<-ctx.Done()
_ = balanceMetric.Close()
}()
} }
// RecordInfo sets a pseudo-metric that contains versioning and // RecordInfo sets a pseudo-metric that contains versioning and
......
package metrics package metrics
import ( import (
"io"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics" txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
) )
...@@ -8,6 +14,10 @@ type NoopMetricsImpl struct { ...@@ -8,6 +14,10 @@ type NoopMetricsImpl struct {
txmetrics.NoopTxMetrics txmetrics.NoopTxMetrics
} }
func (i *NoopMetricsImpl) StartBalanceMetrics(l log.Logger, client *ethclient.Client, account common.Address) io.Closer {
return nil
}
var NoopMetrics Metricer = new(NoopMetricsImpl) var NoopMetrics Metricer = new(NoopMetricsImpl)
func (*NoopMetricsImpl) RecordInfo(version string) {} func (*NoopMetricsImpl) RecordInfo(version string) {}
...@@ -27,3 +37,6 @@ func (*NoopMetricsImpl) IncActiveExecutors() {} ...@@ -27,3 +37,6 @@ func (*NoopMetricsImpl) IncActiveExecutors() {}
func (*NoopMetricsImpl) DecActiveExecutors() {} func (*NoopMetricsImpl) DecActiveExecutors() {}
func (*NoopMetricsImpl) IncIdleExecutors() {} func (*NoopMetricsImpl) IncIdleExecutors() {}
func (*NoopMetricsImpl) DecIdleExecutors() {} func (*NoopMetricsImpl) DecIdleExecutors() {}
func (*NoopMetricsImpl) CacheAdd(_ string, _ int, _ bool) {}
func (*NoopMetricsImpl) CacheGet(_ string, _ bool) {}
...@@ -54,6 +54,8 @@ func (f fakeTxMgr) BlockNumber(_ context.Context) (uint64, error) { ...@@ -54,6 +54,8 @@ func (f fakeTxMgr) BlockNumber(_ context.Context) (uint64, error) {
func (f fakeTxMgr) Send(_ context.Context, _ txmgr.TxCandidate) (*types.Receipt, error) { func (f fakeTxMgr) Send(_ context.Context, _ txmgr.TxCandidate) (*types.Receipt, error) {
panic("unimplemented") panic("unimplemented")
} }
func (f fakeTxMgr) Close() {
}
func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Client, rollupCl *sources.RollupClient) *L2Proposer { func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Client, rollupCl *sources.RollupClient) *L2Proposer {
proposerConfig := proposer.ProposerConfig{ proposerConfig := proposer.ProposerConfig{
......
...@@ -16,13 +16,13 @@ import ( ...@@ -16,13 +16,13 @@ import (
func TestMintOnRevertedDeposit(t *testing.T) { func TestMintOnRevertedDeposit(t *testing.T) {
InitParallel(t) InitParallel(t)
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
delete(cfg.Nodes, "verifier")
sys, err := cfg.Start(t) sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
l1Client := sys.Clients["l1"] l1Client := sys.Clients["l1"]
l2Verif := sys.Clients["verifier"] l2Verif := sys.Clients["sequencer"]
// create signer // create signer
aliceKey := cfg.Secrets.Alice aliceKey := cfg.Secrets.Alice
......
...@@ -11,16 +11,19 @@ import ( ...@@ -11,16 +11,19 @@ import (
"testing" "testing"
"time" "time"
op_challenger "github.com/ethereum-optimism/optimism/op-challenger" "github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/log"
challenger "github.com/ethereum-optimism/optimism/op-challenger"
"github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/cliapp"
"github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
) )
type Helper struct { type Helper struct {
...@@ -28,8 +31,7 @@ type Helper struct { ...@@ -28,8 +31,7 @@ type Helper struct {
t *testing.T t *testing.T
require *require.Assertions require *require.Assertions
dir string dir string
cancel func() chl cliapp.Lifecycle
errors chan error
} }
type Option func(config2 *config.Config) type Option func(config2 *config.Config)
...@@ -127,20 +129,16 @@ func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name st ...@@ -127,20 +129,16 @@ func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name st
log := testlog.Logger(t, log.LvlDebug).New("role", name) log := testlog.Logger(t, log.LvlDebug).New("role", name)
log.Info("Creating challenger", "l1", l1Endpoint) log.Info("Creating challenger", "l1", l1Endpoint)
cfg := NewChallengerConfig(t, l1Endpoint, options...) cfg := NewChallengerConfig(t, l1Endpoint, options...)
chl, err := challenger.Main(ctx, log, cfg)
require.NoError(t, err, "must init challenger")
require.NoError(t, chl.Start(ctx), "must start challenger")
errCh := make(chan error, 1)
ctx, cancel := context.WithCancel(ctx)
go func() {
defer close(errCh)
errCh <- op_challenger.Main(ctx, log, cfg)
}()
return &Helper{ return &Helper{
log: log, log: log,
t: t, t: t,
require: require.New(t), require: require.New(t),
dir: cfg.Datadir, dir: cfg.Datadir,
cancel: cancel, chl: chl,
errors: errCh,
} }
} }
...@@ -179,16 +177,9 @@ func NewChallengerConfig(t *testing.T, l1Endpoint string, options ...Option) *co ...@@ -179,16 +177,9 @@ func NewChallengerConfig(t *testing.T, l1Endpoint string, options ...Option) *co
} }
func (h *Helper) Close() error { func (h *Helper) Close() error {
h.cancel() ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
select { defer cancel()
case <-time.After(1 * time.Minute): return h.chl.Stop(ctx)
return errors.New("timed out while stopping challenger")
case err := <-h.errors:
if !errors.Is(err, context.Canceled) {
return err
}
return nil
}
} }
type GameAddr interface { type GameAddr interface {
......
...@@ -12,6 +12,7 @@ import ( ...@@ -12,6 +12,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
) )
var ( var (
...@@ -80,7 +81,6 @@ func WaitForTransaction(hash common.Hash, client *ethclient.Client, timeout time ...@@ -80,7 +81,6 @@ func WaitForTransaction(hash common.Hash, client *ethclient.Client, timeout time
} }
func WaitForBlock(number *big.Int, client *ethclient.Client, timeout time.Duration) (*types.Block, error) { func WaitForBlock(number *big.Int, client *ethclient.Client, timeout time.Duration) (*types.Block, error) {
timeoutCh := time.After(timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel() defer cancel()
...@@ -99,8 +99,43 @@ func WaitForBlock(number *big.Int, client *ethclient.Client, timeout time.Durati ...@@ -99,8 +99,43 @@ func WaitForBlock(number *big.Int, client *ethclient.Client, timeout time.Durati
} }
case err := <-headSub.Err(): case err := <-headSub.Err():
return nil, fmt.Errorf("error in head subscription: %w", err) return nil, fmt.Errorf("error in head subscription: %w", err)
case <-timeoutCh: case <-ctx.Done():
return nil, errTimeout return nil, ctx.Err()
}
}
}
func WaitForBlockToBeFinalized(number *big.Int, client *ethclient.Client, timeout time.Duration) (*types.Block, error) {
return waitForBlockTag(number, client, timeout, rpc.FinalizedBlockNumber)
}
func WaitForBlockToBeSafe(number *big.Int, client *ethclient.Client, timeout time.Duration) (*types.Block, error) {
return waitForBlockTag(number, client, timeout, rpc.SafeBlockNumber)
}
// waitForBlockTag polls until the block at the given tag reaches the specified number, then returns the block at that number.
func waitForBlockTag(number *big.Int, client *ethclient.Client, timeout time.Duration, tag rpc.BlockNumber) (*types.Block, error) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
// Wait for the tagged block to reach the target height. Poll every half second.
ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()
tagBigInt := big.NewInt(tag.Int64())
for {
select {
case <-ticker.C:
block, err := client.BlockByNumber(ctx, tagBigInt)
if err != nil {
return nil, err
}
if block != nil && block.NumberU64() >= number.Uint64() {
return client.BlockByNumber(ctx, number)
}
case <-ctx.Done():
return nil, ctx.Err()
} }
} }
} }
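These helpers let e2e tests wait on a block tag directly instead of sleeping for a fixed number of blocks. A hedged usage sketch, mirroring the TestFinalize change further below (l2Client is assumed to be an *ethclient.Client from the test system setup):

func waitFinalizedExample(t *testing.T, l2Client *ethclient.Client) {
	// Block until L2 block 12 is covered by the "finalized" tag, then fetch it.
	block, err := geth.WaitForBlockToBeFinalized(big.NewInt(12), l2Client, time.Minute)
	require.NoError(t, err, "block should be finalized within the timeout")
	require.GreaterOrEqual(t, block.NumberU64(), uint64(12))
}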
...@@ -368,17 +368,8 @@ func TestFinalize(t *testing.T) { ...@@ -368,17 +368,8 @@ func TestFinalize(t *testing.T) {
l2Seq := sys.Clients["sequencer"] l2Seq := sys.Clients["sequencer"]
// as configured in the extra geth lifecycle in testing setup l2Finalized, err := geth.WaitForBlockToBeFinalized(big.NewInt(12), l2Seq, 1*time.Minute)
const finalizedDistance = 8 require.NoError(t, err, "must be able to fetch a finalized L2 block")
// Wait enough time for L1 to finalize and L2 to confirm its data in finalized L1 blocks
time.Sleep(time.Duration((finalizedDistance+6)*cfg.DeployConfig.L1BlockTime) * time.Second)
// fetch the finalizes head of geth
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
l2Finalized, err := l2Seq.BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber)))
require.NoError(t, err)
require.NotZerof(t, l2Finalized.NumberU64(), "must have finalized L2 block") require.NotZerof(t, l2Finalized.NumberU64(), "must have finalized L2 block")
} }
......
...@@ -47,7 +47,9 @@ func SendDepositTx(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l ...@@ -47,7 +47,9 @@ func SendDepositTx(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l
reconstructedDep, err := derive.UnmarshalDepositLogEvent(l1Receipt.Logs[0]) reconstructedDep, err := derive.UnmarshalDepositLogEvent(l1Receipt.Logs[0])
require.NoError(t, err, "Could not reconstruct L2 Deposit") require.NoError(t, err, "Could not reconstruct L2 Deposit")
tx = types.NewTx(reconstructedDep) tx = types.NewTx(reconstructedDep)
l2Receipt, err := geth.WaitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second) // Use a long wait because the l2Client may not be configured to receive gossip from the sequencer
// so it has to wait for the batcher to submit and then import those blocks from L1.
l2Receipt, err := geth.WaitForTransaction(tx.Hash(), l2Client, 60*time.Second)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, l2Opts.ExpectedStatus, l2Receipt.Status, "l2 transaction status") require.Equal(t, l2Opts.ExpectedStatus, l2Receipt.Status, "l2 transaction status")
return l2Receipt return l2Receipt
......
...@@ -174,7 +174,7 @@ func (n *OpNode) initL1(ctx context.Context, cfg *Config) error { ...@@ -174,7 +174,7 @@ func (n *OpNode) initL1(ctx context.Context, cfg *Config) error {
if err != nil { if err != nil {
n.log.Warn("resubscribing after failed L1 subscription", "err", err) n.log.Warn("resubscribing after failed L1 subscription", "err", err)
} }
return eth.WatchHeadChanges(n.resourcesCtx, n.l1Source, n.OnNewL1Head) return eth.WatchHeadChanges(ctx, n.l1Source, n.OnNewL1Head)
}) })
go func() { go func() {
err, ok := <-n.l1HeadsSub.Err() err, ok := <-n.l1HeadsSub.Err()
...@@ -186,9 +186,9 @@ func (n *OpNode) initL1(ctx context.Context, cfg *Config) error { ...@@ -186,9 +186,9 @@ func (n *OpNode) initL1(ctx context.Context, cfg *Config) error {
// Poll for the safe L1 block and finalized block, // Poll for the safe L1 block and finalized block,
// which only change once per epoch at most and may be delayed. // which only change once per epoch at most and may be delayed.
n.l1SafeSub = eth.PollBlockChanges(n.resourcesCtx, n.log, n.l1Source, n.OnNewL1Safe, eth.Safe, n.l1SafeSub = eth.PollBlockChanges(n.log, n.l1Source, n.OnNewL1Safe, eth.Safe,
cfg.L1EpochPollInterval, time.Second*10) cfg.L1EpochPollInterval, time.Second*10)
n.l1FinalizedSub = eth.PollBlockChanges(n.resourcesCtx, n.log, n.l1Source, n.OnNewL1Finalized, eth.Finalized, n.l1FinalizedSub = eth.PollBlockChanges(n.log, n.l1Source, n.OnNewL1Finalized, eth.Finalized,
cfg.L1EpochPollInterval, time.Second*10) cfg.L1EpochPollInterval, time.Second*10)
return nil return nil
} }
...@@ -582,6 +582,14 @@ func (n *OpNode) Stop(ctx context.Context) error { ...@@ -582,6 +582,14 @@ func (n *OpNode) Stop(ctx context.Context) error {
if n.l1HeadsSub != nil { if n.l1HeadsSub != nil {
n.l1HeadsSub.Unsubscribe() n.l1HeadsSub.Unsubscribe()
} }
// stop polling for L1 safe-head changes
if n.l1SafeSub != nil {
n.l1SafeSub.Unsubscribe()
}
// stop polling for L1 finalized-head changes
if n.l1FinalizedSub != nil {
n.l1FinalizedSub.Unsubscribe()
}
// close L2 driver // close L2 driver
if n.l2Driver != nil { if n.l2Driver != nil {
......
...@@ -94,7 +94,7 @@ func (d *recordsBook[K, V]) dsKey(key K) ds.Key { ...@@ -94,7 +94,7 @@ func (d *recordsBook[K, V]) dsKey(key K) ds.Key {
func (d *recordsBook[K, V]) deleteRecord(key K) error { func (d *recordsBook[K, V]) deleteRecord(key K) error {
d.cache.Remove(key) d.cache.Remove(key)
err := d.store.Delete(d.ctx, d.dsKey(key)) err := d.store.Delete(d.ctx, d.dsKey(key))
if errors.Is(err, ds.ErrNotFound) { if err == nil || errors.Is(err, ds.ErrNotFound) {
return nil return nil
} }
return fmt.Errorf("failed to delete entry with key %v: %w", key, err) return fmt.Errorf("failed to delete entry with key %v: %w", key, err)
......
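The change above treats a nil error the same as ds.ErrNotFound, so deleteRecord only wraps genuine failures instead of wrapping nil. The same guard can also be written inverted; a hedged equivalent sketch (the function name and parameters are placeholders, not part of the peerstore code):

func deleteIgnoringNotFound(ctx context.Context, store ds.Datastore, key ds.Key) error {
	// Only genuine failures are wrapped; a missing record counts as deleted.
	if err := store.Delete(ctx, key); err != nil && !errors.Is(err, ds.ErrNotFound) {
		return fmt.Errorf("failed to delete entry with key %v: %w", key, err)
	}
	return nil
}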
...@@ -279,6 +279,11 @@ func (ps *ProposerService) Stop(ctx context.Context) error { ...@@ -279,6 +279,11 @@ func (ps *ProposerService) Stop(ctx context.Context) error {
result = errors.Join(result, fmt.Errorf("failed to close balance metricer: %w", err)) result = errors.Join(result, fmt.Errorf("failed to close balance metricer: %w", err))
} }
} }
if ps.TxManager != nil {
ps.TxManager.Close()
}
if ps.metricsSrv != nil { if ps.metricsSrv != nil {
if err := ps.metricsSrv.Stop(ctx); err != nil { if err := ps.metricsSrv.Stop(ctx); err != nil {
result = errors.Join(result, fmt.Errorf("failed to stop metrics server: %w", err)) result = errors.Join(result, fmt.Errorf("failed to stop metrics server: %w", err))
......
...@@ -17,7 +17,8 @@ type NewHeadSource interface { ...@@ -17,7 +17,8 @@ type NewHeadSource interface {
SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error)
} }
// WatchHeadChanges wraps a new-head subscription from NewHeadSource to feed the given Tracker // WatchHeadChanges wraps a new-head subscription from NewHeadSource to feed the given Tracker.
// The ctx is only used to create the subscription, and does not affect the returned subscription.
func WatchHeadChanges(ctx context.Context, src NewHeadSource, fn HeadSignalFn) (ethereum.Subscription, error) { func WatchHeadChanges(ctx context.Context, src NewHeadSource, fn HeadSignalFn) (ethereum.Subscription, error) {
headChanges := make(chan *types.Header, 10) headChanges := make(chan *types.Header, 10)
sub, err := src.SubscribeNewHead(ctx, headChanges) sub, err := src.SubscribeNewHead(ctx, headChanges)
...@@ -25,22 +26,33 @@ func WatchHeadChanges(ctx context.Context, src NewHeadSource, fn HeadSignalFn) ( ...@@ -25,22 +26,33 @@ func WatchHeadChanges(ctx context.Context, src NewHeadSource, fn HeadSignalFn) (
return nil, err return nil, err
} }
return event.NewSubscription(func(quit <-chan struct{}) error { return event.NewSubscription(func(quit <-chan struct{}) error {
eventsCtx, eventsCancel := context.WithCancel(context.Background())
defer sub.Unsubscribe() defer sub.Unsubscribe()
defer eventsCancel()
// We can handle a quit signal while fn is running, by closing the ctx.
go func() {
select {
case <-quit:
eventsCancel()
case <-eventsCtx.Done(): // don't wait for quit signal if we closed for other reasons.
return
}
}()
for { for {
select { select {
case header := <-headChanges: case header := <-headChanges:
fn(ctx, L1BlockRef{ fn(eventsCtx, L1BlockRef{
Hash: header.Hash(), Hash: header.Hash(),
Number: header.Number.Uint64(), Number: header.Number.Uint64(),
ParentHash: header.ParentHash, ParentHash: header.ParentHash,
Time: header.Time, Time: header.Time,
}) })
case err := <-sub.Err(): case <-eventsCtx.Done():
return err
case <-ctx.Done():
return ctx.Err()
case <-quit:
return nil return nil
case err := <-sub.Err(): // if the underlying subscription fails, stop
return err
} }
} }
}), nil }), nil
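Since the returned subscription now manages its own internal context, the ctx passed to WatchHeadChanges only needs to outlive subscription creation. A hedged usage sketch (the logger wiring and function name are illustrative):

func watchHeadsExample(ctx context.Context, logger log.Logger, src eth.NewHeadSource) (ethereum.Subscription, error) {
	// The creation ctx may be short-lived; the subscription keeps running
	// until Unsubscribe is called or the underlying head feed errors.
	return eth.WatchHeadChanges(ctx, src, func(ctx context.Context, ref eth.L1BlockRef) {
		// This ctx is cancelled once the subscription shuts down.
		logger.Info("new L1 head", "number", ref.Number, "hash", ref.Hash)
	})
}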
...@@ -53,7 +65,7 @@ type L1BlockRefsSource interface { ...@@ -53,7 +65,7 @@ type L1BlockRefsSource interface {
// PollBlockChanges opens a polling loop to fetch the L1 block reference with the given label, // PollBlockChanges opens a polling loop to fetch the L1 block reference with the given label,
// on provided interval and with request timeout. Results are returned with provided callback fn, // on provided interval and with request timeout. Results are returned with provided callback fn,
// which may block to pause/back-pressure polling. // which may block to pause/back-pressure polling.
func PollBlockChanges(ctx context.Context, log log.Logger, src L1BlockRefsSource, fn HeadSignalFn, func PollBlockChanges(log log.Logger, src L1BlockRefsSource, fn HeadSignalFn,
label BlockLabel, interval time.Duration, timeout time.Duration) ethereum.Subscription { label BlockLabel, interval time.Duration, timeout time.Duration) ethereum.Subscription {
return event.NewSubscription(func(quit <-chan struct{}) error { return event.NewSubscription(func(quit <-chan struct{}) error {
if interval <= 0 { if interval <= 0 {
...@@ -61,22 +73,32 @@ func PollBlockChanges(ctx context.Context, log log.Logger, src L1BlockRefsSource ...@@ -61,22 +73,32 @@ func PollBlockChanges(ctx context.Context, log log.Logger, src L1BlockRefsSource
<-quit <-quit
return nil return nil
} }
eventsCtx, eventsCancel := context.WithCancel(context.Background())
defer eventsCancel()
// We can handle a quit signal while fn is running, by closing the ctx.
go func() {
select {
case <-quit:
eventsCancel()
case <-eventsCtx.Done(): // don't wait for quit signal if we closed for other reasons.
return
}
}()
ticker := time.NewTicker(interval) ticker := time.NewTicker(interval)
defer ticker.Stop() defer ticker.Stop()
for { for {
select { select {
case <-ticker.C: case <-ticker.C:
reqCtx, reqCancel := context.WithTimeout(ctx, timeout) reqCtx, reqCancel := context.WithTimeout(eventsCtx, timeout)
ref, err := src.L1BlockRefByLabel(reqCtx, label) ref, err := src.L1BlockRefByLabel(reqCtx, label)
reqCancel() reqCancel()
if err != nil { if err != nil {
log.Warn("failed to poll L1 block", "label", label, "err", err) log.Warn("failed to poll L1 block", "label", label, "err", err)
} else { } else {
fn(ctx, ref) fn(eventsCtx, ref)
} }
case <-ctx.Done(): case <-eventsCtx.Done():
return ctx.Err()
case <-quit:
return nil return nil
} }
} }
......
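PollBlockChanges follows the same pattern: the ctx parameter is gone, and cancellation now comes from unsubscribing. A hedged sketch of polling the safe head (the interval and timeout values are illustrative):

func pollSafeHeadExample(logger log.Logger, src eth.L1BlockRefsSource, onSafe eth.HeadSignalFn) ethereum.Subscription {
	// Poll the "safe" label every 12s with a 10s request timeout; stop by
	// calling Unsubscribe on the returned subscription.
	return eth.PollBlockChanges(logger, src, onSafe, eth.Safe, 12*time.Second, 10*time.Second)
}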
...@@ -43,6 +43,11 @@ func (_m *TxManager) BlockNumber(ctx context.Context) (uint64, error) { ...@@ -43,6 +43,11 @@ func (_m *TxManager) BlockNumber(ctx context.Context) (uint64, error) {
return r0, r1 return r0, r1
} }
// Close provides a mock function with given fields:
func (_m *TxManager) Close() {
_m.Called()
}
// From provides a mock function with given fields: // From provides a mock function with given fields:
func (_m *TxManager) From() common.Address { func (_m *TxManager) From() common.Address {
ret := _m.Called() ret := _m.Called()
......
...@@ -49,6 +49,9 @@ type TxManager interface { ...@@ -49,6 +49,9 @@ type TxManager interface {
// BlockNumber returns the most recent block number from the underlying network. // BlockNumber returns the most recent block number from the underlying network.
BlockNumber(ctx context.Context) (uint64, error) BlockNumber(ctx context.Context) (uint64, error)
// Close the underlying connection
Close()
} }
// ETHBackend is the set of methods that the transaction manager uses to resubmit gas & determine // ETHBackend is the set of methods that the transaction manager uses to resubmit gas & determine
...@@ -80,6 +83,8 @@ type ETHBackend interface { ...@@ -80,6 +83,8 @@ type ETHBackend interface {
// EstimateGas returns an estimate of the amount of gas needed to execute the given // EstimateGas returns an estimate of the amount of gas needed to execute the given
// transaction against the current pending block. // transaction against the current pending block.
EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error) EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error)
// Close the underlying eth connection
Close()
} }
// SimpleTxManager is a implementation of TxManager that performs linear fee // SimpleTxManager is a implementation of TxManager that performs linear fee
...@@ -131,6 +136,10 @@ func (m *SimpleTxManager) BlockNumber(ctx context.Context) (uint64, error) { ...@@ -131,6 +136,10 @@ func (m *SimpleTxManager) BlockNumber(ctx context.Context) (uint64, error) {
return m.backend.BlockNumber(ctx) return m.backend.BlockNumber(ctx)
} }
func (m *SimpleTxManager) Close() {
m.backend.Close()
}
// TxCandidate is a transaction candidate that can be submitted to ask the // TxCandidate is a transaction candidate that can be submitted to ask the
// [TxManager] to construct a transaction with gas price bounds. // [TxManager] to construct a transaction with gas price bounds.
type TxCandidate struct { type TxCandidate struct {
......
...@@ -261,6 +261,9 @@ func (b *mockBackend) TransactionReceipt(ctx context.Context, txHash common.Hash ...@@ -261,6 +261,9 @@ func (b *mockBackend) TransactionReceipt(ctx context.Context, txHash common.Hash
}, nil }, nil
} }
func (b *mockBackend) Close() {
}
// TestTxMgrConfirmAtMinGasPrice asserts that Send returns the min gas price tx // TestTxMgrConfirmAtMinGasPrice asserts that Send returns the min gas price tx
// if the tx is mined instantly. // if the tx is mined instantly.
func TestTxMgrConfirmAtMinGasPrice(t *testing.T) { func TestTxMgrConfirmAtMinGasPrice(t *testing.T) {
...@@ -755,6 +758,9 @@ func (b *failingBackend) ChainID(ctx context.Context) (*big.Int, error) { ...@@ -755,6 +758,9 @@ func (b *failingBackend) ChainID(ctx context.Context) (*big.Int, error) {
return nil, errors.New("unimplemented") return nil, errors.New("unimplemented")
} }
func (b *failingBackend) Close() {
}
// TestWaitMinedReturnsReceiptAfterFailure asserts that WaitMined is able to // TestWaitMinedReturnsReceiptAfterFailure asserts that WaitMined is able to
// recover from failed calls to the backend. It uses the failedBackend to // recover from failed calls to the backend. It uses the failedBackend to
// simulate an rpc call failure, followed by the successful return of a receipt. // simulate an rpc call failure, followed by the successful return of a receipt.
......
...@@ -38,13 +38,13 @@ ...@@ -38,13 +38,13 @@
"upgrade:abigen": "abigen --version | sed -e 's/[^0-9]/ /g' -e 's/^ *//g' -e 's/ *$//g' -e 's/ /./g' -e 's/^/v/' > .abigenrc" "upgrade:abigen": "abigen --version | sed -e 's/[^0-9]/ /g' -e 's/^ *//g' -e 's/ *$//g' -e 's/ /./g' -e 's/^/v/' > .abigenrc"
}, },
"devDependencies": { "devDependencies": {
"@babel/eslint-parser": "^7.18.2", "@babel/eslint-parser": "^7.23.3",
"@changesets/changelog-github": "^0.4.8", "@changesets/changelog-github": "^0.4.8",
"@types/chai": "^4.3.10", "@types/chai": "^4.3.10",
"@types/chai-as-promised": "^7.1.8", "@types/chai-as-promised": "^7.1.8",
"@types/mocha": "^10.0.4", "@types/mocha": "^10.0.6",
"@types/node": "^20.9.0", "@types/node": "^20.9.3",
"@typescript-eslint/eslint-plugin": "^6.11.0", "@typescript-eslint/eslint-plugin": "^6.12.0",
"@typescript-eslint/parser": "^6.11.0", "@typescript-eslint/parser": "^6.11.0",
"chai": "^4.3.10", "chai": "^4.3.10",
"depcheck": "^1.4.7", "depcheck": "^1.4.7",
...@@ -65,13 +65,13 @@ ...@@ -65,13 +65,13 @@
"markdownlint": "^0.32.0", "markdownlint": "^0.32.0",
"markdownlint-cli2": "0.4.0", "markdownlint-cli2": "0.4.0",
"mocha": "^10.2.0", "mocha": "^10.2.0",
"nx": "17.0.3", "nx": "17.1.3",
"nyc": "^15.1.0", "nyc": "^15.1.0",
"patch-package": "^8.0.0", "patch-package": "^8.0.0",
"prettier": "^2.8.0", "prettier": "^2.8.0",
"rimraf": "^5.0.5", "rimraf": "^5.0.5",
"ts-mocha": "^10.0.0", "ts-mocha": "^10.0.0",
"typescript": "^5.2.2", "typescript": "^5.3.2",
"nx-cloud": "latest" "nx-cloud": "latest"
}, },
"dependencies": { "dependencies": {
......
# @eth-optimism/drippie-mon # @eth-optimism/drippie-mon
## 0.5.4
### Patch Changes
- Updated dependencies [[`dd0e46986`](https://github.com/ethereum-optimism/optimism/commit/dd0e46986f19dcceb304fc48f2bd410685ecd179)]:
- @eth-optimism/sdk@3.1.6
## 0.5.3 ## 0.5.3
### Patch Changes ### Patch Changes
......
{ {
"private": true, "private": true,
"name": "@eth-optimism/chain-mon", "name": "@eth-optimism/chain-mon",
"version": "0.5.3", "version": "0.5.4",
"description": "[Optimism] Chain monitoring services", "description": "[Optimism] Chain monitoring services",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -61,8 +61,8 @@ ...@@ -61,8 +61,8 @@
"@ethersproject/abstract-provider": "^5.7.0", "@ethersproject/abstract-provider": "^5.7.0",
"@nomiclabs/hardhat-ethers": "^2.2.3", "@nomiclabs/hardhat-ethers": "^2.2.3",
"@nomiclabs/hardhat-waffle": "^2.0.6", "@nomiclabs/hardhat-waffle": "^2.0.6",
"hardhat": "^2.19.0", "hardhat": "^2.19.1",
"ts-node": "^10.9.1", "ts-node": "^10.9.1",
"tsx": "^4.1.1" "tsx": "^4.3.0"
} }
} }
...@@ -57,7 +57,7 @@ ...@@ -57,7 +57,7 @@
"@types/express": "^4.17.21", "@types/express": "^4.17.21",
"@types/morgan": "^1.9.9", "@types/morgan": "^1.9.9",
"@types/pino": "^7.0.5", "@types/pino": "^7.0.5",
"@types/pino-multi-stream": "^5.1.5", "@types/pino-multi-stream": "^5.1.6",
"chai": "^4.3.10", "chai": "^4.3.10",
"supertest": "^6.3.3" "supertest": "^6.3.3"
} }
......
This diff is collapsed.
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
"coverage": "pnpm build:go-ffi && forge coverage", "coverage": "pnpm build:go-ffi && forge coverage",
"coverage:lcov": "pnpm build:go-ffi && forge coverage --report lcov", "coverage:lcov": "pnpm build:go-ffi && forge coverage --report lcov",
"deploy": "./scripts/deploy.sh", "deploy": "./scripts/deploy.sh",
"gas-snapshot:no-build": "forge snapshot --no-match-test 'testDiff|testFuzz|invariant|generateArtifact' --no-match-contract 'Initializer_Test'", "gas-snapshot:no-build": "forge snapshot --match-contract GasBenchMark",
"gas-snapshot": "pnpm build:go-ffi && pnpm gas-snapshot:no-build", "gas-snapshot": "pnpm build:go-ffi && pnpm gas-snapshot:no-build",
"storage-snapshot": "./scripts/storage-snapshot.sh", "storage-snapshot": "./scripts/storage-snapshot.sh",
"semver-lock": "forge script scripts/SemverLock.s.sol", "semver-lock": "forge script scripts/SemverLock.s.sol",
...@@ -45,9 +45,9 @@ ...@@ -45,9 +45,9 @@
"lint": "pnpm lint:fix && pnpm lint:check" "lint": "pnpm lint:fix && pnpm lint:check"
}, },
"devDependencies": { "devDependencies": {
"@typescript-eslint/eslint-plugin": "^6.11.0", "@typescript-eslint/eslint-plugin": "^6.12.0",
"@typescript-eslint/parser": "^6.11.0", "@typescript-eslint/parser": "^6.11.0",
"tsx": "^4.1.1", "tsx": "^4.3.0",
"typescript": "^5.2.2" "typescript": "^5.3.2"
} }
} }
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
"src/Safe/LivenessModule.sol": "0x45621d74ea464c75064f9194261d29d47552cf4a9c4f4b3a733f5df5803fc0dd", "src/Safe/LivenessModule.sol": "0x45621d74ea464c75064f9194261d29d47552cf4a9c4f4b3a733f5df5803fc0dd",
"src/dispute/BlockOracle.sol": "0x7e724b1ee0116dfd744f556e6237af449c2f40c6426d6f1462ae2a47589283bb", "src/dispute/BlockOracle.sol": "0x7e724b1ee0116dfd744f556e6237af449c2f40c6426d6f1462ae2a47589283bb",
"src/dispute/DisputeGameFactory.sol": "0xfdfa141408d7f8de7e230ff4bef088e30d0e4d569ca743d60d292abdd21ff270", "src/dispute/DisputeGameFactory.sol": "0xfdfa141408d7f8de7e230ff4bef088e30d0e4d569ca743d60d292abdd21ff270",
"src/dispute/FaultDisputeGame.sol": "0x0766707ab32338a6586c2340ddfbfd4e9023eeb9dfa3ef87e4b404fb0260479f", "src/dispute/FaultDisputeGame.sol": "0x7ac7553a47d96a4481a6b95363458bed5f160112b647829c4defc134fa178d9a",
"src/legacy/DeployerWhitelist.sol": "0x0a6840074734c9d167321d3299be18ef911a415e4c471fa92af7d6cfaa8336d4", "src/legacy/DeployerWhitelist.sol": "0x0a6840074734c9d167321d3299be18ef911a415e4c471fa92af7d6cfaa8336d4",
"src/legacy/L1BlockNumber.sol": "0x20d83a636c5e2067fca8c0ed505b295174e6eddb25960d8705e6b6fea8e77fa6", "src/legacy/L1BlockNumber.sol": "0x20d83a636c5e2067fca8c0ed505b295174e6eddb25960d8705e6b6fea8e77fa6",
"src/legacy/LegacyMessagePasser.sol": "0x80f355c9710af586f58cf6a86d1925e0073d1e504d0b3d814284af1bafe4dece", "src/legacy/LegacyMessagePasser.sol": "0x80f355c9710af586f58cf6a86d1925e0073d1e504d0b3d814284af1bafe4dece",
......
...@@ -143,7 +143,7 @@ contract MIPS { ...@@ -143,7 +143,7 @@ contract MIPS {
/// @notice Handles a syscall. /// @notice Handles a syscall.
/// @param _localContext The local key context for the preimage oracle. /// @param _localContext The local key context for the preimage oracle.
/// @return out_ The hashed MIPS state. /// @return out_ The hashed MIPS state.
function handleSyscall(uint256 _localContext) internal returns (bytes32 out_) { function handleSyscall(bytes32 _localContext) internal returns (bytes32 out_) {
unchecked { unchecked {
// Load state from memory // Load state from memory
State memory state; State memory state;
...@@ -626,7 +626,7 @@ contract MIPS { ...@@ -626,7 +626,7 @@ contract MIPS {
/// @param _proof The encoded proof data for leaves within the MIPS VM's memory. /// @param _proof The encoded proof data for leaves within the MIPS VM's memory.
/// @param _localContext The local key context for the preimage oracle. Optional, can be set as a constant /// @param _localContext The local key context for the preimage oracle. Optional, can be set as a constant
/// if the caller only requires one set of local keys. /// if the caller only requires one set of local keys.
function step(bytes calldata _stateData, bytes calldata _proof, uint256 _localContext) public returns (bytes32) { function step(bytes calldata _stateData, bytes calldata _proof, bytes32 _localContext) public returns (bytes32) {
unchecked { unchecked {
State memory state; State memory state;
......
...@@ -9,7 +9,7 @@ library PreimageKeyLib { ...@@ -9,7 +9,7 @@ library PreimageKeyLib {
/// @param _ident The identifier of the local data. [0, 32) bytes in size. /// @param _ident The identifier of the local data. [0, 32) bytes in size.
/// @param _localContext The local context for the key. /// @param _localContext The local context for the key.
/// @return key_ The context-specific local key. /// @return key_ The context-specific local key.
function localizeIdent(uint256 _ident, uint256 _localContext) internal view returns (bytes32 key_) { function localizeIdent(uint256 _ident, bytes32 _localContext) internal view returns (bytes32 key_) {
assembly { assembly {
// Set the type byte in the given identifier to `1` (Local). We only care about // Set the type byte in the given identifier to `1` (Local). We only care about
// the [1, 32) bytes in this value. // the [1, 32) bytes in this value.
...@@ -26,7 +26,7 @@ library PreimageKeyLib { ...@@ -26,7 +26,7 @@ library PreimageKeyLib {
/// @param _key The local data key to localize. /// @param _key The local data key to localize.
/// @param _localContext The local context for the key. /// @param _localContext The local context for the key.
/// @return localizedKey_ The localized local data key. /// @return localizedKey_ The localized local data key.
function localize(bytes32 _key, uint256 _localContext) internal view returns (bytes32 localizedKey_) { function localize(bytes32 _key, bytes32 _localContext) internal view returns (bytes32 localizedKey_) {
assembly { assembly {
// Grab the current free memory pointer to restore later. // Grab the current free memory pointer to restore later.
let ptr := mload(0x40) let ptr := mload(0x40)
......
...@@ -34,7 +34,7 @@ contract PreimageOracle is IPreimageOracle { ...@@ -34,7 +34,7 @@ contract PreimageOracle is IPreimageOracle {
/// @inheritdoc IPreimageOracle /// @inheritdoc IPreimageOracle
function loadLocalData( function loadLocalData(
uint256 _ident, uint256 _ident,
uint256 _localContext, bytes32 _localContext,
bytes32 _word, bytes32 _word,
uint256 _size, uint256 _size,
uint256 _partOffset uint256 _partOffset
......
...@@ -34,7 +34,7 @@ interface IPreimageOracle { ...@@ -34,7 +34,7 @@ interface IPreimageOracle {
/// └────────────┴────────────────────────┘ /// └────────────┴────────────────────────┘
function loadLocalData( function loadLocalData(
uint256 _ident, uint256 _ident,
uint256 _localContext, bytes32 _localContext,
bytes32 _word, bytes32 _word,
uint256 _size, uint256 _size,
uint256 _partOffset uint256 _partOffset
......
...@@ -82,8 +82,8 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, ISemver { ...@@ -82,8 +82,8 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, ISemver {
bool internal subgameAtRootResolved; bool internal subgameAtRootResolved;
/// @notice Semantic version. /// @notice Semantic version.
/// @custom:semver 0.0.11 /// @custom:semver 0.0.12
string public constant version = "0.0.11"; string public constant version = "0.0.12";
/// @param _gameType The type ID of the game. /// @param _gameType The type ID of the game.
/// @param _absolutePrestate The absolute prestate of the instruction trace. /// @param _absolutePrestate The absolute prestate of the instruction trace.
...@@ -281,7 +281,7 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, ISemver { ...@@ -281,7 +281,7 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, ISemver {
} }
/// @inheritdoc IFaultDisputeGame /// @inheritdoc IFaultDisputeGame
function addLocalData(uint256 _ident, uint256 _l2BlockNumber, uint256 _partOffset) external { function addLocalData(uint256 _ident, bytes32 _localContext, uint256 _partOffset) external {
// INVARIANT: Local data can only be added if the game is currently in progress. // INVARIANT: Local data can only be added if the game is currently in progress.
if (status != GameStatus.IN_PROGRESS) revert GameNotInProgress(); if (status != GameStatus.IN_PROGRESS) revert GameNotInProgress();
...@@ -293,7 +293,7 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, ISemver { ...@@ -293,7 +293,7 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, ISemver {
// Store the `_ident` argument // Store the `_ident` argument
mstore(0x20, _ident) mstore(0x20, _ident)
// Store the `_localContext` argument // Store the `_localContext` argument
mstore(0x40, _l2BlockNumber) mstore(0x40, _localContext)
// Store the data to load // Store the data to load
let data let data
switch _ident switch _ident
......
...@@ -35,7 +35,7 @@ interface IBigStepper { ...@@ -35,7 +35,7 @@ interface IBigStepper {
function step( function step(
bytes calldata _stateData, bytes calldata _stateData,
bytes calldata _proof, bytes calldata _proof,
uint256 _localContext bytes32 _localContext
) )
external external
returns (bytes32 postState_); returns (bytes32 postState_);
......
...@@ -70,10 +70,9 @@ interface IFaultDisputeGame is IDisputeGame { ...@@ -70,10 +70,9 @@ interface IFaultDisputeGame is IDisputeGame {
/// @notice Posts the requested local data to the VM's `PreimageOracle`. /// @notice Posts the requested local data to the VM's `PreimageOracle`.
/// @param _ident The local identifier of the data to post. /// @param _ident The local identifier of the data to post.
/// @param _l2BlockNumber The L2 block number being disputed. This serves as the local context for the /// @param _localContext The local context for the `PreimageOracle` key.
/// `PreimageOracle` key.
/// @param _partOffset The offset of the data to post. /// @param _partOffset The offset of the data to post.
function addLocalData(uint256 _ident, uint256 _l2BlockNumber, uint256 _partOffset) external; function addLocalData(uint256 _ident, bytes32 _localContext, uint256 _partOffset) external;
/// @notice Resolves the subgame rooted at the given claim index. /// @notice Resolves the subgame rooted at the given claim index.
/// @dev This function must be called bottom-up in the DAG /// @dev This function must be called bottom-up in the DAG
......
...@@ -484,7 +484,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { ...@@ -484,7 +484,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init {
} }
/// @dev Tests that adding local data with an out of bounds identifier reverts. /// @dev Tests that adding local data with an out of bounds identifier reverts.
function testFuzz_addLocalData_oob_reverts(uint256 _ident, uint256 _localContext) public { function testFuzz_addLocalData_oob_reverts(uint256 _ident, bytes32 _localContext) public {
// [1, 5] are valid local data identifiers. // [1, 5] are valid local data identifiers.
if (_ident <= 5) _ident = 0; if (_ident <= 5) _ident = 0;
...@@ -529,7 +529,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { ...@@ -529,7 +529,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init {
} }
/// @dev Helper to get the localized key for an identifier in the context of the game proxy. /// @dev Helper to get the localized key for an identifier in the context of the game proxy.
function _getKey(uint256 _ident, uint256 _localContext) internal view returns (bytes32) { function _getKey(uint256 _ident, bytes32 _localContext) internal view returns (bytes32) {
bytes32 h = keccak256(abi.encode(_ident | (1 << 248), address(gameProxy), _localContext)); bytes32 h = keccak256(abi.encode(_ident | (1 << 248), address(gameProxy), _localContext));
return bytes32((uint256(h) & ~uint256(0xFF << 248)) | (1 << 248)); return bytes32((uint256(h) & ~uint256(0xFF << 248)) | (1 << 248));
} }
......
...@@ -60,7 +60,7 @@ contract PreimageOracle_Test is Test { ...@@ -60,7 +60,7 @@ contract PreimageOracle_Test is Test {
for (uint256 i; i < words.length; i++) { for (uint256 i; i < words.length; i++) {
// Load the local data into the preimage oracle under the test contract's context // Load the local data into the preimage oracle under the test contract's context
// and the given local context. // and the given local context.
bytes32 contextKey = oracle.loadLocalData(ident, i, words[i], size, partOffset); bytes32 contextKey = oracle.loadLocalData(ident, bytes32(i), words[i], size, partOffset);
// Validate that the pre-image part is set // Validate that the pre-image part is set
bool ok = oracle.preimagePartOk(contextKey, partOffset); bool ok = oracle.preimagePartOk(contextKey, partOffset);
...@@ -79,7 +79,7 @@ contract PreimageOracle_Test is Test { ...@@ -79,7 +79,7 @@ contract PreimageOracle_Test is Test {
/// @notice Tests that context-specific data [0, 32] bytes in length can be loaded correctly. /// @notice Tests that context-specific data [0, 32] bytes in length can be loaded correctly.
function testFuzz_loadLocalData_varyingLength_succeeds( function testFuzz_loadLocalData_varyingLength_succeeds(
uint256 ident, uint256 ident,
uint256 localContext, bytes32 localContext,
bytes32 word, bytes32 word,
uint256 size, uint256 size,
uint256 partOffset uint256 partOffset
......
...@@ -17,7 +17,7 @@ contract AlphabetVM is IBigStepper { ...@@ -17,7 +17,7 @@ contract AlphabetVM is IBigStepper {
} }
/// @inheritdoc IBigStepper /// @inheritdoc IBigStepper
function step(bytes calldata _stateData, bytes calldata, uint256) external view returns (bytes32 postState_) { function step(bytes calldata _stateData, bytes calldata, bytes32) external view returns (bytes32 postState_) {
uint256 traceIndex; uint256 traceIndex;
uint256 claim; uint256 claim;
if ((keccak256(_stateData) << 8) == (Claim.unwrap(ABSOLUTE_PRESTATE) << 8)) { if ((keccak256(_stateData) << 8) == (Claim.unwrap(ABSOLUTE_PRESTATE) << 8)) {
......
...@@ -59,9 +59,9 @@ ...@@ -59,9 +59,9 @@
"isomorphic-fetch": "^3.0.0", "isomorphic-fetch": "^3.0.0",
"jest-dom": "link:@types/@testing-library/jest-dom", "jest-dom": "link:@types/@testing-library/jest-dom",
"jsdom": "^22.1.0", "jsdom": "^22.1.0",
"tsup": "^7.2.0", "tsup": "^8.0.1",
"typescript": "^5.2.2", "typescript": "^5.3.2",
"vite": "^4.5.0", "vite": "^5.0.2",
"vitest": "^0.34.2" "vitest": "^0.34.2"
}, },
"peerDependencies": { "peerDependencies": {
...@@ -77,11 +77,11 @@ ...@@ -77,11 +77,11 @@
} }
}, },
"dependencies": { "dependencies": {
"@testing-library/react": "^14.0.0", "@testing-library/react": "^14.1.2",
"@types/change-case": "^2.3.1", "@types/change-case": "^2.3.1",
"change-case": "4.1.2", "change-case": "4.1.2",
"react": "^18.2.0", "react": "^18.2.0",
"react-dom": "^18.2.0", "react-dom": "^18.2.0",
"viem": "^1.19.4" "viem": "^1.19.7"
} }
} }
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
"node-fetch": "^2.6.7" "node-fetch": "^2.6.7"
}, },
"devDependencies": { "devDependencies": {
"@types/node": "^20.9.0", "@types/node": "^20.9.3",
"mocha": "^10.2.0" "mocha": "^10.2.0"
} }
} }
...@@ -42,10 +42,10 @@ ...@@ -42,10 +42,10 @@
"isomorphic-fetch": "^3.0.0", "isomorphic-fetch": "^3.0.0",
"jest-dom": "link:@types/@testing-library/jest-dom", "jest-dom": "link:@types/@testing-library/jest-dom",
"jsdom": "^22.1.0", "jsdom": "^22.1.0",
"tsup": "^7.2.0", "tsup": "^8.0.1",
"typescript": "^5.2.2", "typescript": "^5.3.2",
"viem": "^1.19.4", "viem": "^1.19.7",
"vite": "^4.5.0", "vite": "^5.0.2",
"vitest": "^0.34.2" "vitest": "^0.34.2"
}, },
"peerDependencies": { "peerDependencies": {
......
# @eth-optimism/sdk # @eth-optimism/sdk
## 3.1.6
### Patch Changes
- [#8212](https://github.com/ethereum-optimism/optimism/pull/8212) [`dd0e46986`](https://github.com/ethereum-optimism/optimism/commit/dd0e46986f19dcceb304fc48f2bd410685ecd179) Thanks [@smartcontracts](https://github.com/smartcontracts)! - Simplifies getMessageStatus to use an O(1) lookup instead of an event query
## 3.1.5 ## 3.1.5
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/sdk", "name": "@eth-optimism/sdk",
"version": "3.1.5", "version": "3.1.6",
"description": "[Optimism] Tools for working with Optimism", "description": "[Optimism] Tools for working with Optimism",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -43,20 +43,20 @@ ...@@ -43,20 +43,20 @@
"@nomiclabs/hardhat-waffle": "^2.0.1", "@nomiclabs/hardhat-waffle": "^2.0.1",
"@types/chai": "^4.3.10", "@types/chai": "^4.3.10",
"@types/chai-as-promised": "^7.1.8", "@types/chai-as-promised": "^7.1.8",
"@types/mocha": "^10.0.4", "@types/mocha": "^10.0.6",
"@types/node": "^20.9.0", "@types/node": "^20.9.3",
"chai-as-promised": "^7.1.1", "chai-as-promised": "^7.1.1",
"ethereum-waffle": "^4.0.10", "ethereum-waffle": "^4.0.10",
"ethers": "^5.7.2", "ethers": "^5.7.2",
"hardhat": "^2.19.0", "hardhat": "^2.19.1",
"hardhat-deploy": "^0.11.43", "hardhat-deploy": "^0.11.43",
"isomorphic-fetch": "^3.0.0", "isomorphic-fetch": "^3.0.0",
"mocha": "^10.2.0", "mocha": "^10.2.0",
"nyc": "^15.1.0", "nyc": "^15.1.0",
"ts-node": "^10.9.1", "ts-node": "^10.9.1",
"typedoc": "^0.25.3", "typedoc": "^0.25.3",
"typescript": "^5.2.2", "typescript": "^5.3.2",
"viem": "^1.19.4", "viem": "^1.19.7",
"vitest": "^0.34.2", "vitest": "^0.34.2",
"zod": "^3.22.4" "zod": "^3.22.4"
}, },
......
...@@ -667,25 +667,54 @@ export class CrossChainMessenger { ...@@ -667,25 +667,54 @@ export class CrossChainMessenger {
toBlockOrBlockHash?: BlockTag toBlockOrBlockHash?: BlockTag
): Promise<MessageStatus> { ): Promise<MessageStatus> {
const resolved = await this.toCrossChainMessage(message, messageIndex) const resolved = await this.toCrossChainMessage(message, messageIndex)
const receipt = await this.getMessageReceipt( // legacy withdrawals relayed prebedrock are v1
resolved, const messageHashV0 = hashCrossDomainMessagev0(
messageIndex, resolved.target,
fromBlockOrBlockHash, resolved.sender,
toBlockOrBlockHash resolved.message,
resolved.messageNonce
)
// bedrock withdrawals are v1
// legacy withdrawals relayed postbedrock are v1
// there is no good way to differentiate between the two types of legacy
// so we will check for both
const messageHashV1 = hashCrossDomainMessagev1(
resolved.messageNonce,
resolved.sender,
resolved.target,
resolved.value,
resolved.minGasLimit,
resolved.message
) )
// Here we want the messenger that will receive the message, not the one that sent it.
const messenger =
resolved.direction === MessageDirection.L1_TO_L2
? this.contracts.l2.L2CrossDomainMessenger
: this.contracts.l1.L1CrossDomainMessenger
const success =
(await messenger.successfulMessages(messageHashV0)) ||
(await messenger.successfulMessages(messageHashV1))
const failure =
(await messenger.failedMessages(messageHashV0)) ||
(await messenger.failedMessages(messageHashV1))
if (resolved.direction === MessageDirection.L1_TO_L2) { if (resolved.direction === MessageDirection.L1_TO_L2) {
if (receipt === null) { if (success) {
return MessageStatus.UNCONFIRMED_L1_TO_L2_MESSAGE
} else {
if (receipt.receiptStatus === MessageReceiptStatus.RELAYED_SUCCEEDED) {
return MessageStatus.RELAYED return MessageStatus.RELAYED
} else { } else if (failure) {
return MessageStatus.FAILED_L1_TO_L2_MESSAGE return MessageStatus.FAILED_L1_TO_L2_MESSAGE
} else {
return MessageStatus.UNCONFIRMED_L1_TO_L2_MESSAGE
} }
}
} else { } else {
if (receipt === null) { if (success) {
return MessageStatus.RELAYED
} else if (failure) {
return MessageStatus.READY_FOR_RELAY
} else {
let timestamp: number let timestamp: number
if (this.bedrock) { if (this.bedrock) {
const output = await this.getMessageBedrockOutput( const output = await this.getMessageBedrockOutput(
...@@ -737,12 +766,6 @@ export class CrossChainMessenger { ...@@ -737,12 +766,6 @@ export class CrossChainMessenger {
} else { } else {
return MessageStatus.READY_FOR_RELAY return MessageStatus.READY_FOR_RELAY
} }
} else {
if (receipt.receiptStatus === MessageReceiptStatus.RELAYED_SUCCEEDED) {
return MessageStatus.RELAYED
} else {
return MessageStatus.READY_FOR_RELAY
}
} }
} }
} }
......
...@@ -8,6 +8,8 @@ contract MockMessenger is ICrossDomainMessenger { ...@@ -8,6 +8,8 @@ contract MockMessenger is ICrossDomainMessenger {
} }
uint256 public nonce; uint256 public nonce;
mapping (bytes32 => bool) public successfulMessages;
mapping (bytes32 => bool) public failedMessages;
// Empty function to satisfy the interface. // Empty function to satisfy the interface.
function sendMessage( function sendMessage(
...@@ -81,6 +83,7 @@ contract MockMessenger is ICrossDomainMessenger { ...@@ -81,6 +83,7 @@ contract MockMessenger is ICrossDomainMessenger {
) public { ) public {
for (uint256 i = 0; i < _params.length; i++) { for (uint256 i = 0; i < _params.length; i++) {
emit RelayedMessage(_params[i]); emit RelayedMessage(_params[i]);
successfulMessages[_params[i]] = true;
} }
} }
...@@ -89,6 +92,7 @@ contract MockMessenger is ICrossDomainMessenger { ...@@ -89,6 +92,7 @@ contract MockMessenger is ICrossDomainMessenger {
) public { ) public {
for (uint256 i = 0; i < _params.length; i++) { for (uint256 i = 0; i < _params.length; i++) {
emit FailedRelayedMessage(_params[i]); emit FailedRelayedMessage(_params[i]);
failedMessages[_params[i]] = true;
} }
} }
} }
...@@ -33,12 +33,12 @@ ...@@ -33,12 +33,12 @@
}, },
"devDependencies": { "devDependencies": {
"@eth-optimism/contracts-ts": "workspace:^", "@eth-optimism/contracts-ts": "workspace:^",
"@swc/core": "^1.3.95", "@swc/core": "^1.3.99",
"@vitest/coverage-istanbul": "^0.34.6", "@vitest/coverage-istanbul": "^0.34.6",
"tsup": "^7.2.0", "tsup": "^8.0.1",
"typescript": "^5.2.2", "typescript": "^5.3.2",
"viem": "^1.19.4", "viem": "^1.19.7",
"vite": "^4.5.0", "vite": "^5.0.2",
"vitest": "^0.34.1", "vitest": "^0.34.1",
"zod": "^3.22.4" "zod": "^3.22.4"
}, },
......
This diff is collapsed.