Commit ce14288a authored by mergify[bot], committed by GitHub

Merge branch 'develop' into inphi/mips-style

parents c78bce65 ec4ed145
@@ -524,7 +524,7 @@ jobs:
   op-bindings-build:
     docker:
       - image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
-    resource_class: medium
+    resource_class: large
     steps:
       - checkout
       - run:
@@ -885,6 +885,7 @@ jobs:
     docker:
       - image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
       - image: cimg/postgres:14.1
+    resource_class: large
     steps:
       - checkout
       - check-changed:
@@ -911,7 +912,7 @@ jobs:
           name: Test
           command: |
             mkdir -p /test-results
-            DB_USER=postgres gotestsum --junitfile /test-results/tests.xml
+            DB_USER=postgres gotestsum --junitfile /test-results/tests.xml -- -parallel=4 ./...
           working_directory: indexer
       - run:
           name: Build
...
@@ -10,9 +10,6 @@
 [submodule "packages/contracts-bedrock/lib/clones-with-immutable-args"]
     path = packages/contracts-bedrock/lib/clones-with-immutable-args
     url = https://github.com/Saw-mon-and-Natalie/clones-with-immutable-args
-[submodule "packages/contracts-bedrock/lib/ds-test"]
-    path = packages/contracts-bedrock/lib/ds-test
-    url = https://github.com/dapphub/ds-test
 [submodule "packages/contracts-bedrock/lib/forge-std"]
     path = packages/contracts-bedrock/lib/forge-std
     url = https://github.com/foundry-rs/forge-std
// This function does not modify the lockfile. It asserts that packages do not use SSH
// when specifying a git repository.
function afterAllResolved(lockfile, context) {
  const pkgs = lockfile['packages'];
  for (const [pkg, entry] of Object.entries(pkgs)) {
    const repo = entry.resolution['repo'];
    if (repo !== undefined) {
      if (repo.startsWith('git@github.com')) {
        throw new Error(`Invalid git ssh specification found for package ${pkg}. Ensure that the dependencies do not reference SSH-based git repos before installing them`);
      }
    }
  }
  return lockfile;
}

module.exports = {
  hooks: {
    afterAllResolved
  }
}
@@ -9,7 +9,7 @@ import datetime
 import time
 import shutil
 import http.client
-import multiprocessing
+from multiprocessing import Process, Queue

 import devnet.log_setup
@@ -25,6 +25,26 @@ class Bunch:
     def __init__(self, **kwds):
         self.__dict__.update(kwds)

+class ChildProcess:
+    def __init__(self, func, *args):
+        self.errq = Queue()
+        self.process = Process(target=self._func, args=(func, args))
+
+    def _func(self, func, args):
+        try:
+            func(*args)
+        except Exception as e:
+            self.errq.put(str(e))
+
+    def start(self):
+        self.process.start()
+
+    def join(self):
+        self.process.join()
+
+    def get_error(self):
+        return self.errq.get() if not self.errq.empty() else None
+
 def main():
     args = parser.parse_args()
@@ -103,9 +123,12 @@ def devnet_l1_genesis(paths):
         '--verbosity', '4', '--gcmode', 'archive', '--dev.gaslimit', '30000000'
     ])

-    forge = multiprocessing.Process(target=deploy_contracts, args=(paths,))
+    forge = ChildProcess(deploy_contracts, paths)
     forge.start()
     forge.join()
+    err = forge.get_error()
+    if err:
+        raise Exception(f"Exception occurred in child process: {err}")

     res = debug_dumpBlock('127.0.0.1:8545')
     response = json.loads(res)
...
@@ -18,14 +18,14 @@ require (
     github.com/gorilla/mux v1.8.0
     github.com/hashicorp/go-multierror v1.1.1
     github.com/hashicorp/golang-lru v1.0.2
-    github.com/hashicorp/golang-lru/v2 v2.0.1
+    github.com/hashicorp/golang-lru/v2 v2.0.2
     github.com/holiman/uint256 v1.2.3
     github.com/ipfs/go-datastore v0.6.0
     github.com/ipfs/go-ds-leveldb v0.5.0
     github.com/jackc/pgtype v1.14.0
     github.com/jackc/pgx/v5 v5.4.3
     github.com/lib/pq v1.10.9
-    github.com/libp2p/go-libp2p v0.25.1
+    github.com/libp2p/go-libp2p v0.27.8
     github.com/libp2p/go-libp2p-pubsub v0.9.3
     github.com/libp2p/go-libp2p-testing v0.12.0
     github.com/mattn/go-isatty v0.0.19
@@ -87,15 +87,15 @@ require (
     github.com/getsentry/sentry-go v0.18.0 // indirect
     github.com/go-ole/go-ole v1.2.6 // indirect
     github.com/go-stack/stack v1.8.1 // indirect
-    github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
+    github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
     github.com/godbus/dbus/v5 v5.1.0 // indirect
     github.com/gofrs/flock v0.8.1 // indirect
     github.com/gogo/protobuf v1.3.2 // indirect
     github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
     github.com/golang/mock v1.6.0 // indirect
-    github.com/golang/protobuf v1.5.2 // indirect
+    github.com/golang/protobuf v1.5.3 // indirect
     github.com/google/gopacket v1.1.19 // indirect
-    github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect
+    github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect
     github.com/gorilla/websocket v1.5.0 // indirect
     github.com/graph-gophers/graphql-go v1.3.0 // indirect
     github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -105,7 +105,7 @@ require (
     github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
     github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
     github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
-    github.com/ipfs/go-cid v0.3.2 // indirect
+    github.com/ipfs/go-cid v0.4.1 // indirect
     github.com/ipfs/go-log/v2 v2.5.1 // indirect
     github.com/jackc/pgio v1.0.0 // indirect
     github.com/jackc/pgpassfile v1.0.0 // indirect
@@ -115,15 +115,15 @@ require (
     github.com/jbenet/goprocess v0.1.4 // indirect
     github.com/jinzhu/inflection v1.0.0 // indirect
     github.com/jinzhu/now v1.1.5 // indirect
-    github.com/klauspost/compress v1.15.15 // indirect
-    github.com/klauspost/cpuid/v2 v2.2.3 // indirect
-    github.com/koron/go-ssdp v0.0.3 // indirect
+    github.com/klauspost/compress v1.16.4 // indirect
+    github.com/klauspost/cpuid/v2 v2.2.4 // indirect
+    github.com/koron/go-ssdp v0.0.4 // indirect
     github.com/kr/pretty v0.3.1 // indirect
     github.com/kr/text v0.2.0 // indirect
     github.com/libp2p/go-buffer-pool v0.1.0 // indirect
     github.com/libp2p/go-cidranger v1.1.0 // indirect
     github.com/libp2p/go-flow-metrics v0.1.0 // indirect
-    github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect
+    github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect
     github.com/libp2p/go-mplex v0.7.0 // indirect
     github.com/libp2p/go-msgio v0.3.0 // indirect
     github.com/libp2p/go-nat v0.1.0 // indirect
@@ -134,7 +134,7 @@ require (
     github.com/mattn/go-colorable v0.1.13 // indirect
    github.com/mattn/go-runewidth v0.0.14 // indirect
     github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-    github.com/miekg/dns v1.1.50 // indirect
+    github.com/miekg/dns v1.1.53 // indirect
     github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
     github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
     github.com/minio/sha256-simd v1.0.0 // indirect
@@ -144,26 +144,25 @@ require (
     github.com/mr-tron/base58 v1.2.0 // indirect
     github.com/multiformats/go-base36 v0.2.0 // indirect
     github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
-    github.com/multiformats/go-multibase v0.1.1 // indirect
+    github.com/multiformats/go-multibase v0.2.0 // indirect
     github.com/multiformats/go-multicodec v0.8.1 // indirect
     github.com/multiformats/go-multihash v0.2.1 // indirect
     github.com/multiformats/go-multistream v0.4.1 // indirect
     github.com/multiformats/go-varint v0.0.7 // indirect
-    github.com/onsi/ginkgo/v2 v2.8.1 // indirect
+    github.com/onsi/ginkgo/v2 v2.9.2 // indirect
     github.com/opencontainers/runtime-spec v1.0.2 // indirect
     github.com/opentracing/opentracing-go v1.2.0 // indirect
     github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
     github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
     github.com/pmezard/go-difflib v1.0.0 // indirect
     github.com/prometheus/client_model v0.3.0 // indirect
-    github.com/prometheus/common v0.39.0 // indirect
+    github.com/prometheus/common v0.42.0 // indirect
     github.com/prometheus/procfs v0.9.0 // indirect
     github.com/quic-go/qpack v0.4.0 // indirect
-    github.com/quic-go/qtls-go1-18 v0.2.0 // indirect
-    github.com/quic-go/qtls-go1-19 v0.2.0 // indirect
-    github.com/quic-go/qtls-go1-20 v0.1.0 // indirect
-    github.com/quic-go/quic-go v0.32.0 // indirect
-    github.com/quic-go/webtransport-go v0.5.1 // indirect
+    github.com/quic-go/qtls-go1-19 v0.3.3 // indirect
+    github.com/quic-go/qtls-go1-20 v0.2.3 // indirect
+    github.com/quic-go/quic-go v0.33.0 // indirect
+    github.com/quic-go/webtransport-go v0.5.2 // indirect
     github.com/raulk/go-watchdog v1.3.0 // indirect
     github.com/rivo/uniseg v0.4.3 // indirect
     github.com/rogpeppe/go-internal v1.9.0 // indirect
@@ -181,15 +180,15 @@ require (
     github.com/yusufpapurcu/wmi v1.2.2 // indirect
     go.uber.org/atomic v1.10.0 // indirect
     go.uber.org/dig v1.16.1 // indirect
-    go.uber.org/fx v1.19.1 // indirect
-    go.uber.org/multierr v1.9.0 // indirect
+    go.uber.org/fx v1.19.2 // indirect
+    go.uber.org/multierr v1.11.0 // indirect
     go.uber.org/zap v1.24.0 // indirect
     golang.org/x/mod v0.11.0 // indirect
     golang.org/x/net v0.10.0 // indirect
     golang.org/x/sys v0.11.0 // indirect
     golang.org/x/text v0.12.0 // indirect
     golang.org/x/tools v0.7.0 // indirect
-    google.golang.org/protobuf v1.28.1 // indirect
+    google.golang.org/protobuf v1.30.0 // indirect
     gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
     gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
     gopkg.in/yaml.v2 v2.4.0 // indirect
...
@@ -22,6 +22,8 @@ type BlockHeader struct {
     ParentHash common.Hash `gorm:"serializer:json"`
     Number     U256
     Timestamp  uint64
+
+    GethHeader *GethHeader `gorm:"serializer:rlp;column:rlp_bytes"`
 }

 func BlockHeaderFromGethHeader(header *types.Header) BlockHeader {
@@ -30,6 +32,8 @@ func BlockHeaderFromGethHeader(header *types.Header) BlockHeader {
         ParentHash: header.ParentHash,
         Number:     U256{Int: header.Number},
         Timestamp:  header.Time,
+
+        GethHeader: (*GethHeader)(header),
     }
 }
...
@@ -16,26 +16,37 @@ import (
 */
 type ContractEvent struct {
     GUID            uuid.UUID   `gorm:"primaryKey"`
     BlockHash       common.Hash `gorm:"serializer:json"`
-    TransactionHash common.Hash `gorm:"serializer:json"`
+    ContractAddress common.Address `gorm:"serializer:json"`
+    TransactionHash common.Hash    `gorm:"serializer:json"`
     EventSignature  common.Hash `gorm:"serializer:json"`
     LogIndex        uint64
     Timestamp       uint64
+
+    GethLog *types.Log `gorm:"serializer:rlp;column:rlp_bytes"`
 }

 func ContractEventFromGethLog(log *types.Log, timestamp uint64) ContractEvent {
+    eventSig := common.Hash{}
+    if len(log.Topics) > 0 {
+        eventSig = log.Topics[0]
+    }
+
     return ContractEvent{
         GUID:      uuid.New(),
         BlockHash: log.BlockHash,
+        ContractAddress: log.Address,
         TransactionHash: log.TxHash,
-        EventSignature:  log.Topics[0],
+        EventSignature:  eventSig,
         LogIndex:  uint64(log.Index),
         Timestamp: timestamp,
+
+        GethLog: log,
     }
 }
...
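A minimal sketch, not part of the commit, of the behavior the new topic check gives anonymous logs; it assumes a *_test.go file alongside contract_events.go in the same database package, and the log values are made up:

package database

import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/stretchr/testify/require"
)

func TestAnonymousLogHasZeroEventSignature(t *testing.T) {
	// An anonymous event emits no topics, so indexing log.Topics[0] would panic.
	anonLog := &types.Log{Address: common.HexToAddress("0x42"), Topics: nil}

	event := ContractEventFromGethLog(anonLog, 1700000000)

	// The signature falls back to the zero hash -- the bytes32(0x0) the
	// schema comment refers to.
	require.Equal(t, common.Hash{}, event.EventSignature)
}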
package database

import (
	"context"
	"fmt"
	"reflect"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rlp"

	"gorm.io/gorm/schema"
)

type RLPSerializer struct{}

type RLPInterface interface {
	rlp.Encoder
	rlp.Decoder
}

func init() {
	schema.RegisterSerializer("rlp", RLPSerializer{})
}

func (RLPSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
	fieldValue := reflect.New(field.FieldType)
	if dbValue != nil {
		var bytes []byte
		switch v := dbValue.(type) {
		case []byte:
			bytes = v
		case string:
			b, err := hexutil.Decode(v)
			if err != nil {
				return err
			}
			bytes = b
		default:
			return fmt.Errorf("unrecognized RLP bytes: %#v", dbValue)
		}

		if len(bytes) > 0 {
			err := rlp.DecodeBytes(bytes, fieldValue.Interface())
			if err != nil {
				return err
			}
		}
	}

	field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
	return nil
}

func (RLPSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
	// Even though rlp.Encode takes an interface and will error out if the passed value does not
	// satisfy the interface, we check here because we also want to make sure the type satisfies
	// the rlp.Decoder interface.
	i := reflect.TypeOf(new(RLPInterface)).Elem()
	if !reflect.TypeOf(fieldValue).Implements(i) {
		return nil, fmt.Errorf("%T does not satisfy RLP Encoder & Decoder interface", fieldValue)
	}

	rlpBytes, err := rlp.EncodeToBytes(fieldValue)
	if err != nil {
		return nil, err
	}

	return hexutil.Bytes(rlpBytes).MarshalText()
}
@@ -3,8 +3,12 @@ package database
 import (
     "database/sql/driver"
     "errors"
+    "io"
     "math/big"

+    "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/core/types"
+    "github.com/ethereum/go-ethereum/rlp"
     "github.com/jackc/pgtype"
 )
@@ -13,7 +17,7 @@ var big10 = big.NewInt(10)
 var ErrU256Overflow = errors.New("number exceeds u256")
 var ErrU256ContainsDecimal = errors.New("number contains fractional digits")
-var ErrU256NotNull = errors.New("number cannot be null")
+var ErrU256Null = errors.New("number cannot be null")

 // U256 is a wrapper over big.Int that conforms to the database U256 numeric domain type
 type U256 struct {
@@ -30,7 +34,7 @@ func (u256 *U256) Scan(src interface{}) error {
     } else if numeric.Exp < 0 {
         return ErrU256ContainsDecimal
     } else if numeric.Status == pgtype.Null {
-        return ErrU256NotNull
+        return ErrU256Null
     }

     // factor in the powers of 10
@@ -54,7 +58,7 @@ func (u256 *U256) Scan(src interface{}) error {
 func (u256 U256) Value() (driver.Value, error) {
     // check bounds
     if u256.Int == nil {
-        return nil, ErrU256NotNull
+        return nil, ErrU256Null
     } else if u256.Int.Cmp(u256BigIntOverflow) >= 0 {
         return nil, ErrU256Overflow
     }
@@ -63,3 +67,29 @@ func (u256 U256) Value() (driver.Value, error) {
     numeric := pgtype.Numeric{Int: u256.Int, Status: pgtype.Present}
     return numeric.Value()
 }
+
+type GethHeader types.Header
+
+func (h *GethHeader) EncodeRLP(w io.Writer) error {
+    return types.NewBlockWithHeader((*types.Header)(h)).EncodeRLP(w)
+}
+
+func (h *GethHeader) DecodeRLP(s *rlp.Stream) error {
+    block := new(types.Block)
+    err := block.DecodeRLP(s)
+    if err != nil {
+        return err
+    }
+
+    header := block.Header()
+    *h = (GethHeader)(*header)
+    return nil
+}
+
+func (h *GethHeader) Header() *types.Header {
+    return (*types.Header)(h)
+}
+
+func (h *GethHeader) Hash() common.Hash {
+    return h.Header().Hash()
+}
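A sketch, not part of the commit, of the property the e2e test below relies on: RLP round-tripping a header through the GethHeader wrapper preserves its hash, since the block encoding embeds the header's own RLP. It assumes a *_test.go file in the same database package, with arbitrary header values:

package database

import (
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/stretchr/testify/require"
)

func TestGethHeaderRLPRoundTrip(t *testing.T) {
	// Arbitrary values; only hash equality matters here.
	header := &types.Header{Number: big.NewInt(1), Time: 1234, Difficulty: big.NewInt(0)}

	encoded, err := rlp.EncodeToBytes((*GethHeader)(header))
	require.NoError(t, err)

	decoded := new(GethHeader)
	require.NoError(t, rlp.DecodeBytes(encoded, decoded))

	// The same consensus fields decode back out, so the derived hash matches.
	require.Equal(t, header.Hash(), decoded.Hash())
}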
@@ -16,6 +16,7 @@ import (
     "github.com/ethereum/go-ethereum/accounts/abi/bind"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/crypto"
+    "github.com/ethereum/go-ethereum/rlp"
     "github.com/stretchr/testify/require"
 )
@@ -62,6 +63,9 @@ func TestE2EBlockHeaders(t *testing.T) {
         require.Equal(t, header.Hash(), indexedHeader.Hash)
         require.Equal(t, header.ParentHash, indexedHeader.ParentHash)
         require.Equal(t, header.Time, indexedHeader.Timestamp)
+
+        // ensure the right rlp encoding is stored. checking the hashes suffices
+        require.Equal(t, header.Hash(), indexedHeader.GethHeader.Hash())
     }
 })
@@ -116,9 +120,17 @@ func TestE2EBlockHeaders(t *testing.T) {
     require.NoError(t, err)
     require.Equal(t, log.Topics[0], contractEvent.EventSignature)
     require.Equal(t, log.BlockHash, contractEvent.BlockHash)
+    require.Equal(t, log.Address, contractEvent.ContractAddress)
     require.Equal(t, log.TxHash, contractEvent.TransactionHash)
     require.Equal(t, log.Index, uint(contractEvent.LogIndex))

+    // ensure the right rlp encoding of the contract log is stored
+    logRlp, err := rlp.EncodeToBytes(&log)
+    require.NoError(t, err)
+    contractEventRlp, err := rlp.EncodeToBytes(contractEvent.GethLog)
+    require.NoError(t, err)
+    require.ElementsMatch(t, logRlp, contractEventRlp)
+
     // ensure the block is also indexed
     block, err := testSuite.L1Client.BlockByNumber(testCtx, big.NewInt(int64(log.BlockNumber)))
     require.NoError(t, err)
@@ -131,6 +143,10 @@ func TestE2EBlockHeaders(t *testing.T) {
     require.Equal(t, block.ParentHash(), l1BlockHeader.ParentHash)
     require.Equal(t, block.Number(), l1BlockHeader.Number.Int)
     require.Equal(t, block.Time(), l1BlockHeader.Timestamp)
+
+    // ensure the right rlp encoding is stored. checking the hashes
+    // suffices as it is based on the rlp bytes of the header
+    require.Equal(t, block.Hash(), l1BlockHeader.GethHeader.Hash())
     }
 })
 }
@@ -7,18 +7,25 @@ CREATE DOMAIN UINT256 AS NUMERIC
 */
 CREATE TABLE IF NOT EXISTS l1_block_headers (
+    -- Searchable fields
     hash        VARCHAR NOT NULL PRIMARY KEY,
     parent_hash VARCHAR NOT NULL,
     number      UINT256 NOT NULL,
-    timestamp   INTEGER NOT NULL CHECK (timestamp > 0)
+    timestamp   INTEGER NOT NULL CHECK (timestamp > 0),
+
+    -- Raw Data
+    rlp_bytes VARCHAR NOT NULL
 );

 CREATE TABLE IF NOT EXISTS l2_block_headers (
-    -- Block header
+    -- Searchable fields
     hash        VARCHAR NOT NULL PRIMARY KEY,
     parent_hash VARCHAR NOT NULL,
     number      UINT256 NOT NULL,
-    timestamp   INTEGER NOT NULL CHECK (timestamp > 0)
+    timestamp   INTEGER NOT NULL CHECK (timestamp > 0),
+
+    -- Raw Data
+    rlp_bytes VARCHAR NOT NULL
 );

 /**
@@ -26,21 +33,31 @@ CREATE TABLE IF NOT EXISTS l2_block_headers (
 */
 CREATE TABLE IF NOT EXISTS l1_contract_events (
+    -- Searchable fields
     guid             VARCHAR NOT NULL PRIMARY KEY,
     block_hash       VARCHAR NOT NULL REFERENCES l1_block_headers(hash),
+    contract_address VARCHAR NOT NULL,
     transaction_hash VARCHAR NOT NULL,
-    event_signature  VARCHAR NOT NULL,
     log_index        INTEGER NOT NULL,
-    timestamp        INTEGER NOT NULL CHECK (timestamp > 0)
+    event_signature  VARCHAR NOT NULL, -- bytes32(0x0) when topics are missing
+    timestamp        INTEGER NOT NULL CHECK (timestamp > 0),
+
+    -- Raw Data
+    rlp_bytes VARCHAR NOT NULL
 );

 CREATE TABLE IF NOT EXISTS l2_contract_events (
+    -- Searchable fields
     guid             VARCHAR NOT NULL PRIMARY KEY,
     block_hash       VARCHAR NOT NULL REFERENCES l2_block_headers(hash),
+    contract_address VARCHAR NOT NULL,
     transaction_hash VARCHAR NOT NULL,
-    event_signature  VARCHAR NOT NULL,
     log_index        INTEGER NOT NULL,
-    timestamp        INTEGER NOT NULL CHECK (timestamp > 0)
+    event_signature  VARCHAR NOT NULL, -- bytes32(0x0) when topics are missing
+    timestamp        INTEGER NOT NULL CHECK (timestamp > 0),
+
+    -- Raw Data
+    rlp_bytes VARCHAR NOT NULL
 );

 -- Tables that index finalization markers for L2 blocks.
...
@@ -9,20 +9,17 @@ import (
     "github.com/ethereum/go-ethereum/accounts/abi"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/google/uuid"
 )

 type ProcessedContractEventLogIndexKey struct {
-    header common.Hash
+    blockHash common.Hash
     index  uint
 }

 type ProcessedContractEvents struct {
     events            []*database.ContractEvent
     eventsBySignature map[common.Hash][]*database.ContractEvent
     eventByLogIndex   map[ProcessedContractEventLogIndexKey]*database.ContractEvent
-    eventLog          map[uuid.UUID]*types.Log
 }

 func NewProcessedContractEvents() *ProcessedContractEvents {
@@ -30,17 +27,18 @@ func NewProcessedContractEvents() *ProcessedContractEvents {
         events:            []*database.ContractEvent{},
         eventsBySignature: make(map[common.Hash][]*database.ContractEvent),
         eventByLogIndex:   make(map[ProcessedContractEventLogIndexKey]*database.ContractEvent),
-        eventLog:          make(map[uuid.UUID]*types.Log),
     }
 }

 func (p *ProcessedContractEvents) AddLog(log *types.Log, time uint64) *database.ContractEvent {
     contractEvent := database.ContractEventFromGethLog(log, time)
+    emptyHash := common.Hash{}

     p.events = append(p.events, &contractEvent)
-    p.eventsBySignature[contractEvent.EventSignature] = append(p.eventsBySignature[contractEvent.EventSignature], &contractEvent)
     p.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index}] = &contractEvent
-    p.eventLog[contractEvent.GUID] = log
+    if contractEvent.EventSignature != emptyHash { // ignore anon events
+        p.eventsBySignature[contractEvent.EventSignature] = append(p.eventsBySignature[contractEvent.EventSignature], &contractEvent)
+    }

     return &contractEvent
 }
...
@@ -49,7 +49,7 @@ type CrossDomainMessengerRelayedMessageEvent struct {
 }

 func CrossDomainMessengerSentMessageEvents(events *ProcessedContractEvents) ([]CrossDomainMessengerSentMessageEvent, error) {
-    crossDomainMessengerABI, err := bindings.L1CrossDomainMessengerMetaData.GetAbi()
+    crossDomainMessengerABI, err := bindings.CrossDomainMessengerMetaData.GetAbi()
     if err != nil {
         return nil, err
     }
@@ -60,16 +60,18 @@ func CrossDomainMessengerSentMessageEvents(events *ProcessedContractEvents) ([]C
     processedSentMessageEvents := events.eventsBySignature[sentMessageEventAbi.ID]
     crossDomainMessageEvents := make([]CrossDomainMessengerSentMessageEvent, len(processedSentMessageEvents))
     for i, sentMessageEvent := range processedSentMessageEvents {
-        log := events.eventLog[sentMessageEvent.GUID]
+        log := sentMessageEvent.GethLog

         var sentMsgData bindings.CrossDomainMessengerSentMessage
+        sentMsgData.Raw = *log
         err = UnpackLog(&sentMsgData, log, sentMessageEventAbi.Name, crossDomainMessengerABI)
         if err != nil {
             return nil, err
         }

         var sentMsgExtensionData bindings.CrossDomainMessengerSentMessageExtension1
-        extensionLog := events.eventLog[events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].GUID]
+        extensionLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].GethLog
+        sentMsgExtensionData.Raw = *extensionLog
         err = UnpackLog(&sentMsgExtensionData, extensionLog, sentMessageEventExtensionAbi.Name, crossDomainMessengerABI)
         if err != nil {
             return nil, err
@@ -101,15 +103,19 @@ func CrossDomainMessengerRelayedMessageEvents(events *ProcessedContractEvents) (
     processedRelayedMessageEvents := events.eventsBySignature[relayedMessageEventAbi.ID]
     crossDomainMessageEvents := make([]CrossDomainMessengerRelayedMessageEvent, len(processedRelayedMessageEvents))
     for i, relayedMessageEvent := range processedRelayedMessageEvents {
-        log := events.eventLog[relayedMessageEvent.GUID]
+        log := relayedMessageEvent.GethLog

         var relayedMsgData bindings.CrossDomainMessengerRelayedMessage
+        relayedMsgData.Raw = *log
         err = UnpackLog(&relayedMsgData, log, relayedMessageEventAbi.Name, crossDomainMessengerABI)
         if err != nil {
             return nil, err
         }

-        crossDomainMessageEvents[i] = CrossDomainMessengerRelayedMessageEvent{&relayedMsgData, relayedMessageEvent}
+        crossDomainMessageEvents[i] = CrossDomainMessengerRelayedMessageEvent{
+            CrossDomainMessengerRelayedMessage: &relayedMsgData,
+            RawEvent:                           relayedMessageEvent,
+        }
     }

     return crossDomainMessageEvents, nil
...
@@ -399,10 +399,10 @@ func l1ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db
     sentMessages := make([]*database.L1BridgeMessage, len(sentMessageEvents))
     for i, sentMessageEvent := range sentMessageEvents {
-        log := events.eventLog[sentMessageEvent.RawEvent.GUID]
+        log := sentMessageEvent.RawEvent.GethLog

         // extract the deposit hash from the previous TransactionDepositedEvent
-        transactionDepositedLog := events.eventLog[events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index - 1}].GUID]
+        transactionDepositedLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index - 1}].GethLog
         depositTx, err := derive.UnmarshalDepositLogEvent(transactionDepositedLog)
         if err != nil {
             return err
@@ -479,10 +479,10 @@ func l1ProcessContractEventsStandardBridge(processLog log.Logger, db *database.D
     deposits := make([]*database.L1BridgeDeposit, len(initiatedDepositEvents))
     for i, initiatedBridgeEvent := range initiatedDepositEvents {
-        log := events.eventLog[initiatedBridgeEvent.RawEvent.GUID]
+        log := initiatedBridgeEvent.RawEvent.GethLog

         // extract the deposit hash from the following TransactionDeposited event
-        transactionDepositedLog := events.eventLog[events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].GUID]
+        transactionDepositedLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].GethLog
         depositTx, err := derive.UnmarshalDepositLogEvent(transactionDepositedLog)
         if err != nil {
             return err
...
@@ -106,17 +106,8 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2
     l2Headers := make([]*database.L2BlockHeader, len(headers))
     l2HeaderMap := make(map[common.Hash]*types.Header)
     for i, header := range headers {
-        blockHash := header.Hash()
-        l2Headers[i] = &database.L2BlockHeader{
-            BlockHeader: database.BlockHeader{
-                Hash:       blockHash,
-                ParentHash: header.ParentHash,
-                Number:     database.U256{Int: header.Number},
-                Timestamp:  header.Time,
-            },
-        }
-
-        l2HeaderMap[blockHash] = header
+        l2Headers[i] = &database.L2BlockHeader{BlockHeader: database.BlockHeaderFromGethHeader(header)}
+        l2HeaderMap[l2Headers[i].Hash] = header
     }

     /** Watch for Contract Events **/
@@ -255,10 +246,10 @@ func l2ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db
     sentMessages := make([]*database.L2BridgeMessage, len(sentMessageEvents))
     for i, sentMessageEvent := range sentMessageEvents {
-        log := events.eventLog[sentMessageEvent.RawEvent.GUID]
+        log := sentMessageEvent.RawEvent.GethLog

         // extract the withdrawal hash from the previous MessagePassed event
-        msgPassedLog := events.eventLog[events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index - 1}].GUID]
+        msgPassedLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index - 1}].GethLog
         msgPassedEvent, err := l2ToL1MessagePasserABI.ParseMessagePassed(*msgPassedLog)
         if err != nil {
             return err
@@ -355,10 +346,10 @@ func l2ProcessContractEventsStandardBridge(processLog log.Logger, db *database.D
     withdrawals := make([]*database.L2BridgeWithdrawal, len(initiatedWithdrawalEvents))
     for i, initiatedBridgeEvent := range initiatedWithdrawalEvents {
-        log := events.eventLog[initiatedBridgeEvent.RawEvent.GUID]
+        log := initiatedBridgeEvent.RawEvent.GethLog

         // extract the withdrawal hash from the following MessagePassed event
-        msgPassedLog := events.eventLog[events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].GUID]
+        msgPassedLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].GethLog
         msgPassedEvent, err := l2ToL1MessagePasserABI.ParseMessagePassed(*msgPassedLog)
         if err != nil {
             return err
...
@@ -20,15 +20,19 @@ func L2ToL1MessagePasserMessagesPassed(events *ProcessedContractEvents) ([]L2ToL
     processedMessagePassedEvents := events.eventsBySignature[l2ToL1MessagePasserAbi.Events[eventName].ID]
     messagesPassed := make([]L2ToL1MessagePasserMessagePassed, len(processedMessagePassedEvents))
     for i, messagePassedEvent := range processedMessagePassedEvents {
-        log := events.eventLog[messagePassedEvent.GUID]
+        log := messagePassedEvent.GethLog

         var messagePassed bindings.L2ToL1MessagePasserMessagePassed
+        messagePassed.Raw = *log
         err := UnpackLog(&messagePassed, log, eventName, l2ToL1MessagePasserAbi)
         if err != nil {
             return nil, err
         }

-        messagesPassed[i] = L2ToL1MessagePasserMessagePassed{&messagePassed, messagePassedEvent}
+        messagesPassed[i] = L2ToL1MessagePasserMessagePassed{
+            L2ToL1MessagePasserMessagePassed: &messagePassed,
+            RawEvent:                         messagePassedEvent,
+        }
     }

     return messagesPassed, nil
...
 package processor

 import (
-    "context"
     "errors"
     "math/big"
@@ -9,10 +8,7 @@ import (
     "github.com/ethereum-optimism/optimism/op-bindings/bindings"
     "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
-    "github.com/ethereum/go-ethereum"
-    "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/ethclient"
 )

 type OptimismPortalTransactionDepositEvent struct {
@@ -51,7 +47,7 @@ func OptimismPortalTransactionDepositEvents(events *ProcessedContractEvents) ([]
     processedTxDepositedEvents := events.eventsBySignature[derive.DepositEventABIHash]
     txDeposits := make([]OptimismPortalTransactionDepositEvent, len(processedTxDepositedEvents))
     for i, txDepositEvent := range processedTxDepositedEvents {
-        log := events.eventLog[txDepositEvent.GUID]
+        log := txDepositEvent.GethLog

         depositTx, err := derive.UnmarshalDepositLogEvent(log)
         if err != nil {
@@ -59,12 +55,17 @@ func OptimismPortalTransactionDepositEvents(events *ProcessedContractEvents) ([]
         }

         var txDeposit bindings.OptimismPortalTransactionDeposited
+        txDeposit.Raw = *log
         err = UnpackLog(&txDeposit, log, eventName, optimismPortalAbi)
         if err != nil {
             return nil, err
         }

-        txDeposits[i] = OptimismPortalTransactionDepositEvent{&txDeposit, depositTx, txDepositEvent}
+        txDeposits[i] = OptimismPortalTransactionDepositEvent{
+            OptimismPortalTransactionDeposited: &txDeposit,
+            DepositTx:                          depositTx,
+            RawEvent:                           txDepositEvent,
+        }
     }

     return txDeposits, nil
@@ -80,15 +81,19 @@ func OptimismPortalWithdrawalProvenEvents(events *ProcessedContractEvents) ([]Op
     processedWithdrawalProvenEvents := events.eventsBySignature[optimismPortalAbi.Events[eventName].ID]
     provenEvents := make([]OptimismPortalWithdrawalProvenEvent, len(processedWithdrawalProvenEvents))
     for i, provenEvent := range processedWithdrawalProvenEvents {
-        log := events.eventLog[provenEvent.GUID]
+        log := provenEvent.GethLog

         var withdrawalProven bindings.OptimismPortalWithdrawalProven
+        withdrawalProven.Raw = *log
         err := UnpackLog(&withdrawalProven, log, eventName, optimismPortalAbi)
         if err != nil {
             return nil, err
         }

-        provenEvents[i] = OptimismPortalWithdrawalProvenEvent{&withdrawalProven, provenEvent}
+        provenEvents[i] = OptimismPortalWithdrawalProvenEvent{
+            OptimismPortalWithdrawalProven: &withdrawalProven,
+            RawEvent:                       provenEvent,
+        }
     }

     return provenEvents, nil
@@ -104,7 +109,7 @@ func OptimismPortalWithdrawalFinalizedEvents(events *ProcessedContractEvents) ([
     processedWithdrawalFinalizedEvents := events.eventsBySignature[optimismPortalAbi.Events[eventName].ID]
     finalizedEvents := make([]OptimismPortalWithdrawalFinalizedEvent, len(processedWithdrawalFinalizedEvents))
     for i, finalizedEvent := range processedWithdrawalFinalizedEvents {
-        log := events.eventLog[finalizedEvent.GUID]
+        log := finalizedEvent.GethLog

         var withdrawalFinalized bindings.OptimismPortalWithdrawalFinalized
         err := UnpackLog(&withdrawalFinalized, log, eventName, optimismPortalAbi)
@@ -112,36 +117,11 @@ func OptimismPortalWithdrawalFinalizedEvents(events *ProcessedContractEvents) ([
             return nil, err
         }

-        finalizedEvents[i] = OptimismPortalWithdrawalFinalizedEvent{&withdrawalFinalized, finalizedEvent}
+        finalizedEvents[i] = OptimismPortalWithdrawalFinalizedEvent{
+            OptimismPortalWithdrawalFinalized: &withdrawalFinalized,
+            RawEvent:                          finalizedEvent,
+        }
     }

     return finalizedEvents, nil
 }
-
-func OptimismPortalQueryProvenWithdrawal(ethClient *ethclient.Client, portalAddress common.Address, withdrawalHash common.Hash) (OptimismPortalProvenWithdrawal, error) {
-    var provenWithdrawal OptimismPortalProvenWithdrawal
-
-    optimismPortalAbi, err := bindings.OptimismPortalMetaData.GetAbi()
-    if err != nil {
-        return provenWithdrawal, err
-    }
-
-    name := "provenWithdrawals"
-    txData, err := optimismPortalAbi.Pack(name, withdrawalHash)
-    if err != nil {
-        return provenWithdrawal, err
-    }
-
-    callMsg := ethereum.CallMsg{To: &portalAddress, Data: txData}
-    data, err := ethClient.CallContract(context.Background(), callMsg, nil)
-    if err != nil {
-        return provenWithdrawal, err
-    }
-
-    err = optimismPortalAbi.UnpackIntoInterface(&provenWithdrawal, name, data)
-    if err != nil {
-        return provenWithdrawal, err
-    }
-
-    return provenWithdrawal, nil
-}
@@ -97,7 +97,7 @@ func _standardBridgeInitiatedEvents[BridgeEvent bindings.StandardBridgeETHBridge
     processedInitiatedBridgeEvents := events.eventsBySignature[StandardBridgeABI.Events[eventName].ID]
     initiatedBridgeEvents := make([]StandardBridgeInitiatedEvent, len(processedInitiatedBridgeEvents))
     for i, bridgeInitiatedEvent := range processedInitiatedBridgeEvents {
-        log := events.eventLog[bridgeInitiatedEvent.GUID]
+        log := bridgeInitiatedEvent.GethLog

         var bridgeData BridgeEvent
         err := UnpackLog(&bridgeData, log, eventName, StandardBridgeABI)
@@ -109,7 +109,8 @@ func _standardBridgeInitiatedEvents[BridgeEvent bindings.StandardBridgeETHBridge
         //  - L1: BridgeInitiated -> Portal#DepositTransaction -> SentMessage ...
         //  - L1: BridgeInitiated -> L2ToL1MessagePasser#MessagePassed -> SentMessage ...
         var sentMsgData bindings.L1CrossDomainMessengerSentMessage
-        sentMsgLog := events.eventLog[events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 2}].GUID]
+        sentMsgLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 2}].GethLog
+        sentMsgData.Raw = *sentMsgLog
         err = UnpackLog(&sentMsgData, sentMsgLog, sentMessageEventAbi.Name, l1CrossDomainMessengerABI)
         if err != nil {
             return nil, err
@@ -127,6 +128,7 @@ func _standardBridgeInitiatedEvents[BridgeEvent bindings.StandardBridgeETHBridge
             // represent eth bridge as an erc20
             erc20BridgeData = &bindings.StandardBridgeERC20BridgeInitiated{
+                Raw: *log,
                 // Represent ETH using the hardcoded address
                 LocalToken: predeploys.LegacyERC20ETHAddr, RemoteToken: predeploys.LegacyERC20ETHAddr,
                 // Bridge data
@@ -136,6 +138,7 @@ func _standardBridgeInitiatedEvents[BridgeEvent bindings.StandardBridgeETHBridge
         case bindings.StandardBridgeERC20BridgeInitiated:
             _temp := any(bridgeData).(bindings.StandardBridgeERC20BridgeInitiated)
             erc20BridgeData = &_temp
+            erc20BridgeData.Raw = *log
             expectedCrossDomainMessage, err = StandardBridgeABI.Pack(finalizeMethodName, erc20BridgeData.RemoteToken, erc20BridgeData.LocalToken, erc20BridgeData.From, erc20BridgeData.To, erc20BridgeData.Amount, erc20BridgeData.ExtraData)
             if err != nil {
                 return nil, err
@@ -146,7 +149,11 @@ func _standardBridgeInitiatedEvents[BridgeEvent bindings.StandardBridgeETHBridge
             return nil, errors.New("bridge cross domain message mismatch")
         }

-        initiatedBridgeEvents[i] = StandardBridgeInitiatedEvent{erc20BridgeData, sentMsgData.MessageNonce, bridgeInitiatedEvent}
+        initiatedBridgeEvents[i] = StandardBridgeInitiatedEvent{
+            StandardBridgeERC20BridgeInitiated: erc20BridgeData,
+            CrossDomainMessengerNonce:          sentMsgData.MessageNonce,
+            RawEvent:                           bridgeInitiatedEvent,
+        }
     }

     return initiatedBridgeEvents, nil
@@ -190,7 +197,7 @@ func _standardBridgeFinalizedEvents[BridgeEvent bindings.StandardBridgeETHBridge
     processedFinalizedBridgeEvents := events.eventsBySignature[StandardBridgeABI.Events[eventName].ID]
     finalizedBridgeEvents := make([]StandardBridgeFinalizedEvent, len(processedFinalizedBridgeEvents))
     for i, bridgeFinalizedEvent := range processedFinalizedBridgeEvents {
-        log := events.eventLog[bridgeFinalizedEvent.GUID]
+        log := bridgeFinalizedEvent.GethLog

         var bridgeData BridgeEvent
         err := UnpackLog(&bridgeData, log, eventName, StandardBridgeABI)
@@ -199,7 +206,7 @@ func _standardBridgeFinalizedEvents[BridgeEvent bindings.StandardBridgeETHBridge
         }

         // Look for the RelayedMessage event that follows right after the BridgeFinalized Event
-        relayedMsgLog := events.eventLog[events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].GUID]
+        relayedMsgLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].GethLog
         if relayedMsgLog.Topics[0] != relayedMessageEventAbi.ID {
             return nil, errors.New("unexpected bridge event ordering")
         }
@@ -257,6 +264,7 @@ func _standardBridgeFinalizedEvents[BridgeEvent bindings.StandardBridgeETHBridge
         case bindings.StandardBridgeETHBridgeFinalized:
             ethBridgeData := any(bridgeData).(bindings.StandardBridgeETHBridgeFinalized)
             erc20BridgeData = &bindings.StandardBridgeERC20BridgeFinalized{
+                Raw: *log,
                 // Represent ETH using the hardcoded address
                 LocalToken: predeploys.LegacyERC20ETHAddr, RemoteToken: predeploys.LegacyERC20ETHAddr,
                 // Bridge data
@@ -266,9 +274,14 @@ func _standardBridgeFinalizedEvents[BridgeEvent bindings.StandardBridgeETHBridge
         case bindings.StandardBridgeERC20BridgeFinalized:
             _temp := any(bridgeData).(bindings.StandardBridgeERC20BridgeFinalized)
             erc20BridgeData = &_temp
+            erc20BridgeData.Raw = *log
         }

-        finalizedBridgeEvents[i] = StandardBridgeFinalizedEvent{erc20BridgeData, nonce, bridgeFinalizedEvent}
+        finalizedBridgeEvents[i] = StandardBridgeFinalizedEvent{
+            StandardBridgeERC20BridgeFinalized: erc20BridgeData,
+            CrossDomainMessengerNonce:          nonce,
+            RawEvent:                           bridgeFinalizedEvent,
+        }
     }

     return finalizedBridgeEvents, nil
...
@@ -226,6 +226,8 @@ func (s *channelManager) processBlocks() error {
     } else if err != nil {
         return fmt.Errorf("adding block[%d] to channel builder: %w", i, err)
     }
+    s.log.Debug("Added block to channel", "channel", s.currentChannel.ID(), "block", block)
+
     blocksAdded += 1
     latestL2ref = l2BlockRefFromBlockAndL1Info(block, l1info)
     s.metr.RecordL2BlockInChannel(block)
...
@@ -13,7 +13,7 @@ const WETH9StorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"src
 var WETH9StorageLayout = new(solc.StorageLayout)

-var WETH9DeployedBin = "0x6080604052600436106100bc5760003560e01c8063313ce5671161007457..."
+var WETH9DeployedBin = "0x6080604052600436106100bc5760003560e01c8063313ce5671161007457..."
9081526004602090815260408083203384529091529020805483900390555b73ffffffffffffffffffffffffffffffffffffffff808516600081815260036020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a35060019392505050565b3360009081526003602052604090205481111561068757600080fd5b33600081815260036020526040808220805485900390555183156108fc0291849190818181858888f193505050501580156106c6573d6000803e3d6000fd5b5060408051828152905133917f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65919081900360200190a250565b60025460ff1681565b60036020526000908152604090205481565b60018054604080516020600284861615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f8101849004840282018401909252818152929183018282801561044c5780601f106104215761010080835404028352916020019161044c565b60006107a03384846104cb565b9392505050565b60046020908152600092835260408084209091529082529020548156fea265627a7a7231582067ace7518c12acba9306ac90d5b712c789ea3d50860ef4996a20921a3c1e61f764736f6c63430005110032"
func init() { func init() {
if err := json.Unmarshal([]byte(WETH9StorageLayoutJSON), WETH9StorageLayout); err != nil { if err := json.Unmarshal([]byte(WETH9StorageLayoutJSON), WETH9StorageLayout); err != nil {
......
...@@ -134,23 +134,19 @@ func (a *Agent) step(ctx context.Context, claim types.Claim, game types.Game) er ...@@ -134,23 +134,19 @@ func (a *Agent) step(ctx context.Context, claim types.Claim, game types.Game) er
return nil return nil
} }
oracleData, err := a.solver.GetOracleData(ctx, claim)
if err != nil {
a.log.Debug("Failed to get oracle data", "err", err)
return nil
}
a.log.Info("Updating oracle data", "oracleKey", oracleData.OracleKey, "oracleData", oracleData.OracleData)
if err := a.updater.UpdateOracle(ctx, oracleData); err != nil {
return fmt.Errorf("failed to load oracle data: %w", err)
}
a.log.Info("Attempting step", "claim_depth", claim.Depth(), "maxDepth", a.maxDepth) a.log.Info("Attempting step", "claim_depth", claim.Depth(), "maxDepth", a.maxDepth)
step, err := a.solver.AttemptStep(ctx, claim, agreeWithClaimLevel) step, err := a.solver.AttemptStep(ctx, claim, agreeWithClaimLevel)
if err != nil { if err != nil {
return fmt.Errorf("attempt step: %w", err) return fmt.Errorf("attempt step: %w", err)
} }
if step.OracleData != nil {
a.log.Info("Updating oracle data", "oracleKey", step.OracleData.OracleKey, "oracleData", step.OracleData.OracleData)
if err := a.updater.UpdateOracle(ctx, step.OracleData); err != nil {
return fmt.Errorf("failed to load oracle data: %w", err)
}
}
a.log.Info("Performing step", "is_attack", step.IsAttack, a.log.Info("Performing step", "is_attack", step.IsAttack,
"depth", step.LeafClaim.Depth(), "index_at_depth", step.LeafClaim.IndexAtDepth(), "value", step.LeafClaim.Value) "depth", step.LeafClaim.Depth(), "index_at_depth", step.LeafClaim.IndexAtDepth(), "value", step.LeafClaim.Value)
callData := types.StepCallData{ callData := types.StepCallData{
......
...@@ -30,27 +30,31 @@ func NewTraceProvider(state string, depth uint64) *AlphabetTraceProvider { ...@@ -30,27 +30,31 @@ func NewTraceProvider(state string, depth uint64) *AlphabetTraceProvider {
} }
} }
// GetOracleData should not return any preimage oracle data for the alphabet provider. func (ap *AlphabetTraceProvider) GetStepData(ctx context.Context, i uint64) ([]byte, []byte, *types.PreimageOracleData, error) {
func (p *AlphabetTraceProvider) GetOracleData(ctx context.Context, i uint64) (*types.PreimageOracleData, error) { if i == 0 {
return &types.PreimageOracleData{}, nil prestate, err := ap.AbsolutePreState(ctx)
} if err != nil {
return nil, nil, nil, err
// GetPreimage returns the preimage for the given hash. }
func (ap *AlphabetTraceProvider) GetPreimage(ctx context.Context, i uint64) ([]byte, []byte, error) { return prestate, []byte{}, nil, nil
}
// We want the pre-state which is the value prior to the one requested
i--
// The index cannot be larger than the maximum index as computed by the depth. // The index cannot be larger than the maximum index as computed by the depth.
if i >= ap.maxLen { if i >= ap.maxLen {
return nil, nil, ErrIndexTooLarge return nil, nil, nil, ErrIndexTooLarge
} }
// We extend the deepest hash to the maximum depth if the trace is not expansive. // We extend the deepest hash to the maximum depth if the trace is not expansive.
if i >= uint64(len(ap.state)) { if i >= uint64(len(ap.state)) {
return ap.GetPreimage(ctx, uint64(len(ap.state))-1) return ap.GetStepData(ctx, uint64(len(ap.state)))
} }
return BuildAlphabetPreimage(i, ap.state[i]), []byte{}, nil return BuildAlphabetPreimage(i, ap.state[i]), []byte{}, nil, nil
} }
// Get returns the claim value at the given index in the trace. // Get returns the claim value at the given index in the trace.
func (ap *AlphabetTraceProvider) Get(ctx context.Context, i uint64) (common.Hash, error) { func (ap *AlphabetTraceProvider) Get(ctx context.Context, i uint64) (common.Hash, error) {
claimBytes, _, err := ap.GetPreimage(ctx, i) // Step data returns the pre-state, so add 1 to get the state for index i
claimBytes, _, _, err := ap.GetStepData(ctx, i+1)
if err != nil { if err != nil {
return common.Hash{}, err return common.Hash{}, err
} }
......
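A usage sketch of the new index contract (illustrative only, not part of this commit; it assumes the alphabet package import path that appears elsewhere in this diff): GetStepData(0) yields the absolute pre-state, GetStepData(i+1) yields the pre-state needed to execute step i (which is also the pre-image of the claim at trace index i), and indices past the maximum depth fail with ErrIndexTooLarge.
package main
import (
	"context"
	"fmt"
	"github.com/ethereum-optimism/optimism/op-challenger/fault/alphabet"
)
func main() {
	ctx := context.Background()
	ap := alphabet.NewTraceProvider("abc", 2)
	// Index 0 returns the absolute pre-state rather than an intermediate state.
	pre, _, _, err := ap.GetStepData(ctx, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("absolute pre-state: %x\n", pre)
	// Index i+1 returns the pre-state for step i; Get(i) hashes this value per the
	// TraceProvider contract documented later in this diff.
	pre0, _, _, err := ap.GetStepData(ctx, 1)
	if err != nil {
		panic(err)
	}
	claim0, err := ap.Get(ctx, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("pre-state for step 0: %x, claim 0: %x\n", pre0, claim0)
	// Mirroring the updated test above: for depth 2, index 5 is out of range.
	if _, _, _, err := ap.GetStepData(ctx, 5); err != nil {
		fmt.Println("out of range:", err)
	}
}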
...@@ -58,20 +58,21 @@ func FuzzIndexToBytes(f *testing.F) { ...@@ -58,20 +58,21 @@ func FuzzIndexToBytes(f *testing.F) {
// TestGetPreimage_Succeeds tests the GetPreimage function // TestGetPreimage_Succeeds tests the GetPreimage function
// returns the correct pre-image for an index. // returns the correct pre-image for an index.
func TestGetPreimage_Succeeds(t *testing.T) { func TestGetStepData_Succeeds(t *testing.T) {
ap := NewTraceProvider("abc", 2) ap := NewTraceProvider("abc", 2)
expected := BuildAlphabetPreimage(0, "a'") expected := BuildAlphabetPreimage(0, "a'")
retrieved, proof, err := ap.GetPreimage(context.Background(), uint64(0)) retrieved, proof, data, err := ap.GetStepData(context.Background(), uint64(1))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, expected, retrieved) require.Equal(t, expected, retrieved)
require.Empty(t, proof) require.Empty(t, proof)
require.Nil(t, data)
} }
// TestGetPreimage_TooLargeIndex_Fails tests the GetPreimage // TestGetPreimage_TooLargeIndex_Fails tests the GetPreimage
// function errors if the index is too large. // function errors if the index is too large.
func TestGetPreimage_TooLargeIndex_Fails(t *testing.T) { func TestGetStepData_TooLargeIndex_Fails(t *testing.T) {
ap := NewTraceProvider("abc", 2) ap := NewTraceProvider("abc", 2)
_, _, err := ap.GetPreimage(context.Background(), 4) _, _, _, err := ap.GetStepData(context.Background(), 5)
require.ErrorIs(t, err, ErrIndexTooLarge) require.ErrorIs(t, err, ErrIndexTooLarge)
} }
......
...@@ -8,7 +8,6 @@ import ( ...@@ -8,7 +8,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"github.com/ethereum-optimism/optimism/cannon/mipsevm"
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/fault/types"
...@@ -47,12 +46,14 @@ type CannonTraceProvider struct { ...@@ -47,12 +46,14 @@ type CannonTraceProvider struct {
// lastStep stores the last step in the actual trace if known. 0 indicates unknown. // lastStep stores the last step in the actual trace if known. 0 indicates unknown.
// Cached as an optimisation to avoid repeatedly attempting to execute beyond the end of the trace. // Cached as an optimisation to avoid repeatedly attempting to execute beyond the end of the trace.
lastStep uint64 lastStep uint64
// lastProof stores the proof data to use for all steps extended beyond lastStep
lastProof *proofData
} }
func NewTraceProvider(ctx context.Context, logger log.Logger, cfg *config.Config, l1Client bind.ContractCaller) (*CannonTraceProvider, error) { func NewTraceProvider(ctx context.Context, logger log.Logger, cfg *config.Config, l1Client bind.ContractCaller) (*CannonTraceProvider, error) {
l2Client, err := ethclient.DialContext(ctx, cfg.CannonL2) l2Client, err := ethclient.DialContext(ctx, cfg.CannonL2)
if err != nil { if err != nil {
return nil, fmt.Errorf("dial l2 cleint %v: %w", cfg.CannonL2, err) return nil, fmt.Errorf("dial l2 client %v: %w", cfg.CannonL2, err)
} }
defer l2Client.Close() // Not needed after fetching the inputs defer l2Client.Close() // Not needed after fetching the inputs
gameCaller, err := bindings.NewFaultDisputeGameCaller(cfg.GameAddress, l1Client) gameCaller, err := bindings.NewFaultDisputeGameCaller(cfg.GameAddress, l1Client)
...@@ -71,24 +72,11 @@ func NewTraceProvider(ctx context.Context, logger log.Logger, cfg *config.Config ...@@ -71,24 +72,11 @@ func NewTraceProvider(ctx context.Context, logger log.Logger, cfg *config.Config
}, nil }, nil
} }
func (p *CannonTraceProvider) GetOracleData(ctx context.Context, i uint64) (*types.PreimageOracleData, error) {
proof, err := p.loadProofData(ctx, i)
if err != nil {
return nil, err
}
data := types.NewPreimageOracleData(proof.OracleKey, proof.OracleValue, proof.OracleOffset)
return &data, nil
}
func (p *CannonTraceProvider) Get(ctx context.Context, i uint64) (common.Hash, error) { func (p *CannonTraceProvider) Get(ctx context.Context, i uint64) (common.Hash, error) {
proof, state, err := p.loadProof(ctx, i) proof, err := p.loadProof(ctx, i)
if err != nil { if err != nil {
return common.Hash{}, err return common.Hash{}, err
} }
if proof == nil && state != nil {
// Use the hash from the final state
return crypto.Keccak256Hash(state.EncodeWitness()), nil
}
value := common.BytesToHash(proof.ClaimValue) value := common.BytesToHash(proof.ClaimValue)
if value == (common.Hash{}) { if value == (common.Hash{}) {
...@@ -97,66 +85,46 @@ func (p *CannonTraceProvider) Get(ctx context.Context, i uint64) (common.Hash, e ...@@ -97,66 +85,46 @@ func (p *CannonTraceProvider) Get(ctx context.Context, i uint64) (common.Hash, e
return value, nil return value, nil
} }
func (p *CannonTraceProvider) GetPreimage(ctx context.Context, i uint64) ([]byte, []byte, error) { func (p *CannonTraceProvider) GetStepData(ctx context.Context, i uint64) ([]byte, []byte, *types.PreimageOracleData, error) {
proof, err := p.loadProofData(ctx, i) proof, err := p.loadProof(ctx, i)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
value := ([]byte)(proof.StateData) value := ([]byte)(proof.StateData)
if len(value) == 0 { if len(value) == 0 {
return nil, nil, errors.New("proof missing state data") return nil, nil, nil, errors.New("proof missing state data")
} }
data := ([]byte)(proof.ProofData) data := ([]byte)(proof.ProofData)
if len(data) == 0 { if data == nil {
return nil, nil, errors.New("proof missing proof data") return nil, nil, nil, errors.New("proof missing proof data")
}
var oracleData *types.PreimageOracleData
if len(proof.OracleKey) > 0 {
oracleData = types.NewPreimageOracleData(proof.OracleKey, proof.OracleValue, proof.OracleOffset)
} }
return value, data, nil return value, data, oracleData, nil
} }
func (p *CannonTraceProvider) AbsolutePreState(ctx context.Context) ([]byte, error) { func (p *CannonTraceProvider) AbsolutePreState(ctx context.Context) ([]byte, error) {
state, err := parseState(p.prestate) state, err := parseState(p.prestate)
if err != nil { if err != nil {
return []byte{}, fmt.Errorf("cannot load absolute pre-state: %w", err) return nil, fmt.Errorf("cannot load absolute pre-state: %w", err)
} }
return state.EncodeWitness(), nil return state.EncodeWitness(), nil
} }
// loadProofData loads the proof data for the specified step.
// If the requested index is beyond the end of the actual trace, the proof data from the last step is returned.
// Cannon will be executed a second time if required to generate the full proof data.
func (p *CannonTraceProvider) loadProofData(ctx context.Context, i uint64) (*proofData, error) {
proof, state, err := p.loadProof(ctx, i)
if err != nil {
return nil, err
} else if proof == nil && state != nil {
p.logger.Info("Re-executing to generate proof for last step", "step", state.Step)
proof, _, err = p.loadProof(ctx, state.Step)
if err != nil {
return nil, err
}
if proof == nil {
return nil, fmt.Errorf("proof at step %v was not generated", i)
}
return proof, nil
}
return proof, nil
}
// loadProof will attempt to load or generate the proof data at the specified index // loadProof will attempt to load or generate the proof data at the specified index
// If the requested index is beyond the end of the actual trace: // If the requested index is beyond the end of the actual trace it is extended with no-op instructions.
// - When the actual trace length is known, the proof data from the last step is returned with nil state func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofData, error) {
// - When the actual trace length is not yet know, the state from after the last step is returned with nil proofData if p.lastProof != nil && i > p.lastStep {
// and the actual trace length is cached for future runs // If the requested index is after the last step in the actual trace, extend the final no-op step
func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofData, *mipsevm.State, error) { return p.lastProof, nil
if p.lastStep != 0 && i > p.lastStep {
// If the requested index is after the last step in the actual trace, use the last step
i = p.lastStep
} }
path := filepath.Join(p.dir, proofsDir, fmt.Sprintf("%d.json", i)) path := filepath.Join(p.dir, proofsDir, fmt.Sprintf("%d.json", i))
file, err := os.Open(path) file, err := os.Open(path)
if errors.Is(err, os.ErrNotExist) { if errors.Is(err, os.ErrNotExist) {
if err := p.generator.GenerateProof(ctx, p.dir, i); err != nil { if err := p.generator.GenerateProof(ctx, p.dir, i); err != nil {
return nil, nil, fmt.Errorf("generate cannon trace with proof at %v: %w", i, err) return nil, fmt.Errorf("generate cannon trace with proof at %v: %w", i, err)
} }
// Try opening the file again now and it should exist. // Try opening the file again now and it should exist.
file, err = os.Open(path) file, err = os.Open(path)
...@@ -164,27 +132,39 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofDa ...@@ -164,27 +132,39 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofDa
// Expected proof wasn't generated, check if we reached the end of execution // Expected proof wasn't generated, check if we reached the end of execution
state, err := parseState(filepath.Join(p.dir, finalState)) state, err := parseState(filepath.Join(p.dir, finalState))
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("cannot read final state: %w", err) return nil, fmt.Errorf("cannot read final state: %w", err)
} }
if state.Exited && state.Step < i { if state.Exited && state.Step <= i {
p.logger.Warn("Requested proof was after the program exited", "proof", i, "last", state.Step) p.logger.Warn("Requested proof was after the program exited", "proof", i, "last", state.Step)
// The final instruction has already been applied to this state, so the last step we can execute // The final instruction has already been applied to this state, so the last step we can execute
// is one before its Step value. // is one before its Step value.
p.lastStep = state.Step - 1 p.lastStep = state.Step - 1
return nil, state, nil // Extend the trace out to the full length using a no-op instruction that doesn't change any state
// No execution is done, so no proof-data or oracle values are required.
witness := state.EncodeWitness()
proof := &proofData{
ClaimValue: crypto.Keccak256(witness),
StateData: witness,
ProofData: []byte{},
OracleKey: nil,
OracleValue: nil,
OracleOffset: 0,
}
p.lastProof = proof
return proof, nil
} else { } else {
return nil, nil, fmt.Errorf("expected proof not generated but final state was not exited, requested step %v, final state at step %v", i, state.Step) return nil, fmt.Errorf("expected proof not generated but final state was not exited, requested step %v, final state at step %v", i, state.Step)
} }
} }
} }
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("cannot open proof file (%v): %w", path, err) return nil, fmt.Errorf("cannot open proof file (%v): %w", path, err)
} }
defer file.Close() defer file.Close()
var proof proofData var proof proofData
err = json.NewDecoder(file).Decode(&proof) err = json.NewDecoder(file).Decode(&proof)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to read proof (%v): %w", path, err) return nil, fmt.Errorf("failed to read proof (%v): %w", path, err)
} }
return &proof, nil, nil return &proof, nil
} }
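The padding behaviour above can be summarised with a toy sketch (local stand-in types only, sha256 standing in for Keccak256; illustrative, not the real proofData or provider): once execution has exited, every index past the last real step resolves to a single cached proof built from the final state witness, with an empty proof and no oracle data, so cannon is never re-executed.
package main
import (
	"crypto/sha256"
	"fmt"
)
// toyProof stands in for the proofData struct used above.
type toyProof struct {
	ClaimValue []byte
	StateData  []byte
	ProofData  []byte
}
type toyProvider struct {
	witnesses [][]byte // witness for each step of the real trace
	lastStep  uint64   // highest index with a real proof, once known
	lastProof *toyProof
}
func (p *toyProvider) loadProof(i uint64) *toyProof {
	if p.lastProof != nil && i > p.lastStep {
		// Past the end of the real trace: hand back the cached padding proof.
		return p.lastProof
	}
	last := uint64(len(p.witnesses) - 1)
	if i <= last {
		sum := sha256.Sum256(p.witnesses[i]) // stand-in for Keccak256
		return &toyProof{ClaimValue: sum[:], StateData: p.witnesses[i], ProofData: []byte("proof")}
	}
	// Execution exited before reaching i: pad with a no-op proof derived from the
	// final witness and remember it for every later request.
	p.lastStep = last
	final := p.witnesses[last]
	sum := sha256.Sum256(final)
	p.lastProof = &toyProof{ClaimValue: sum[:], StateData: final, ProofData: []byte{}}
	return p.lastProof
}
func main() {
	p := &toyProvider{witnesses: [][]byte{[]byte("s0"), []byte("s1"), []byte("s2")}}
	for _, i := range []uint64{1, 5, 7000} {
		pr := p.loadProof(i)
		fmt.Printf("step %5d -> claim %x, proof bytes %d\n", i, pr.ClaimValue[:4], len(pr.ProofData))
	}
}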
...@@ -11,6 +11,7 @@ import ( ...@@ -11,6 +11,7 @@ import (
"testing" "testing"
"github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
...@@ -61,21 +62,22 @@ func TestGet(t *testing.T) { ...@@ -61,21 +62,22 @@ func TestGet(t *testing.T) {
}) })
} }
func TestGetOracleData(t *testing.T) { func TestGetStepData(t *testing.T) {
dataDir, prestate := setupTestData(t) dataDir, prestate := setupTestData(t)
t.Run("ExistingProof", func(t *testing.T) { t.Run("ExistingProof", func(t *testing.T) {
provider, generator := setupWithTestData(t, dataDir, prestate) provider, generator := setupWithTestData(t, dataDir, prestate)
oracleData, err := provider.GetOracleData(context.Background(), 420) value, proof, data, err := provider.GetStepData(context.Background(), 0)
require.NoError(t, err) require.NoError(t, err)
require.False(t, oracleData.IsLocal) expected := common.Hex2Bytes("b8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f400000000000000000000000000000000000000000000000000000000000000000000000000")
expectedKey := common.Hex2Bytes("eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee") require.Equal(t, expected, value)
require.Equal(t, expectedKey, oracleData.OracleKey) expectedProof := common.Hex2Bytes("08028e3c0000000000000000000000003c01000a24210b7c00200008000000008fa40004")
expectedData := common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") require.Equal(t, expectedProof, proof)
require.Equal(t, expectedData, oracleData.OracleData) // TODO: Need to add some oracle data
require.Nil(t, data)
require.Empty(t, generator.generated) require.Empty(t, generator.generated)
}) })
t.Run("ProofAfterEndOfTrace", func(t *testing.T) { t.Run("GenerateProof", func(t *testing.T) {
provider, generator := setupWithTestData(t, dataDir, prestate) provider, generator := setupWithTestData(t, dataDir, prestate)
generator.finalState = &mipsevm.State{ generator.finalState = &mipsevm.State{
Memory: &mipsevm.Memory{}, Memory: &mipsevm.Memory{},
...@@ -90,39 +92,14 @@ func TestGetOracleData(t *testing.T) { ...@@ -90,39 +92,14 @@ func TestGetOracleData(t *testing.T) {
OracleValue: []byte{0xdd}, OracleValue: []byte{0xdd},
OracleOffset: 10, OracleOffset: 10,
} }
oracleData, err := provider.GetOracleData(context.Background(), 7000) preimage, proof, data, err := provider.GetStepData(context.Background(), 4)
require.NoError(t, err)
require.Contains(t, generator.generated, 7000, "should have tried to generate the proof")
require.Contains(t, generator.generated, 9, "should have regenerated proof from last step")
require.False(t, oracleData.IsLocal)
require.EqualValues(t, generator.proof.OracleKey, oracleData.OracleKey)
require.EqualValues(t, generator.proof.OracleValue, oracleData.OracleData)
})
t.Run("IgnoreUnknownFields", func(t *testing.T) {
provider, generator := setupWithTestData(t, dataDir, prestate)
oracleData, err := provider.GetOracleData(context.Background(), 421)
require.NoError(t, err) require.NoError(t, err)
require.False(t, oracleData.IsLocal) require.Contains(t, generator.generated, 4, "should have tried to generate the proof")
expectedKey := common.Hex2Bytes("eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee")
require.Equal(t, expectedKey, oracleData.OracleKey)
expectedData := common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
require.Equal(t, expectedData, oracleData.OracleData)
require.Empty(t, generator.generated)
})
}
func TestGetPreimage(t *testing.T) { require.EqualValues(t, generator.proof.StateData, preimage)
dataDir, prestate := setupTestData(t) require.EqualValues(t, generator.proof.ProofData, proof)
t.Run("ExistingProof", func(t *testing.T) { expectedData := types.NewPreimageOracleData(generator.proof.OracleKey, generator.proof.OracleValue, generator.proof.OracleOffset)
provider, generator := setupWithTestData(t, dataDir, prestate) require.EqualValues(t, expectedData, data)
value, proof, err := provider.GetPreimage(context.Background(), 0)
require.NoError(t, err)
expected := common.Hex2Bytes("b8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f400000000000000000000000000000000000000000000000000000000000000000000000000")
require.Equal(t, expected, value)
expectedProof := common.Hex2Bytes("08028e3c0000000000000000000000003c01000a24210b7c00200008000000008fa40004")
require.Equal(t, expectedProof, proof)
require.Empty(t, generator.generated)
}) })
t.Run("ProofAfterEndOfTrace", func(t *testing.T) { t.Run("ProofAfterEndOfTrace", func(t *testing.T) {
...@@ -140,30 +117,33 @@ func TestGetPreimage(t *testing.T) { ...@@ -140,30 +117,33 @@ func TestGetPreimage(t *testing.T) {
OracleValue: []byte{0xdd}, OracleValue: []byte{0xdd},
OracleOffset: 10, OracleOffset: 10,
} }
preimage, proof, err := provider.GetPreimage(context.Background(), 7000) preimage, proof, data, err := provider.GetStepData(context.Background(), 7000)
require.NoError(t, err) require.NoError(t, err)
require.Contains(t, generator.generated, 7000, "should have tried to generate the proof") require.Contains(t, generator.generated, 7000, "should have tried to generate the proof")
require.Contains(t, generator.generated, 9, "should have regenerated proof from last step")
require.EqualValues(t, generator.proof.StateData, preimage) witness := generator.finalState.EncodeWitness()
require.EqualValues(t, generator.proof.ProofData, proof) require.EqualValues(t, witness, preimage)
require.Equal(t, []byte{}, proof)
require.Nil(t, data)
}) })
t.Run("MissingStateData", func(t *testing.T) { t.Run("MissingStateData", func(t *testing.T) {
provider, generator := setupWithTestData(t, dataDir, prestate) provider, generator := setupWithTestData(t, dataDir, prestate)
_, _, err := provider.GetPreimage(context.Background(), 1) _, _, _, err := provider.GetStepData(context.Background(), 1)
require.ErrorContains(t, err, "missing state data") require.ErrorContains(t, err, "missing state data")
require.Empty(t, generator.generated) require.Empty(t, generator.generated)
}) })
t.Run("IgnoreUnknownFields", func(t *testing.T) { t.Run("IgnoreUnknownFields", func(t *testing.T) {
provider, generator := setupWithTestData(t, dataDir, prestate) provider, generator := setupWithTestData(t, dataDir, prestate)
value, proof, err := provider.GetPreimage(context.Background(), 2) value, proof, data, err := provider.GetStepData(context.Background(), 2)
require.NoError(t, err) require.NoError(t, err)
expected := common.Hex2Bytes("cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") expected := common.Hex2Bytes("cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc")
require.Equal(t, expected, value) require.Equal(t, expected, value)
expectedProof := common.Hex2Bytes("dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd") expectedProof := common.Hex2Bytes("dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
require.Equal(t, expectedProof, proof) require.Equal(t, expectedProof, proof)
require.Empty(t, generator.generated) require.Empty(t, generator.generated)
require.Nil(t, data)
}) })
} }
......
...@@ -87,9 +87,6 @@ func NewOracleUpdaterWithOracle( ...@@ -87,9 +87,6 @@ func NewOracleUpdaterWithOracle(
// UpdateOracle updates the oracle with the given data. // UpdateOracle updates the oracle with the given data.
func (u *cannonUpdater) UpdateOracle(ctx context.Context, data *types.PreimageOracleData) error { func (u *cannonUpdater) UpdateOracle(ctx context.Context, data *types.PreimageOracleData) error {
if len(data.OracleKey) == 0 {
return nil
}
if data.IsLocal { if data.IsLocal {
return u.sendLocalOracleData(ctx, data) return u.sendLocalOracleData(ctx, data)
} }
......
...@@ -89,12 +89,6 @@ func TestCannonUpdater_UpdateOracle(t *testing.T) { ...@@ -89,12 +89,6 @@ func TestCannonUpdater_UpdateOracle(t *testing.T) {
})) }))
require.Equal(t, 1, mockTxMgr.failedSends) require.Equal(t, 1, mockTxMgr.failedSends)
}) })
t.Run("skip empty data", func(t *testing.T) {
updater, mockTxMgr := newTestCannonUpdater(t, true)
require.NoError(t, updater.UpdateOracle(context.Background(), &types.PreimageOracleData{}))
require.Equal(t, 0, mockTxMgr.sends)
})
} }
// TestCannonUpdater_BuildLocalOracleData tests the [cannonUpdater] // TestCannonUpdater_BuildLocalOracleData tests the [cannonUpdater]
......
...@@ -10,6 +10,7 @@ import ( ...@@ -10,6 +10,7 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/fault/alphabet" "github.com/ethereum-optimism/optimism/op-challenger/fault/alphabet"
"github.com/ethereum-optimism/optimism/op-challenger/fault/cannon" "github.com/ethereum-optimism/optimism/op-challenger/fault/cannon"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum-optimism/optimism/op-service/txmgr/metrics" "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
...@@ -38,7 +39,7 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*se ...@@ -38,7 +39,7 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*se
return nil, fmt.Errorf("failed to create the transaction manager: %w", err) return nil, fmt.Errorf("failed to create the transaction manager: %w", err)
} }
client, err := ethclient.Dial(cfg.L1EthRpc) client, err := client.DialEthClientWithTimeout(client.DefaultDialTimeout, logger, cfg.L1EthRpc)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to dial L1: %w", err) return nil, fmt.Errorf("failed to dial L1: %w", err)
} }
......
...@@ -67,10 +67,7 @@ func newMockTraceProvider(prestateErrors bool, prestate []byte) *mockTraceProvid ...@@ -67,10 +67,7 @@ func newMockTraceProvider(prestateErrors bool, prestate []byte) *mockTraceProvid
func (m *mockTraceProvider) Get(ctx context.Context, i uint64) (common.Hash, error) { func (m *mockTraceProvider) Get(ctx context.Context, i uint64) (common.Hash, error) {
panic("not implemented") panic("not implemented")
} }
func (m *mockTraceProvider) GetOracleData(ctx context.Context, i uint64) (*types.PreimageOracleData, error) { func (m *mockTraceProvider) GetStepData(ctx context.Context, i uint64) (prestate []byte, proofData []byte, preimageData *types.PreimageOracleData, err error) {
panic("not implemented")
}
func (m *mockTraceProvider) GetPreimage(ctx context.Context, i uint64) (preimage []byte, proofData []byte, err error) {
panic("not implemented") panic("not implemented")
} }
func (m *mockTraceProvider) AbsolutePreState(ctx context.Context) ([]byte, error) { func (m *mockTraceProvider) AbsolutePreState(ctx context.Context) ([]byte, error) {
......
...@@ -28,12 +28,6 @@ func NewSolver(gameDepth int, traceProvider types.TraceProvider) *Solver { ...@@ -28,12 +28,6 @@ func NewSolver(gameDepth int, traceProvider types.TraceProvider) *Solver {
} }
} }
// GetOracleData returns the oracle data for the provided claim.
// It passes through to the [TraceProvider] by finding the trace index for the claim.
func (s *Solver) GetOracleData(ctx context.Context, claim types.Claim) (*types.PreimageOracleData, error) {
return s.trace.GetOracleData(ctx, claim.TraceIndex(s.gameDepth))
}
// NextMove returns the next move to make given the current state of the game. // NextMove returns the next move to make given the current state of the game.
func (s *Solver) NextMove(ctx context.Context, claim types.Claim, agreeWithClaimLevel bool) (*types.Claim, error) { func (s *Solver) NextMove(ctx context.Context, claim types.Claim, agreeWithClaimLevel bool) (*types.Claim, error) {
if agreeWithClaimLevel { if agreeWithClaimLevel {
...@@ -58,7 +52,7 @@ type StepData struct { ...@@ -58,7 +52,7 @@ type StepData struct {
IsAttack bool IsAttack bool
PreState []byte PreState []byte
ProofData []byte ProofData []byte
OracleData types.PreimageOracleData OracleData *types.PreimageOracleData
} }
// AttemptStep determines what step should occur for a given leaf claim. // AttemptStep determines what step should occur for a given leaf claim.
...@@ -77,35 +71,30 @@ func (s *Solver) AttemptStep(ctx context.Context, claim types.Claim, agreeWithCl ...@@ -77,35 +71,30 @@ func (s *Solver) AttemptStep(ctx context.Context, claim types.Claim, agreeWithCl
index := claim.TraceIndex(s.gameDepth) index := claim.TraceIndex(s.gameDepth)
var preState []byte var preState []byte
var proofData []byte var proofData []byte
// If we are attacking index 0, we provide the absolute pre-state, not an intermediate state var oracleData *types.PreimageOracleData
if index == 0 && !claimCorrect {
state, err := s.trace.AbsolutePreState(ctx) if !claimCorrect {
// Attack the claim by executing step index, so we need to get the pre-state of that index
preState, proofData, oracleData, err = s.trace.GetStepData(ctx, index)
if err != nil { if err != nil {
return StepData{}, err return StepData{}, err
} }
preState = state
} else { } else {
// If attacking, get the state just before, other get the state after // We agree with the claim so Defend and use this claim as the starting point to execute the step after
if !claimCorrect { // Thus we need the pre-state of the next step
index = index - 1 // Note: This makes our maximum depth 63 because we need to add 1 without overflowing.
} preState, proofData, oracleData, err = s.trace.GetStepData(ctx, index+1)
preState, proofData, err = s.trace.GetPreimage(ctx, index)
if err != nil { if err != nil {
return StepData{}, err return StepData{}, err
} }
} }
oracleData, err := s.trace.GetOracleData(ctx, index)
if err != nil {
return StepData{}, err
}
return StepData{ return StepData{
LeafClaim: claim, LeafClaim: claim,
IsAttack: !claimCorrect, IsAttack: !claimCorrect,
PreState: preState, PreState: preState,
ProofData: proofData, ProofData: proofData,
OracleData: *oracleData, OracleData: oracleData,
}, nil }, nil
} }
......
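A distilled sketch of the index selection above (hypothetical helper name, not part of the commit): attacking a claim at trace index i means re-executing step i, so the solver fetches GetStepData(i); defending means executing the step after the agreed claim, so it fetches GetStepData(i+1). The +1 on a uint64 trace index is also why the supported game depth tops out at 63.
package main
import "fmt"
// stepDataIndex mirrors the branch in AttemptStep: which trace index to pass to
// GetStepData for a leaf claim at traceIndex, given whether we think it is correct.
func stepDataIndex(traceIndex uint64, claimCorrect bool) uint64 {
	if !claimCorrect {
		// Attack: dispute step traceIndex itself, so we need its pre-state.
		return traceIndex
	}
	// Defend: agree with the claim and execute the following step, so we need the
	// next pre-state. At depth d the largest trace index is 2^d - 1, and this
	// increment must not overflow a uint64, hence the depth-63 note above.
	return traceIndex + 1
}
func main() {
	fmt.Println("attack index 4 ->", stepDataIndex(4, false)) // 4
	fmt.Println("defend index 4 ->", stepDataIndex(4, true))  // 5
}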
...@@ -113,9 +113,6 @@ func TestAttemptStep(t *testing.T) { ...@@ -113,9 +113,6 @@ func TestAttemptStep(t *testing.T) {
ctx := context.Background() ctx := context.Background()
preState, err := builder.CorrectTraceProvider().AbsolutePreState(ctx)
require.NoError(t, err)
tests := []struct { tests := []struct {
name string name string
claim types.Claim claim types.Claim
...@@ -124,63 +121,55 @@ func TestAttemptStep(t *testing.T) { ...@@ -124,63 +121,55 @@ func TestAttemptStep(t *testing.T) {
expectAttack bool expectAttack bool
expectPreState []byte expectPreState []byte
expectProofData []byte expectProofData []byte
expectedLocal bool expectedOracleData *types.PreimageOracleData
expectedOracleKey []byte
expectedOracleData []byte
}{ }{
{ {
name: "AttackFirstTraceIndex", name: "AttackFirstTraceIndex",
claim: builder.CreateLeafClaim(0, false), claim: builder.CreateLeafClaim(0, false),
expectAttack: true, expectAttack: true,
expectPreState: preState, expectPreState: builder.CorrectPreState(0),
expectProofData: nil, expectProofData: builder.CorrectProofData(0),
expectedOracleKey: []byte{byte(0)}, expectedOracleData: builder.CorrectOracleData(0),
expectedOracleData: []byte{byte(0)},
}, },
{ {
name: "DefendFirstTraceIndex", name: "DefendFirstTraceIndex",
claim: builder.CreateLeafClaim(0, true), claim: builder.CreateLeafClaim(0, true),
expectAttack: false, expectAttack: false,
expectPreState: builder.CorrectPreState(0), expectPreState: builder.CorrectPreState(1),
expectProofData: builder.CorrectProofData(0), expectProofData: builder.CorrectProofData(1),
expectedOracleKey: []byte{byte(0)}, expectedOracleData: builder.CorrectOracleData(1),
expectedOracleData: []byte{byte(0)},
}, },
{ {
name: "AttackMiddleTraceIndex", name: "AttackMiddleTraceIndex",
claim: builder.CreateLeafClaim(4, false), claim: builder.CreateLeafClaim(4, false),
expectAttack: true, expectAttack: true,
expectPreState: builder.CorrectPreState(3), expectPreState: builder.CorrectPreState(4),
expectProofData: builder.CorrectProofData(3), expectProofData: builder.CorrectProofData(4),
expectedOracleKey: []byte{byte(3)}, expectedOracleData: builder.CorrectOracleData(4),
expectedOracleData: []byte{byte(3)},
}, },
{ {
name: "DefendMiddleTraceIndex", name: "DefendMiddleTraceIndex",
claim: builder.CreateLeafClaim(4, true), claim: builder.CreateLeafClaim(4, true),
expectAttack: false, expectAttack: false,
expectPreState: builder.CorrectPreState(4), expectPreState: builder.CorrectPreState(5),
expectProofData: builder.CorrectProofData(4), expectProofData: builder.CorrectProofData(5),
expectedOracleKey: []byte{byte(4)}, expectedOracleData: builder.CorrectOracleData(5),
expectedOracleData: []byte{byte(4)},
}, },
{ {
name: "AttackLastTraceIndex", name: "AttackLastTraceIndex",
claim: builder.CreateLeafClaim(lastLeafTraceIndex, false), claim: builder.CreateLeafClaim(lastLeafTraceIndex, false),
expectAttack: true, expectAttack: true,
expectPreState: builder.CorrectPreState(lastLeafTraceIndex - 1), expectPreState: builder.CorrectPreState(lastLeafTraceIndex),
expectProofData: builder.CorrectProofData(lastLeafTraceIndex - 1), expectProofData: builder.CorrectProofData(lastLeafTraceIndex),
expectedOracleKey: []byte{byte(5)}, expectedOracleData: builder.CorrectOracleData(lastLeafTraceIndex),
expectedOracleData: []byte{byte(5)},
}, },
{ {
name: "DefendLastTraceIndex", name: "DefendLastTraceIndex",
claim: builder.CreateLeafClaim(lastLeafTraceIndex, true), claim: builder.CreateLeafClaim(lastLeafTraceIndex, true),
expectAttack: false, expectAttack: false,
expectPreState: builder.CorrectPreState(lastLeafTraceIndex), expectPreState: builder.CorrectPreState(lastLeafTraceIndex + 1),
expectProofData: builder.CorrectProofData(lastLeafTraceIndex), expectProofData: builder.CorrectProofData(lastLeafTraceIndex + 1),
expectedOracleKey: []byte{byte(6)}, expectedOracleData: builder.CorrectOracleData(lastLeafTraceIndex + 1),
expectedOracleData: []byte{byte(6)},
}, },
{ {
name: "CannotStepNonLeaf", name: "CannotStepNonLeaf",
...@@ -199,24 +188,6 @@ func TestAttemptStep(t *testing.T) { ...@@ -199,24 +188,6 @@ func TestAttemptStep(t *testing.T) {
agreeWithLevel: true, agreeWithLevel: true,
expectedErr: solver.ErrStepNonLeafNode, expectedErr: solver.ErrStepNonLeafNode,
}, },
{
name: "AttackLocalOracleData",
claim: builder.Seq(false).Attack(false).Attack(true).Defend(false).Get(),
expectAttack: true,
agreeWithLevel: false,
expectPreState: builder.CorrectPreState(1),
expectProofData: builder.CorrectProofData(1),
expectedLocal: true,
expectedOracleKey: []byte{0x01},
expectedOracleData: []byte{0x01},
expectedErr: nil,
},
{
name: "AttackStepOracleError",
claim: builder.Seq(false).Attack(false).Attack(false).Attack(false).Get(),
agreeWithLevel: false,
expectedErr: errProvider,
},
} }
for _, tableTest := range tests { for _, tableTest := range tests {
...@@ -235,9 +206,10 @@ func TestAttemptStep(t *testing.T) { ...@@ -235,9 +206,10 @@ func TestAttemptStep(t *testing.T) {
require.Equal(t, tableTest.expectAttack, step.IsAttack) require.Equal(t, tableTest.expectAttack, step.IsAttack)
require.Equal(t, tableTest.expectPreState, step.PreState) require.Equal(t, tableTest.expectPreState, step.PreState)
require.Equal(t, tableTest.expectProofData, step.ProofData) require.Equal(t, tableTest.expectProofData, step.ProofData)
require.Equal(t, tableTest.expectedLocal, step.OracleData.IsLocal) require.Equal(t, tableTest.expectedOracleData.IsLocal, step.OracleData.IsLocal)
require.Equal(t, tableTest.expectedOracleKey, step.OracleData.OracleKey) require.Equal(t, tableTest.expectedOracleData.OracleKey, step.OracleData.OracleKey)
require.Equal(t, tableTest.expectedOracleData, step.OracleData.OracleData) require.Equal(t, tableTest.expectedOracleData.OracleData, step.OracleData.OracleData)
require.Equal(t, tableTest.expectedOracleData.OracleOffset, step.OracleData.OracleOffset)
} else { } else {
require.ErrorIs(t, err, tableTest.expectedErr) require.ErrorIs(t, err, tableTest.expectedErr)
require.Equal(t, solver.StepData{}, step) require.Equal(t, solver.StepData{}, step)
......
...@@ -25,18 +25,11 @@ type alphabetWithProofProvider struct { ...@@ -25,18 +25,11 @@ type alphabetWithProofProvider struct {
OracleError error OracleError error
} }
func (a *alphabetWithProofProvider) GetPreimage(ctx context.Context, i uint64) ([]byte, []byte, error) { func (a *alphabetWithProofProvider) GetStepData(ctx context.Context, i uint64) ([]byte, []byte, *types.PreimageOracleData, error) {
preimage, _, err := a.AlphabetTraceProvider.GetPreimage(ctx, i) preimage, _, _, err := a.AlphabetTraceProvider.GetStepData(ctx, i)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
return preimage, []byte{byte(i)}, nil data := types.NewPreimageOracleData([]byte{byte(i)}, []byte{byte(i - 1)}, uint32(i-1))
} return preimage, []byte{byte(i - 1)}, data, nil
func (a *alphabetWithProofProvider) GetOracleData(ctx context.Context, i uint64) (*types.PreimageOracleData, error) {
if a.OracleError != nil {
return &types.PreimageOracleData{}, a.OracleError
}
data := types.NewPreimageOracleData([]byte{byte(i)}, []byte{byte(i)}, uint32(i))
return &data, nil
} }
...@@ -38,20 +38,26 @@ func (c *ClaimBuilder) CorrectClaim(idx uint64) common.Hash { ...@@ -38,20 +38,26 @@ func (c *ClaimBuilder) CorrectClaim(idx uint64) common.Hash {
return value return value
} }
// CorrectPreState returns the pre-image of the canonical claim at the specified trace index // CorrectPreState returns the pre-state (not hashed) required to execute the valid step at the specified trace index
func (c *ClaimBuilder) CorrectPreState(idx uint64) []byte { func (c *ClaimBuilder) CorrectPreState(idx uint64) []byte {
preimage, _, err := c.correct.GetPreimage(context.Background(), idx) preimage, _, _, err := c.correct.GetStepData(context.Background(), idx)
c.require.NoError(err) c.require.NoError(err)
return preimage return preimage
} }
// CorrectProofData returns the proof-data for the canonical claim at the specified trace index // CorrectProofData returns the proof-data required to execute the valid step at the specified trace index
func (c *ClaimBuilder) CorrectProofData(idx uint64) []byte { func (c *ClaimBuilder) CorrectProofData(idx uint64) []byte {
_, proof, err := c.correct.GetPreimage(context.Background(), idx) _, proof, _, err := c.correct.GetStepData(context.Background(), idx)
c.require.NoError(err) c.require.NoError(err)
return proof return proof
} }
func (c *ClaimBuilder) CorrectOracleData(idx uint64) *types.PreimageOracleData {
_, _, data, err := c.correct.GetStepData(context.Background(), idx)
c.require.NoError(err)
return data
}
func (c *ClaimBuilder) incorrectClaim(idx uint64) common.Hash { func (c *ClaimBuilder) incorrectClaim(idx uint64) common.Hash {
return common.BigToHash(new(big.Int).SetUint64(idx)) return common.BigToHash(new(big.Int).SetUint64(idx))
} }
......
...@@ -45,8 +45,8 @@ func (p *PreimageOracleData) GetPreimageWithoutSize() []byte { ...@@ -45,8 +45,8 @@ func (p *PreimageOracleData) GetPreimageWithoutSize() []byte {
} }
// NewPreimageOracleData creates a new [PreimageOracleData] instance. // NewPreimageOracleData creates a new [PreimageOracleData] instance.
func NewPreimageOracleData(key []byte, data []byte, offset uint32) PreimageOracleData { func NewPreimageOracleData(key []byte, data []byte, offset uint32) *PreimageOracleData {
return PreimageOracleData{ return &PreimageOracleData{
IsLocal: len(key) > 0 && key[0] == byte(1), IsLocal: len(key) > 0 && key[0] == byte(1),
OracleKey: key, OracleKey: key,
OracleData: data, OracleData: data,
...@@ -74,17 +74,14 @@ type TraceProvider interface { ...@@ -74,17 +74,14 @@ type TraceProvider interface {
// Get(i) = Keccak256(GetPreimage(i)) // Get(i) = Keccak256(GetPreimage(i))
Get(ctx context.Context, i uint64) (common.Hash, error) Get(ctx context.Context, i uint64) (common.Hash, error)
// GetOracleData returns preimage oracle data that can be submitted to the pre-image // GetStepData returns the data required to execute the step at the specified trace index.
// oracle and the dispute game contract. This function accepts a trace index for // This includes the pre-state of the step (not hashed), the proof data required during step execution
// which the provider returns needed preimage data. // and any pre-image data that needs to be loaded into the oracle prior to execution (may be nil)
GetOracleData(ctx context.Context, i uint64) (*PreimageOracleData, error) // The prestate returned from GetStepData for trace 10 should be the pre-image of the claim from trace 9
GetStepData(ctx context.Context, i uint64) (prestate []byte, proofData []byte, preimageData *PreimageOracleData, err error)
// GetPreimage returns the pre-image for a claim at the specified trace index, along
// with any associated proof data to assist in its verification.
GetPreimage(ctx context.Context, i uint64) (preimage []byte, proofData []byte, err error)
// AbsolutePreState is the pre-image value of the trace that transitions to the trace value at index 0 // AbsolutePreState is the pre-image value of the trace that transitions to the trace value at index 0
AbsolutePreState(ctx context.Context) ([]byte, error) AbsolutePreState(ctx context.Context) (preimage []byte, err error)
} }
// ClaimData is the core of a claim. It must be unique inside a specific game. // ClaimData is the core of a claim. It must be unique inside a specific game.
......
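A minimal, self-contained sketch of the contract documented above (toy types only; sha256 stands in for Keccak256, and nothing here is the real op-challenger code): a provider exposes Get, GetStepData and AbsolutePreState, and the invariant is that the pre-state returned by GetStepData(i+1) hashes to the claim returned by Get(i).
package main
import (
	"context"
	"crypto/sha256"
	"errors"
	"fmt"
)
// preimageOracleData is a local stand-in for types.PreimageOracleData.
type preimageOracleData struct {
	OracleKey, OracleData []byte
	OracleOffset          uint32
}
// toyTraceProvider mirrors the TraceProvider shape: Get(i) is the hash of the
// pre-state that GetStepData(i+1) returns.
type toyTraceProvider struct {
	absolutePreState []byte
	states           [][]byte // post-state (claim pre-image) for each step
}
var errIndexTooLarge = errors.New("index too large")
func (p *toyTraceProvider) AbsolutePreState(ctx context.Context) ([]byte, error) {
	return p.absolutePreState, nil
}
func (p *toyTraceProvider) GetStepData(ctx context.Context, i uint64) ([]byte, []byte, *preimageOracleData, error) {
	if i == 0 {
		// Step 0 starts from the absolute pre-state.
		return p.absolutePreState, []byte{}, nil, nil
	}
	if i > uint64(len(p.states)) {
		return nil, nil, nil, errIndexTooLarge
	}
	// The pre-state of step i is the post-state of step i-1.
	return p.states[i-1], []byte{}, nil, nil
}
func (p *toyTraceProvider) Get(ctx context.Context, i uint64) ([32]byte, error) {
	pre, _, _, err := p.GetStepData(ctx, i+1)
	if err != nil {
		return [32]byte{}, err
	}
	return sha256.Sum256(pre), nil // stand-in for Keccak256
}
func main() {
	p := &toyTraceProvider{
		absolutePreState: []byte("genesis"),
		states:           [][]byte{[]byte("s0"), []byte("s1"), []byte("s2")},
	}
	claim, _ := p.Get(context.Background(), 1)
	pre, _, _, _ := p.GetStepData(context.Background(), 2)
	fmt.Printf("claim(1)              = %x\nsha256(preState at 2) = %x\n", claim, sha256.Sum256(pre))
}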
File mode changed from 100644 to 100755
...@@ -26,6 +26,22 @@ type Option func(config2 *config.Config) ...@@ -26,6 +26,22 @@ type Option func(config2 *config.Config)
func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name string, options ...Option) *Helper { func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name string, options ...Option) *Helper {
log := testlog.Logger(t, log.LvlInfo).New("role", name) log := testlog.Logger(t, log.LvlInfo).New("role", name)
log.Info("Creating challenger", "l1", l1Endpoint) log.Info("Creating challenger", "l1", l1Endpoint)
cfg := NewChallengerConfig(t, l1Endpoint, options...)
errCh := make(chan error, 1)
ctx, cancel := context.WithCancel(ctx)
go func() {
defer close(errCh)
errCh <- op_challenger.Main(ctx, log, cfg)
}()
return &Helper{
log: log,
cancel: cancel,
errors: errCh,
}
}
func NewChallengerConfig(t *testing.T, l1Endpoint string, options ...Option) *config.Config {
txmgrCfg := txmgr.NewCLIConfig(l1Endpoint) txmgrCfg := txmgr.NewCLIConfig(l1Endpoint)
txmgrCfg.NumConfirmations = 1 txmgrCfg.NumConfirmations = 1
txmgrCfg.ReceiptQueryInterval = 1 * time.Second txmgrCfg.ReceiptQueryInterval = 1 * time.Second
...@@ -53,18 +69,7 @@ func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name st ...@@ -53,18 +69,7 @@ func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name st
_, err := os.Stat(cfg.CannonAbsolutePreState) _, err := os.Stat(cfg.CannonAbsolutePreState)
require.NoError(t, err, "cannon pre-state should be built. Make sure you've run make cannon-prestate") require.NoError(t, err, "cannon pre-state should be built. Make sure you've run make cannon-prestate")
} }
return cfg
errCh := make(chan error, 1)
ctx, cancel := context.WithCancel(ctx)
go func() {
defer close(errCh)
errCh <- op_challenger.Main(ctx, log, cfg)
}()
return &Helper{
log: log,
cancel: cancel,
errors: errCh,
}
} }
func (h *Helper) Close() error { func (h *Helper) Close() error {
......
...@@ -7,9 +7,13 @@ import ( ...@@ -7,9 +7,13 @@ import (
"path/filepath" "path/filepath"
"github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/fault/cannon"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/log"
) )
type CannonGameHelper struct { type CannonGameHelper struct {
...@@ -17,31 +21,7 @@ type CannonGameHelper struct { ...@@ -17,31 +21,7 @@ type CannonGameHelper struct {
} }
func (g *CannonGameHelper) StartChallenger(ctx context.Context, rollupCfg *rollup.Config, l2Genesis *core.Genesis, l1Endpoint string, l2Endpoint string, name string, options ...challenger.Option) *challenger.Helper { func (g *CannonGameHelper) StartChallenger(ctx context.Context, rollupCfg *rollup.Config, l2Genesis *core.Genesis, l1Endpoint string, l2Endpoint string, name string, options ...challenger.Option) *challenger.Helper {
opts := []challenger.Option{ opts := []challenger.Option{g.createConfigOption(rollupCfg, l2Genesis, l2Endpoint)}
func(c *config.Config) {
c.GameAddress = g.addr
c.TraceType = config.TraceTypeCannon
c.AgreeWithProposedOutput = false
c.CannonL2 = l2Endpoint
c.CannonBin = "../cannon/bin/cannon"
c.CannonDatadir = g.t.TempDir()
c.CannonServer = "../op-program/bin/op-program"
c.CannonAbsolutePreState = "../op-program/bin/prestate.json"
c.CannonSnapshotFreq = 10_000_000
genesisBytes, err := json.Marshal(l2Genesis)
g.require.NoError(err, "marshall l2 genesis config")
genesisFile := filepath.Join(c.CannonDatadir, "l2-genesis.json")
g.require.NoError(os.WriteFile(genesisFile, genesisBytes, 0644))
c.CannonL2GenesisPath = genesisFile
rollupBytes, err := json.Marshal(rollupCfg)
g.require.NoError(err, "marshall rollup config")
rollupFile := filepath.Join(c.CannonDatadir, "rollup.json")
g.require.NoError(os.WriteFile(rollupFile, rollupBytes, 0644))
c.CannonRollupConfigPath = rollupFile
},
}
opts = append(opts, options...) opts = append(opts, options...)
c := challenger.NewChallenger(g.t, ctx, l1Endpoint, name, opts...) c := challenger.NewChallenger(g.t, ctx, l1Endpoint, name, opts...)
g.t.Cleanup(func() { g.t.Cleanup(func() {
...@@ -49,3 +29,44 @@ func (g *CannonGameHelper) StartChallenger(ctx context.Context, rollupCfg *rollu ...@@ -49,3 +29,44 @@ func (g *CannonGameHelper) StartChallenger(ctx context.Context, rollupCfg *rollu
}) })
return c return c
} }
func (g *CannonGameHelper) CreateHonestActor(ctx context.Context, rollupCfg *rollup.Config, l2Genesis *core.Genesis, l1Client bind.ContractCaller, l1Endpoint string, l2Endpoint string, options ...challenger.Option) *HonestHelper {
opts := []challenger.Option{g.createConfigOption(rollupCfg, l2Genesis, l2Endpoint)}
opts = append(opts, options...)
cfg := challenger.NewChallengerConfig(g.t, l1Endpoint, opts...)
provider, err := cannon.NewTraceProvider(ctx, testlog.Logger(g.t, log.LvlTrace).New("role", "CorrectTrace"), cfg, l1Client)
g.require.NoError(err, "create cannon trace provider")
return &HonestHelper{
t: g.t,
require: g.require,
game: &g.FaultGameHelper,
correctTrace: provider,
}
}
func (g *CannonGameHelper) createConfigOption(rollupCfg *rollup.Config, l2Genesis *core.Genesis, l2Endpoint string) challenger.Option {
return func(c *config.Config) {
c.GameAddress = g.addr
c.TraceType = config.TraceTypeCannon
c.AgreeWithProposedOutput = false
c.CannonL2 = l2Endpoint
c.CannonBin = "../cannon/bin/cannon"
c.CannonDatadir = g.t.TempDir()
c.CannonServer = "../op-program/bin/op-program"
c.CannonAbsolutePreState = "../op-program/bin/prestate.json"
c.CannonSnapshotFreq = 10_000_000
genesisBytes, err := json.Marshal(l2Genesis)
g.require.NoError(err, "marshall l2 genesis config")
genesisFile := filepath.Join(c.CannonDatadir, "l2-genesis.json")
g.require.NoError(os.WriteFile(genesisFile, genesisBytes, 0644))
c.CannonL2GenesisPath = genesisFile
rollupBytes, err := json.Marshal(rollupCfg)
g.require.NoError(err, "marshall rollup config")
rollupFile := filepath.Join(c.CannonDatadir, "rollup.json")
g.require.NoError(os.WriteFile(rollupFile, rollupBytes, 0644))
c.CannonRollupConfigPath = rollupFile
}
}
...@@ -82,6 +82,16 @@ func (g *FaultGameHelper) WaitForClaim(ctx context.Context, predicate func(claim ...@@ -82,6 +82,16 @@ func (g *FaultGameHelper) WaitForClaim(ctx context.Context, predicate func(claim
g.require.NoError(err) g.require.NoError(err)
} }
// getClaim retrieves the claim data for a specific index.
// Note that it is deliberately not exported as tests should use WaitForClaim to avoid race conditions.
func (g *FaultGameHelper) getClaim(ctx context.Context, claimIdx int64) ContractClaim {
claimData, err := g.game.ClaimData(&bind.CallOpts{Context: ctx}, big.NewInt(claimIdx))
if err != nil {
g.require.NoErrorf(err, "retrieve claim %v", claimIdx)
}
return claimData
}
func (g *FaultGameHelper) WaitForClaimAtMaxDepth(ctx context.Context, countered bool) { func (g *FaultGameHelper) WaitForClaimAtMaxDepth(ctx context.Context, countered bool) {
maxDepth := g.MaxDepth(ctx) maxDepth := g.MaxDepth(ctx)
g.WaitForClaim(ctx, func(claim ContractClaim) bool { g.WaitForClaim(ctx, func(claim ContractClaim) bool {
...@@ -122,3 +132,29 @@ func (g *FaultGameHelper) Attack(ctx context.Context, claimIdx int64, claim comm ...@@ -122,3 +132,29 @@ func (g *FaultGameHelper) Attack(ctx context.Context, claimIdx int64, claim comm
_, err = utils.WaitReceiptOK(ctx, g.client, tx.Hash()) _, err = utils.WaitReceiptOK(ctx, g.client, tx.Hash())
g.require.NoError(err, "Attack transaction was not OK") g.require.NoError(err, "Attack transaction was not OK")
} }
func (g *FaultGameHelper) Defend(ctx context.Context, claimIdx int64, claim common.Hash) {
tx, err := g.game.Defend(g.opts, big.NewInt(claimIdx), claim)
g.require.NoError(err, "Defend transaction did not send")
_, err = utils.WaitReceiptOK(ctx, g.client, tx.Hash())
g.require.NoError(err, "Defend transaction was not OK")
}
func (g *FaultGameHelper) LogGameData(ctx context.Context) {
opts := &bind.CallOpts{Context: ctx}
maxDepth := int(g.MaxDepth(ctx))
claimCount, err := g.game.ClaimDataLen(opts)
info := fmt.Sprintf("Claim count: %v\n", claimCount)
g.require.NoError(err, "Fetching claim count")
for i := int64(0); i < claimCount.Int64(); i++ {
claim, err := g.game.ClaimData(opts, big.NewInt(i))
g.require.NoErrorf(err, "Fetch claim %v", i)
pos := types.NewPositionFromGIndex(claim.Position.Uint64())
info = info + fmt.Sprintf("%v - Position: %v, Depth: %v, IndexAtDepth: %v Trace Index: %v, Value: %v, Countered: %v\n",
i, claim.Position.Int64(), pos.Depth(), pos.IndexAtDepth(), pos.TraceIndex(maxDepth), common.Hash(claim.Claim).Hex(), claim.Countered)
}
status, err := g.game.Status(opts)
g.require.NoError(err, "Load game status")
g.t.Logf("Game %v (%v):\n%v\n", g.addr, Status(status), info)
}
...@@ -84,7 +84,7 @@ func NewFactoryHelper(t *testing.T, ctx context.Context, deployments *genesis.L1 ...@@ -84,7 +84,7 @@ func NewFactoryHelper(t *testing.T, ctx context.Context, deployments *genesis.L1
} }
func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet string) *AlphabetGameHelper { func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet string) *AlphabetGameHelper {
h.waitForProposals(ctx) l2BlockNumber := h.waitForProposals(ctx)
l1Head := h.checkpointL1Block(ctx) l1Head := h.checkpointL1Block(ctx)
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
...@@ -94,7 +94,7 @@ func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet s ...@@ -94,7 +94,7 @@ func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet s
rootClaim, err := trace.Get(ctx, lastAlphabetTraceIndex) rootClaim, err := trace.Get(ctx, lastAlphabetTraceIndex)
h.require.NoError(err, "get root claim") h.require.NoError(err, "get root claim")
extraData := make([]byte, 64) extraData := make([]byte, 64)
binary.BigEndian.PutUint64(extraData[24:], uint64(8)) binary.BigEndian.PutUint64(extraData[24:], l2BlockNumber)
binary.BigEndian.PutUint64(extraData[56:], l1Head.Uint64()) binary.BigEndian.PutUint64(extraData[56:], l1Head.Uint64())
tx, err := h.factory.Create(h.opts, alphabetGameType, rootClaim, extraData) tx, err := h.factory.Create(h.opts, alphabetGameType, rootClaim, extraData)
h.require.NoError(err, "create fault dispute game") h.require.NoError(err, "create fault dispute game")
...@@ -120,14 +120,14 @@ func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet s ...@@ -120,14 +120,14 @@ func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet s
} }
func (h *FactoryHelper) StartCannonGame(ctx context.Context, rootClaim common.Hash) *CannonGameHelper { func (h *FactoryHelper) StartCannonGame(ctx context.Context, rootClaim common.Hash) *CannonGameHelper {
h.waitForProposals(ctx) l2BlockNumber := h.waitForProposals(ctx)
l1Head := h.checkpointL1Block(ctx) l1Head := h.checkpointL1Block(ctx)
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel() defer cancel()
extraData := make([]byte, 64) extraData := make([]byte, 64)
binary.BigEndian.PutUint64(extraData[24:], uint64(8)) binary.BigEndian.PutUint64(extraData[24:], l2BlockNumber)
binary.BigEndian.PutUint64(extraData[56:], l1Head.Uint64()) binary.BigEndian.PutUint64(extraData[56:], l1Head.Uint64())
tx, err := h.factory.Create(h.opts, cannonGameType, rootClaim, extraData) tx, err := h.factory.Create(h.opts, cannonGameType, rootClaim, extraData)
h.require.NoError(err, "create fault dispute game") h.require.NoError(err, "create fault dispute game")
...@@ -153,20 +153,30 @@ func (h *FactoryHelper) StartCannonGame(ctx context.Context, rootClaim common.Ha ...@@ -153,20 +153,30 @@ func (h *FactoryHelper) StartCannonGame(ctx context.Context, rootClaim common.Ha
// waitForProposals waits until there are at least two proposals in the output oracle // waitForProposals waits until there are at least two proposals in the output oracle
// This is the minimum required for creating a game. // This is the minimum required for creating a game.
func (h *FactoryHelper) waitForProposals(ctx context.Context) { // Returns the l2 block number of the latest available proposal
func (h *FactoryHelper) waitForProposals(ctx context.Context) uint64 {
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute) ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel() defer cancel()
err := utils.WaitFor(ctx, time.Second, func() (bool, error) { opts := &bind.CallOpts{Context: ctx}
index, err := h.l2oo.LatestOutputIndex(&bind.CallOpts{Context: ctx}) latestOutputIndex, err := utils.WaitAndGet(
if err != nil { ctx,
h.t.Logf("Could not get latest output index: %v", err.Error()) time.Second,
return false, nil func() (*big.Int, error) {
} index, err := h.l2oo.LatestOutputIndex(opts)
h.t.Logf("Latest output index: %v", index) if err != nil {
return index.Cmp(big.NewInt(1)) >= 0, nil h.t.Logf("Could not get latest output index: %v", err.Error())
}) return nil, nil
}
h.t.Logf("Latest output index: %v", index)
return index, nil
},
func(index *big.Int) bool {
return index != nil && index.Cmp(big.NewInt(1)) >= 0
})
h.require.NoError(err, "Did not get two output roots") h.require.NoError(err, "Did not get two output roots")
output, err := h.l2oo.GetL2Output(opts, latestOutputIndex)
h.require.NoErrorf(err, "Could not get latst output root index: %v", latestOutputIndex)
return output.L2BlockNumber.Uint64()
} }
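waitForProposals now polls with utils.WaitAndGet so it can hand back the latest output index it observed rather than just a success flag. A hedged sketch of the poll-until-predicate shape this relies on; the generic helper below illustrates the assumed semantics and is not the actual utils implementation.
package example
import (
	"context"
	"errors"
	"time"
)
// waitAndGet calls getter every interval until predicate accepts the value or
// the context expires, returning the accepted value. Sketch of assumed behaviour.
func waitAndGet[T any](ctx context.Context, interval time.Duration, getter func() (T, error), predicate func(T) bool) (T, error) {
	var zero T
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		val, err := getter()
		if err == nil && predicate(val) {
			return val, nil
		}
		select {
		case <-ctx.Done():
			return zero, errors.Join(ctx.Err(), err)
		case <-ticker.C:
		}
	}
}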
// checkpointL1Block stores the current L1 block in the oracle // checkpointL1Block stores the current L1 block in the oracle
......
package disputegame
import (
"context"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/stretchr/testify/require"
)
type HonestHelper struct {
t *testing.T
require *require.Assertions
game *FaultGameHelper
correctTrace types.TraceProvider
}
func (h *HonestHelper) Attack(ctx context.Context, claimIdx int64) {
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
claim := h.game.getClaim(ctx, claimIdx)
pos := types.NewPositionFromGIndex(claim.Position.Uint64())
attackPos := pos.Attack()
traceIdx := attackPos.TraceIndex(int(h.game.MaxDepth(ctx)))
h.t.Logf("Attacking at position %v using correct trace from index %v", attackPos.ToGIndex(), traceIdx)
value, err := h.correctTrace.Get(ctx, traceIdx)
h.require.NoErrorf(err, "Get correct claim at trace index %v", traceIdx)
h.t.Log("Performing attack")
h.game.Attack(ctx, claimIdx, value)
h.t.Log("Attack complete")
}
func (h *HonestHelper) Defend(ctx context.Context, claimIdx int64) {
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
claim := h.game.getClaim(ctx, claimIdx)
pos := types.NewPositionFromGIndex(claim.Position.Uint64())
defendPos := pos.Defend()
traceIdx := defendPos.TraceIndex(int(h.game.MaxDepth(ctx)))
value, err := h.correctTrace.Get(ctx, traceIdx)
h.game.require.NoErrorf(err, "Get correct claim at trace index %v", traceIdx)
h.game.Defend(ctx, claimIdx, value)
}
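The honest helper above resolves the referenced claim's attack or defend position to a trace index and answers with the correct trace value at that index. A small illustration, using only the position helpers already shown in this file, of how those two indices are derived for a given claim; the function name is illustrative and not part of HonestHelper.
package disputegame
import (
	"fmt"
	"math/big"
	"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
)
// logResponseIndices prints the trace indices that an attack and a defense of a
// claim at the given generalized-index position would read from the correct trace.
func logResponseIndices(position *big.Int, maxDepth int) {
	pos := types.NewPositionFromGIndex(position.Uint64())
	fmt.Printf("attack reads trace index %v\n", pos.Attack().TraceIndex(maxDepth))
	fmt.Printf("defend reads trace index %v\n", pos.Defend().TraceIndex(maxDepth))
}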
...@@ -21,6 +21,7 @@ func TestResolveDisputeGame(t *testing.T) { ...@@ -21,6 +21,7 @@ func TestResolveDisputeGame(t *testing.T) {
t.Cleanup(sys.Close) t.Cleanup(sys.Close)
disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys.cfg.L1Deployments, l1Client) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys.cfg.L1Deployments, l1Client)
game := disputeGameFactory.StartAlphabetGame(ctx, "zyxwvut") game := disputeGameFactory.StartAlphabetGame(ctx, "zyxwvut")
require.NotNil(t, game) require.NotNil(t, game)
gameDuration := game.GameDuration(ctx) gameDuration := game.GameDuration(ctx)
...@@ -147,6 +148,67 @@ func TestChallengerCompleteDisputeGame(t *testing.T) { ...@@ -147,6 +148,67 @@ func TestChallengerCompleteDisputeGame(t *testing.T) {
func TestCannonDisputeGame(t *testing.T) { func TestCannonDisputeGame(t *testing.T) {
InitParallel(t) InitParallel(t)
tests := []struct {
name string
defendAtClaim int64
}{
{"StepFirst", 0},
{"StepMiddle", 28},
{"StepInExtension", 2},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
InitParallel(t)
ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t)
t.Cleanup(sys.Close)
disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys.cfg.L1Deployments, l1Client)
game := disputeGameFactory.StartCannonGame(ctx, common.Hash{0xaa})
require.NotNil(t, game)
game.LogGameData(ctx)
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, sys.NodeEndpoint("l1"), sys.NodeEndpoint("sequencer"), "Challenger", func(c *config.Config) {
c.AgreeWithProposedOutput = true // Agree with the proposed output, so disagree with the root claim
c.TxMgrConfig.PrivateKey = e2eutils.EncodePrivKeyToString(sys.cfg.Secrets.Alice)
})
maxDepth := game.MaxDepth(ctx)
for claimCount := int64(1); claimCount < maxDepth; {
game.LogGameData(ctx)
claimCount++
// Wait for the challenger to counter
game.WaitForClaimCount(ctx, claimCount)
// Post our own counter to the latest challenger claim
if claimCount == test.defendAtClaim {
// Defend one claim so we don't wind up executing from the absolute pre-state
game.Defend(ctx, claimCount-1, common.Hash{byte(claimCount)})
} else {
game.Attack(ctx, claimCount-1, common.Hash{byte(claimCount)})
}
claimCount++
game.WaitForClaimCount(ctx, claimCount)
}
game.LogGameData(ctx)
// Wait for the challenger to call step and counter our invalid claim
game.WaitForClaimAtMaxDepth(ctx, true)
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, utils.WaitNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, disputegame.StatusChallengerWins)
game.LogGameData(ctx)
})
}
}
func TestCannonDefendStep(t *testing.T) {
InitParallel(t)
ctx := context.Background() ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t) sys, l1Client := startFaultDisputeSystem(t)
t.Cleanup(sys.Close) t.Cleanup(sys.Close)
...@@ -154,29 +216,47 @@ func TestCannonDisputeGame(t *testing.T) { ...@@ -154,29 +216,47 @@ func TestCannonDisputeGame(t *testing.T) {
disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys.cfg.L1Deployments, l1Client) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys.cfg.L1Deployments, l1Client)
game := disputeGameFactory.StartCannonGame(ctx, common.Hash{0xaa}) game := disputeGameFactory.StartCannonGame(ctx, common.Hash{0xaa})
require.NotNil(t, game) require.NotNil(t, game)
game.LogGameData(ctx)
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, sys.NodeEndpoint("l1"), sys.NodeEndpoint("sequencer"), "Challenger", func(c *config.Config) { l1Endpoint := sys.NodeEndpoint("l1")
l2Endpoint := sys.NodeEndpoint("sequencer")
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Challenger", func(c *config.Config) {
c.AgreeWithProposedOutput = true // Agree with the proposed output, so disagree with the root claim c.AgreeWithProposedOutput = true // Agree with the proposed output, so disagree with the root claim
c.TxMgrConfig.PrivateKey = e2eutils.EncodePrivKeyToString(sys.cfg.Secrets.Alice) c.TxMgrConfig.PrivateKey = e2eutils.EncodePrivKeyToString(sys.cfg.Secrets.Alice)
}) })
correctTrace := game.CreateHonestActor(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Client, l1Endpoint, l2Endpoint, func(c *config.Config) {
c.TxMgrConfig.PrivateKey = e2eutils.EncodePrivKeyToString(sys.cfg.Secrets.Mallory)
})
maxDepth := game.MaxDepth(ctx) maxDepth := game.MaxDepth(ctx)
for claimCount := int64(1); claimCount < maxDepth; { for claimCount := int64(1); claimCount < maxDepth; {
game.LogGameData(ctx)
claimCount++ claimCount++
// Wait for the challenger to counter // Wait for the challenger to counter
game.WaitForClaimCount(ctx, claimCount) game.WaitForClaimCount(ctx, claimCount)
// Post our own counter to the latest challenger claim // Post invalid claims for most steps to get down into the early part of the trace
game.Attack(ctx, claimCount-1, common.Hash{byte(claimCount)}) if claimCount < 28 {
game.Attack(ctx, claimCount-1, common.Hash{byte(claimCount)})
} else {
// Post our own counter but using the correct hash in low levels to force a defense step
correctTrace.Attack(ctx, claimCount-1)
}
claimCount++ claimCount++
game.LogGameData(ctx)
game.WaitForClaimCount(ctx, claimCount) game.WaitForClaimCount(ctx, claimCount)
} }
game.WaitForClaimAtMaxDepth(ctx, false)
game.LogGameData(ctx)
// Wait for the challenger to call step and counter our invalid claim
game.WaitForClaimAtMaxDepth(ctx, true)
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx)) sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, utils.WaitNextBlock(ctx, l1Client)) require.NoError(t, utils.WaitNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, disputegame.StatusChallengerWins) game.WaitForGameStatus(ctx, disputegame.StatusChallengerWins)
game.LogGameData(ctx)
} }
func startFaultDisputeSystem(t *testing.T) (*System, *ethclient.Client) { func startFaultDisputeSystem(t *testing.T) (*System, *ethclient.Client) {
......
...@@ -3,6 +3,8 @@ package client ...@@ -3,6 +3,8 @@ package client
import ( import (
"context" "context"
"fmt" "fmt"
"net"
"net/url"
"regexp" "regexp"
"time" "time"
...@@ -103,17 +105,31 @@ func NewRPC(ctx context.Context, lgr log.Logger, addr string, opts ...RPCOption) ...@@ -103,17 +105,31 @@ func NewRPC(ctx context.Context, lgr log.Logger, addr string, opts ...RPCOption)
func dialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string, attempts int, opts ...rpc.ClientOption) (*rpc.Client, error) { func dialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string, attempts int, opts ...rpc.ClientOption) (*rpc.Client, error) {
bOff := backoff.Exponential() bOff := backoff.Exponential()
return backoff.Do(ctx, attempts, bOff, func() (*rpc.Client, error) { return backoff.Do(ctx, attempts, bOff, func() (*rpc.Client, error) {
if !IsURLAvailable(addr) {
log.Warn("failed to dial address, but may connect later", "addr", addr)
return nil, fmt.Errorf("address unavailable (%s)", addr)
}
client, err := rpc.DialOptions(ctx, addr, opts...) client, err := rpc.DialOptions(ctx, addr, opts...)
if err != nil { if err != nil {
if client == nil { return nil, fmt.Errorf("failed to dial address (%s): %w", addr, err)
return nil, fmt.Errorf("failed to dial address (%s): %w", addr, err)
}
log.Warn("failed to dial address, but may connect later", "addr", addr, "err", err)
} }
return client, nil return client, nil
}) })
} }
func IsURLAvailable(address string) bool {
u, err := url.Parse(address)
if err != nil {
return false
}
conn, err := net.DialTimeout("tcp", u.Host, 5*time.Second)
if err != nil {
return false
}
conn.Close()
return true
}
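IsURLAvailable gives dialers a cheap TCP reachability probe, so an offline endpoint fails fast instead of consuming the full RPC dial timeout. A hedged usage sketch; the wrapper name below is illustrative.
package example
import (
	"context"
	"fmt"
	"github.com/ethereum-optimism/optimism/op-node/client"
	"github.com/ethereum/go-ethereum/rpc"
)
// dialIfReachable probes the endpoint's host:port before attempting a full dial.
func dialIfReachable(ctx context.Context, addr string) (*rpc.Client, error) {
	if !client.IsURLAvailable(addr) {
		return nil, fmt.Errorf("address unavailable (%s)", addr)
	}
	return rpc.DialOptions(ctx, addr)
}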
// BaseRPCClient is a wrapper around a concrete *rpc.Client instance to make it compliant // BaseRPCClient is a wrapper around a concrete *rpc.Client instance to make it compliant
// with the client.RPC interface. // with the client.RPC interface.
// It sets a timeout of 10s on CallContext & 20s on BatchCallContext made through it. // It sets a timeout of 10s on CallContext & 20s on BatchCallContext made through it.
......
...@@ -76,32 +76,32 @@ func New(ctx context.Context, cfg *Config, log log.Logger, snapshotLog log.Logge ...@@ -76,32 +76,32 @@ func New(ctx context.Context, cfg *Config, log log.Logger, snapshotLog log.Logge
func (n *OpNode) init(ctx context.Context, cfg *Config, snapshotLog log.Logger) error { func (n *OpNode) init(ctx context.Context, cfg *Config, snapshotLog log.Logger) error {
if err := n.initTracer(ctx, cfg); err != nil { if err := n.initTracer(ctx, cfg); err != nil {
return err return fmt.Errorf("failed to init the trace: %w", err)
} }
if err := n.initL1(ctx, cfg); err != nil { if err := n.initL1(ctx, cfg); err != nil {
return err return fmt.Errorf("failed to init L1: %w", err)
} }
if err := n.initRuntimeConfig(ctx, cfg); err != nil { if err := n.initRuntimeConfig(ctx, cfg); err != nil {
return err return fmt.Errorf("failed to init the runtime config: %w", err)
} }
if err := n.initL2(ctx, cfg, snapshotLog); err != nil { if err := n.initL2(ctx, cfg, snapshotLog); err != nil {
return err return fmt.Errorf("failed to init L2: %w", err)
} }
if err := n.initRPCSync(ctx, cfg); err != nil { if err := n.initRPCSync(ctx, cfg); err != nil {
return err return fmt.Errorf("failed to init RPC sync: %w", err)
} }
if err := n.initP2PSigner(ctx, cfg); err != nil { if err := n.initP2PSigner(ctx, cfg); err != nil {
return err return fmt.Errorf("failed to init the P2P signer: %w", err)
} }
if err := n.initP2P(ctx, cfg); err != nil { if err := n.initP2P(ctx, cfg); err != nil {
return err return fmt.Errorf("failed to init the P2P stack: %w", err)
} }
// Only expose the server at the end, ensuring all RPC backend components are initialized. // Only expose the server at the end, ensuring all RPC backend components are initialized.
if err := n.initRPCServer(ctx, cfg); err != nil { if err := n.initRPCServer(ctx, cfg); err != nil {
return err return fmt.Errorf("failed to init the RPC server: %w", err)
} }
if err := n.initMetricsServer(ctx, cfg); err != nil { if err := n.initMetricsServer(ctx, cfg); err != nil {
return err return fmt.Errorf("failed to init the metrics server: %w", err)
} }
return nil return nil
} }
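Wrapping each init step's failure with %w adds context about which stage failed while keeping the underlying cause inspectable. A minimal illustration with a hypothetical sentinel error:
package main
import (
	"errors"
	"fmt"
)
// errNoL1 is a hypothetical sentinel standing in for whatever an init step might return.
var errNoL1 = errors.New("no L1 endpoint configured")
func initL1() error { return errNoL1 }
func main() {
	err := fmt.Errorf("failed to init L1: %w", initL1())
	fmt.Println(errors.Is(err, errNoL1)) // prints true: the wrapped cause still matches
}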
...@@ -128,7 +128,7 @@ func (n *OpNode) initL1(ctx context.Context, cfg *Config) error { ...@@ -128,7 +128,7 @@ func (n *OpNode) initL1(ctx context.Context, cfg *Config) error {
} }
if err := cfg.Rollup.ValidateL1Config(ctx, n.l1Source); err != nil { if err := cfg.Rollup.ValidateL1Config(ctx, n.l1Source); err != nil {
return err return fmt.Errorf("failed to validate the L1 config: %w", err)
} }
// Keep subscribed to the L1 heads, which keeps the L1 maintainer pointing to the best headers to sync // Keep subscribed to the L1 heads, which keeps the L1 maintainer pointing to the best headers to sync
......
...@@ -188,13 +188,13 @@ func PreimageServer(ctx context.Context, logger log.Logger, cfg *config.Config, ...@@ -188,13 +188,13 @@ func PreimageServer(ctx context.Context, logger log.Logger, cfg *config.Config,
func makePrefetcher(ctx context.Context, logger log.Logger, kv kvstore.KV, cfg *config.Config) (*prefetcher.Prefetcher, error) { func makePrefetcher(ctx context.Context, logger log.Logger, kv kvstore.KV, cfg *config.Config) (*prefetcher.Prefetcher, error) {
logger.Info("Connecting to L1 node", "l1", cfg.L1URL) logger.Info("Connecting to L1 node", "l1", cfg.L1URL)
l1RPC, err := client.NewRPC(ctx, logger, cfg.L1URL) l1RPC, err := client.NewRPC(ctx, logger, cfg.L1URL, client.WithDialBackoff(10))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to setup L1 RPC: %w", err) return nil, fmt.Errorf("failed to setup L1 RPC: %w", err)
} }
logger.Info("Connecting to L2 node", "l2", cfg.L2URL) logger.Info("Connecting to L2 node", "l2", cfg.L2URL)
l2RPC, err := client.NewRPC(ctx, logger, cfg.L2URL) l2RPC, err := client.NewRPC(ctx, logger, cfg.L2URL, client.WithDialBackoff(10))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to setup L2 RPC: %w", err) return nil, fmt.Errorf("failed to setup L2 RPC: %w", err)
} }
......
...@@ -3,8 +3,6 @@ package client ...@@ -3,8 +3,6 @@ package client
import ( import (
"context" "context"
"fmt" "fmt"
"net"
"net/url"
"time" "time"
"github.com/ethereum-optimism/optimism/op-node/client" "github.com/ethereum-optimism/optimism/op-node/client"
...@@ -53,7 +51,7 @@ func DialRollupClientWithTimeout(timeout time.Duration, log log.Logger, url stri ...@@ -53,7 +51,7 @@ func DialRollupClientWithTimeout(timeout time.Duration, log log.Logger, url stri
func dialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string) (*rpc.Client, error) { func dialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string) (*rpc.Client, error) {
bOff := backoff.Fixed(defaultRetryTime) bOff := backoff.Fixed(defaultRetryTime)
return backoff.Do(ctx, defaultRetryCount, bOff, func() (*rpc.Client, error) { return backoff.Do(ctx, defaultRetryCount, bOff, func() (*rpc.Client, error) {
if !IsURLAvailable(addr) { if !client.IsURLAvailable(addr) {
log.Warn("failed to dial address, but may connect later", "addr", addr) log.Warn("failed to dial address, but may connect later", "addr", addr)
return nil, fmt.Errorf("address unavailable (%s)", addr) return nil, fmt.Errorf("address unavailable (%s)", addr)
} }
...@@ -64,16 +62,3 @@ func dialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string) ...@@ -64,16 +62,3 @@ func dialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string)
return client, nil return client, nil
}) })
} }
func IsURLAvailable(address string) bool {
u, err := url.Parse(address)
if err != nil {
return false
}
conn, err := net.DialTimeout("tcp", u.Host, 5*time.Second)
if err != nil {
return false
}
conn.Close()
return true
}
FROM ethereum/client-go:v1.12.0 FROM ethereum/client-go:v1.12.1
RUN apk add --no-cache jq RUN apk add --no-cache jq
......
...@@ -297,7 +297,78 @@ LegacyERC20ETH_Test:test_transferFrom_doesNotExist_reverts() (gas: 12957) ...@@ -297,7 +297,78 @@ LegacyERC20ETH_Test:test_transferFrom_doesNotExist_reverts() (gas: 12957)
LegacyERC20ETH_Test:test_transfer_doesNotExist_reverts() (gas: 10755) LegacyERC20ETH_Test:test_transfer_doesNotExist_reverts() (gas: 10755)
LegacyMessagePasser_Test:test_passMessageToL1_succeeds() (gas: 34524) LegacyMessagePasser_Test:test_passMessageToL1_succeeds() (gas: 34524)
LibPosition_Test:test_pos_correctness_succeeds() (gas: 38689) LibPosition_Test:test_pos_correctness_succeeds() (gas: 38689)
MIPS_Test:test_step_abi_succeeds() (gas: 57803) MIPS_Test:test_add_succeeds() (gas: 121593)
MIPS_Test:test_addi_succeeds() (gas: 121896)
MIPS_Test:test_addu_succeeds() (gas: 121645)
MIPS_Test:test_addui_succeeds() (gas: 121953)
MIPS_Test:test_and_succeeds() (gas: 121628)
MIPS_Test:test_andi_succeeds() (gas: 121770)
MIPS_Test:test_beq_succeeds() (gas: 202355)
MIPS_Test:test_bgez_succeeds() (gas: 121507)
MIPS_Test:test_bgtz_succeeds() (gas: 121428)
MIPS_Test:test_blez_succeeds() (gas: 121406)
MIPS_Test:test_bltz_succeeds() (gas: 121482)
MIPS_Test:test_bne_succeeds() (gas: 121548)
MIPS_Test:test_branch_inDelaySlot_fails() (gas: 85977)
MIPS_Test:test_brk_succeeds() (gas: 121509)
MIPS_Test:test_clo_succeeds() (gas: 121991)
MIPS_Test:test_clone_succeeds() (gas: 121484)
MIPS_Test:test_clz_succeeds() (gas: 122440)
MIPS_Test:test_div_succeeds() (gas: 121806)
MIPS_Test:test_divu_succeeds() (gas: 121806)
MIPS_Test:test_exit_succeeds() (gas: 121386)
MIPS_Test:test_fcntl_succeeds() (gas: 203171)
MIPS_Test:test_illegal_instruction_fails() (gas: 91153)
MIPS_Test:test_invalid_root_fails() (gas: 435656)
MIPS_Test:test_jal_succeeds() (gas: 117399)
MIPS_Test:test_jalr_succeeds() (gas: 121349)
MIPS_Test:test_jr_succeeds() (gas: 121138)
MIPS_Test:test_jump_inDelaySlot_fails() (gas: 85512)
MIPS_Test:test_jump_succeeds() (gas: 120353)
MIPS_Test:test_lb_succeeds() (gas: 127346)
MIPS_Test:test_lbu_succeeds() (gas: 127266)
MIPS_Test:test_lh_succeeds() (gas: 127345)
MIPS_Test:test_lhu_succeeds() (gas: 127262)
MIPS_Test:test_ll_succeeds() (gas: 127282)
MIPS_Test:test_lui_succeeds() (gas: 121531)
MIPS_Test:test_lw_succeeds() (gas: 127158)
MIPS_Test:test_lwl_succeeds() (gas: 241457)
MIPS_Test:test_lwr_succeeds() (gas: 241767)
MIPS_Test:test_mfhi_succeeds() (gas: 121458)
MIPS_Test:test_mflo_succeeds() (gas: 121484)
MIPS_Test:test_mmap_succeeds() (gas: 118492)
MIPS_Test:test_movn_succeeds() (gas: 202409)
MIPS_Test:test_movz_succeeds() (gas: 202313)
MIPS_Test:test_mthi_succeeds() (gas: 121428)
MIPS_Test:test_mtlo_succeeds() (gas: 121478)
MIPS_Test:test_mul_succeeds() (gas: 121541)
MIPS_Test:test_mult_succeeds() (gas: 121645)
MIPS_Test:test_multu_succeeds() (gas: 121698)
MIPS_Test:test_nor_succeeds() (gas: 121739)
MIPS_Test:test_or_succeeds() (gas: 121635)
MIPS_Test:test_ori_succeeds() (gas: 121865)
MIPS_Test:test_preimage_read_succeeds() (gas: 235922)
MIPS_Test:test_preimage_write_succeeds() (gas: 126473)
MIPS_Test:test_prestate_exited_succeeds() (gas: 112970)
MIPS_Test:test_sb_succeeds() (gas: 159993)
MIPS_Test:test_sc_succeeds() (gas: 160187)
MIPS_Test:test_sh_succeeds() (gas: 160096)
MIPS_Test:test_sll_succeeds() (gas: 121434)
MIPS_Test:test_sllv_succeeds() (gas: 121624)
MIPS_Test:test_slt_succeeds() (gas: 203244)
MIPS_Test:test_sltu_succeeds() (gas: 121871)
MIPS_Test:test_sra_succeeds() (gas: 121719)
MIPS_Test:test_srav_succeeds() (gas: 121959)
MIPS_Test:test_srl_succeeds() (gas: 121514)
MIPS_Test:test_srlv_succeeds() (gas: 121707)
MIPS_Test:test_step_abi_succeeds() (gas: 57876)
MIPS_Test:test_sub_succeeds() (gas: 121674)
MIPS_Test:test_subu_succeeds() (gas: 121682)
MIPS_Test:test_sw_succeeds() (gas: 160050)
MIPS_Test:test_swl_succeeds() (gas: 160066)
MIPS_Test:test_swr_succeeds() (gas: 160141)
MIPS_Test:test_xor_succeeds() (gas: 121685)
MIPS_Test:test_xori_succeeds() (gas: 121894)
MerkleTrie_get_Test:test_get_corruptedProof_reverts() (gas: 5733) MerkleTrie_get_Test:test_get_corruptedProof_reverts() (gas: 5733)
MerkleTrie_get_Test:test_get_extraProofElements_reverts() (gas: 58889) MerkleTrie_get_Test:test_get_extraProofElements_reverts() (gas: 58889)
MerkleTrie_get_Test:test_get_invalidDataRemainder_reverts() (gas: 35845) MerkleTrie_get_Test:test_get_invalidDataRemainder_reverts() (gas: 35845)
...@@ -360,9 +431,9 @@ OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_ifOutp ...@@ -360,9 +431,9 @@ OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_ifOutp
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_ifOutputTimestampIsNotFinalized_reverts() (gas: 182306) OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_ifOutputTimestampIsNotFinalized_reverts() (gas: 182306)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_ifWithdrawalNotProven_reverts() (gas: 41780) OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_ifWithdrawalNotProven_reverts() (gas: 41780)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_ifWithdrawalProofNotOldEnough_reverts() (gas: 173953) OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_ifWithdrawalProofNotOldEnough_reverts() (gas: 173953)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_onInsufficientGas_reverts() (gas: 180701) OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_onInsufficientGas_reverts() (gas: 180724)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_onRecentWithdrawal_reverts() (gas: 154740) OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_onRecentWithdrawal_reverts() (gas: 154740)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_onReentrancy_reverts() (gas: 218747) OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_onReentrancy_reverts() (gas: 218770)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_onReplay_reverts() (gas: 220983) OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_onReplay_reverts() (gas: 220983)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_paused_reverts() (gas: 38706) OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_paused_reverts() (gas: 38706)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_provenWithdrawalHash_succeeds() (gas: 209679) OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_provenWithdrawalHash_succeeds() (gas: 209679)
......
...@@ -44,7 +44,7 @@ ...@@ -44,7 +44,7 @@
"l1GenesisBlockTimestamp": "0x64c811bf", "l1GenesisBlockTimestamp": "0x64c811bf",
"l2GenesisRegolithTimeOffset": "0x0", "l2GenesisRegolithTimeOffset": "0x0",
"faultGameAbsolutePrestate": "0x41c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98", "faultGameAbsolutePrestate": "0x41c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98",
"faultGameMaxDepth": 31, "faultGameMaxDepth": 30,
"faultGameMaxDuration": 300, "faultGameMaxDuration": 300,
"systemConfigStartBlock": 0 "systemConfigStartBlock": 0
} }
...@@ -8,13 +8,13 @@ ...@@ -8,13 +8,13 @@
"maxSequencerDrift": 600, "maxSequencerDrift": 600,
"sequencerWindowSize": 3600, "sequencerWindowSize": 3600,
"channelTimeout": 300, "channelTimeout": 300,
"p2pSequencerAddress": "0x715b7219D986641DF9eFd9C7Ef01218D528e19ec", "p2pSequencerAddress": "0x57CACBB0d30b01eb2462e5dC940c161aff3230D3",
"batchInboxAddress": "0xff00000000000000000000000000000011155420", "batchInboxAddress": "0xff00000000000000000000000000000011155420",
"batchSenderAddress": "0x7431310e026B69BFC676C0013E12A1A11411EEc9", "batchSenderAddress": "0x8F23BB38F531600e5d8FDDaAEC41F13FaB46E98c",
"l2OutputOracleSubmissionInterval": 120, "l2OutputOracleSubmissionInterval": 120,
"l2OutputOracleStartingBlockNumber": 0, "l2OutputOracleStartingBlockNumber": 0,
"l2OutputOracleStartingTimestamp": 0, "l2OutputOracleStartingTimestamp": -1,
"l2OutputOracleProposer": "0x02b1786A85Ec3f71fBbBa46507780dB7cF9014f6", "l2OutputOracleProposer": "0x49277EE36A024120Ee218127354c4a3591dc90A9",
"l2OutputOracleChallenger": "0xfd1D2e729aE8eEe2E146c033bf4400fE75284301", "l2OutputOracleChallenger": "0xfd1D2e729aE8eEe2E146c033bf4400fE75284301",
"finalizationPeriodSeconds": 12, "finalizationPeriodSeconds": 12,
"proxyAdminOwner": "0xfd1D2e729aE8eEe2E146c033bf4400fE75284301", "proxyAdminOwner": "0xfd1D2e729aE8eEe2E146c033bf4400fE75284301",
...@@ -37,5 +37,6 @@ ...@@ -37,5 +37,6 @@
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00", "l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
"eip1559Denominator": 50, "eip1559Denominator": 50,
"eip1559Elasticity": 6, "eip1559Elasticity": 6,
"l2GenesisRegolithTimeOffset": "0x0" "l2GenesisRegolithTimeOffset": "0x0",
"systemConfigStartBlock": 0
} }
...@@ -11,7 +11,7 @@ remappings = [ ...@@ -11,7 +11,7 @@ remappings = [
'@rari-capital/solmate/=lib/solmate', '@rari-capital/solmate/=lib/solmate',
"@cwia/=lib/clones-with-immutable-args/src", "@cwia/=lib/clones-with-immutable-args/src",
'forge-std/=lib/forge-std/src', 'forge-std/=lib/forge-std/src',
'ds-test/=lib/ds-test/src' 'ds-test/=lib/forge-std/lib/ds-test/src'
] ]
extra_output = ['devdoc', 'userdoc', 'metadata', 'storageLayout'] extra_output = ['devdoc', 'userdoc', 'metadata', 'storageLayout']
bytecode_hash = 'none' bytecode_hash = 'none'
...@@ -21,12 +21,12 @@ ffi = true ...@@ -21,12 +21,12 @@ ffi = true
fuzz_runs = 16 fuzz_runs = 16
fs_permissions = [ fs_permissions = [
{ 'access'='read-write', 'path'='./.resource-metering.csv' }, { access='read-write', path='./.resource-metering.csv' },
{ 'access'='read-write', 'path'='./deployments/' }, { access='read-write', path='./deployments/' },
{ 'access'='read', 'path'='./deploy-config/' }, { access='read', path='./deploy-config/' },
{ 'access'='read', 'path'='./broadcast/' }, { access='read', path='./broadcast/' },
{ access = 'read', path = './forge-artifacts/' }, { access='read', path = './forge-artifacts/' },
{ 'access'='write', 'path'='./semver-lock.json' }, { access='write', path='./semver-lock.json' },
] ]
[fmt] [fmt]
......
Subproject commit c9ce3f25bde29fc5eb9901842bf02850dfd2d084
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
"validate-spacers": "pnpm build && npx ts-node scripts/validate-spacers.ts", "validate-spacers": "pnpm build && npx ts-node scripts/validate-spacers.ts",
"slither": "./scripts/slither.sh", "slither": "./scripts/slither.sh",
"slither:triage": "TRIAGE_MODE=1 ./scripts/slither.sh", "slither:triage": "TRIAGE_MODE=1 ./scripts/slither.sh",
"clean": "rm -rf ./artifacts ./forge-artifacts ./cache ./tsconfig.tsbuildinfo ./tsconfig.build.tsbuildinfo ./test-case-generator/fuzz ./scripts/differential-testing", "clean": "rm -rf ./artifacts ./forge-artifacts ./cache ./tsconfig.tsbuildinfo ./tsconfig.build.tsbuildinfo ./test-case-generator/fuzz ./scripts/differential-testing/differential-testing",
"preinstall": "npx only-allow pnpm", "preinstall": "npx only-allow pnpm",
"lint:ts:check": "eslint . --max-warnings=0", "lint:ts:check": "eslint . --max-warnings=0",
"lint:forge-tests:check": "ts-node scripts/forge-test-names.ts", "lint:forge-tests:check": "ts-node scripts/forge-test-names.ts",
......
...@@ -64,31 +64,8 @@ contract Deploy is Deployer { ...@@ -64,31 +64,8 @@ contract Deploy is Deployer {
function run() public { function run() public {
console.log("Deploying L1 system"); console.log("Deploying L1 system");
deployAddressManager(); deployProxies();
deployProxyAdmin(); deployImplementations();
deployOptimismPortalProxy();
deployL2OutputOracleProxy();
deploySystemConfigProxy();
deployL1StandardBridgeProxy();
deployL1CrossDomainMessengerProxy();
deployOptimismMintableERC20FactoryProxy();
deployL1ERC721BridgeProxy();
deployDisputeGameFactoryProxy();
deployOptimismPortal();
deployL1CrossDomainMessenger();
deployL2OutputOracle();
deployOptimismMintableERC20Factory();
deploySystemConfig();
deployL1StandardBridge();
deployL1ERC721Bridge();
deployDisputeGameFactory();
deployBlockOracle();
deployPreimageOracle();
deployMips();
transferAddressManagerOwnership();
initializeDisputeGameFactory(); initializeDisputeGameFactory();
initializeSystemConfig(); initializeSystemConfig();
...@@ -120,6 +97,38 @@ contract Deploy is Deployer { ...@@ -120,6 +97,38 @@ contract Deploy is Deployer {
} }
} }
/// @notice Deploy all of the proxies
function deployProxies() public {
deployAddressManager();
deployProxyAdmin();
deployOptimismPortalProxy();
deployL2OutputOracleProxy();
deploySystemConfigProxy();
deployL1StandardBridgeProxy();
deployL1CrossDomainMessengerProxy();
deployOptimismMintableERC20FactoryProxy();
deployL1ERC721BridgeProxy();
deployDisputeGameFactoryProxy();
transferAddressManagerOwnership();
}
/// @notice Deploy all of the implementations
function deployImplementations() public {
deployOptimismPortal();
deployL1CrossDomainMessenger();
deployL2OutputOracle();
deployOptimismMintableERC20Factory();
deploySystemConfig();
deployL1StandardBridge();
deployL1ERC721Bridge();
deployDisputeGameFactory();
deployBlockOracle();
deployPreimageOracle();
deployMips();
}
/// @notice Deploy the AddressManager /// @notice Deploy the AddressManager
function deployAddressManager() public broadcast returns (address addr_) { function deployAddressManager() public broadcast returns (address addr_) {
AddressManager manager = new AddressManager(); AddressManager manager = new AddressManager();
...@@ -386,7 +395,6 @@ contract Deploy is Deployer { ...@@ -386,7 +395,6 @@ contract Deploy is Deployer {
/// @notice Deploy the SystemConfig /// @notice Deploy the SystemConfig
function deploySystemConfig() public broadcast returns (address addr_) { function deploySystemConfig() public broadcast returns (address addr_) {
SystemConfig config = new SystemConfig(); SystemConfig config = new SystemConfig();
bytes32 batcherHash = bytes32(uint256(uint160(cfg.batchSenderAddress())));
require(config.owner() == address(0xdEaD)); require(config.owner() == address(0xdEaD));
require(config.overhead() == 0); require(config.overhead() == 0);
...@@ -409,7 +417,7 @@ contract Deploy is Deployer { ...@@ -409,7 +417,7 @@ contract Deploy is Deployer {
require(config.optimismPortal() == address(0)); require(config.optimismPortal() == address(0));
require(config.l1CrossDomainMessenger() == address(0)); require(config.l1CrossDomainMessenger() == address(0));
require(config.optimismMintableERC20Factory() == address(0)); require(config.optimismMintableERC20Factory() == address(0));
require(config.startBlock() == 0); require(config.startBlock() == type(uint256).max);
save("SystemConfig", address(config)); save("SystemConfig", address(config));
console.log("SystemConfig deployed at %s", address(config)); console.log("SystemConfig deployed at %s", address(config));
...@@ -481,6 +489,7 @@ contract Deploy is Deployer { ...@@ -481,6 +489,7 @@ contract Deploy is Deployer {
address systemConfig = mustGetAddress("SystemConfig"); address systemConfig = mustGetAddress("SystemConfig");
bytes32 batcherHash = bytes32(uint256(uint160(cfg.batchSenderAddress()))); bytes32 batcherHash = bytes32(uint256(uint160(cfg.batchSenderAddress())));
uint256 startBlock = cfg.systemConfigStartBlock();
proxyAdmin.upgradeAndCall({ proxyAdmin.upgradeAndCall({
_proxy: payable(systemConfigProxy), _proxy: payable(systemConfigProxy),
...@@ -495,7 +504,7 @@ contract Deploy is Deployer { ...@@ -495,7 +504,7 @@ contract Deploy is Deployer {
uint64(cfg.l2GenesisBlockGasLimit()), uint64(cfg.l2GenesisBlockGasLimit()),
cfg.p2pSequencerAddress(), cfg.p2pSequencerAddress(),
Constants.DEFAULT_RESOURCE_CONFIG(), Constants.DEFAULT_RESOURCE_CONFIG(),
cfg.systemConfigStartBlock(), startBlock,
cfg.batchInboxAddress(), cfg.batchInboxAddress(),
SystemConfig.Addresses({ SystemConfig.Addresses({
l1CrossDomainMessenger: mustGetAddress("L1CrossDomainMessengerProxy"), l1CrossDomainMessenger: mustGetAddress("L1CrossDomainMessengerProxy"),
...@@ -533,7 +542,13 @@ contract Deploy is Deployer { ...@@ -533,7 +542,13 @@ contract Deploy is Deployer {
require(config.l2OutputOracle() == mustGetAddress("L2OutputOracleProxy")); require(config.l2OutputOracle() == mustGetAddress("L2OutputOracleProxy"));
require(config.optimismPortal() == mustGetAddress("OptimismPortalProxy")); require(config.optimismPortal() == mustGetAddress("OptimismPortalProxy"));
require(config.l1CrossDomainMessenger() == mustGetAddress("L1CrossDomainMessengerProxy")); require(config.l1CrossDomainMessenger() == mustGetAddress("L1CrossDomainMessengerProxy"));
require(config.startBlock() == cfg.systemConfigStartBlock());
// A non-zero start block in the config is an override; otherwise it defaults to the deployment block
if (startBlock != 0) {
require(config.startBlock() == startBlock);
} else {
require(config.startBlock() == block.number);
}
} }
/// @notice Initialize the L1StandardBridge /// @notice Initialize the L1StandardBridge
......
...@@ -5,7 +5,9 @@ import ( ...@@ -5,7 +5,9 @@ import (
"fmt" "fmt"
"math/big" "math/big"
"os" "os"
"strconv"
"github.com/ethereum-optimism/optimism/cannon/mipsevm"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys" "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain" "github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi"
...@@ -60,6 +62,15 @@ var ( ...@@ -60,6 +62,15 @@ var (
proveWithdrawalInputsArgs = abi.Arguments{ proveWithdrawalInputsArgs = abi.Arguments{
{Name: "inputs", Type: proveWithdrawalInputs}, {Name: "inputs", Type: proveWithdrawalInputs},
} }
// cannonMemoryProof inputs tuple (bytes32, bytes)
cannonMemoryProof, _ = abi.NewType("tuple", "CannonMemoryProof", []abi.ArgumentMarshaling{
{Name: "memRoot", Type: "bytes32"},
{Name: "proof", Type: "bytes"},
})
cannonMemoryProofArgs = abi.Arguments{
{Name: "encodedCannonMemoryProof", Type: cannonMemoryProof},
}
) )
func main() { func main() {
...@@ -312,6 +323,39 @@ func main() { ...@@ -312,6 +323,39 @@ func main() {
// Print the output // Print the output
fmt.Print(hexutil.Encode(packed[32:])) fmt.Print(hexutil.Encode(packed[32:]))
case "cannonMemoryProof":
// <pc, insn, [memAddr, memValue]>
mem := mipsevm.NewMemory()
if len(args) != 3 && len(args) != 5 {
panic("Error: cannonMemoryProofWithProof requires 2 or 4 arguments")
}
pc, err := strconv.ParseUint(args[1], 10, 32)
checkErr(err, "Error decoding pc")
insn, err := strconv.ParseUint(args[2], 10, 32)
checkErr(err, "Error decoding insn")
mem.SetMemory(uint32(pc), uint32(insn))
var insnProof, memProof [896]byte
if len(args) == 5 {
memAddr, err := strconv.ParseUint(args[3], 10, 32)
checkErr(err, "Error decoding memAddr")
memValue, err := strconv.ParseUint(args[4], 10, 32)
checkErr(err, "Error decoding memValue")
mem.SetMemory(uint32(memAddr), uint32(memValue))
memProof = mem.MerkleProof(uint32(memAddr))
}
insnProof = mem.MerkleProof(uint32(pc))
output := struct {
MemRoot common.Hash
Proof []byte
}{
MemRoot: mem.MerkleRoot(),
Proof: append(insnProof[:], memProof[:]...),
}
packed, err := cannonMemoryProofArgs.Pack(&output)
checkErr(err, "Error encoding output")
fmt.Print(hexutil.Encode(packed[32:]))
default: default:
panic(fmt.Errorf("Unknown command: %s", args[0])) panic(fmt.Errorf("Unknown command: %s", args[0]))
} }
......
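The cannonMemoryProof command above builds a MIPS memory image, writes the instruction word at the PC (and optionally one extra memory word), and returns the Merkle root plus the concatenated proofs. A standalone sketch of producing the same kind of proof directly with the mipsevm package; the PC and instruction word are illustrative values only.
package main
import (
	"fmt"
	"github.com/ethereum-optimism/optimism/cannon/mipsevm"
)
func main() {
	mem := mipsevm.NewMemory()
	pc := uint32(0)
	insn := uint32(0x24090001) // illustrative instruction word
	mem.SetMemory(pc, insn)
	proof := mem.MerkleProof(pc) // fixed-size Merkle proof for the leaf containing pc
	fmt.Printf("memRoot: %x\n", mem.MerkleRoot())
	fmt.Printf("proof length: %d bytes\n", len(proof))
}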
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
"src/L1/L1StandardBridge.sol": "0xbd7b303cefe46bc14bf1a2b81e5702ff45ce9c5257524e59778e11c75f7f5bdc", "src/L1/L1StandardBridge.sol": "0xbd7b303cefe46bc14bf1a2b81e5702ff45ce9c5257524e59778e11c75f7f5bdc",
"src/L1/L2OutputOracle.sol": "0x05ea17a834563ffa50cade81189b120b6f0805ba316d6a9893c8cf8b231e57e3", "src/L1/L2OutputOracle.sol": "0x05ea17a834563ffa50cade81189b120b6f0805ba316d6a9893c8cf8b231e57e3",
"src/L1/OptimismPortal.sol": "0xeefcc16d30e14ed7ce9970f3aeaf1d5668324b3fc1ddb4790da5804cfdd78980", "src/L1/OptimismPortal.sol": "0xeefcc16d30e14ed7ce9970f3aeaf1d5668324b3fc1ddb4790da5804cfdd78980",
"src/L1/SystemConfig.sol": "0x932c896b1bc2a32227bfe30aa66e1e6d17f057cc9a2562876bf7730858041895", "src/L1/SystemConfig.sol": "0x29beec0a03b9602a53e3ceaec2354972d917f8b80f1b3a8f03f4fb7a67753fce",
"src/L2/BaseFeeVault.sol": "0xd8df28898799b80c370e77e9aad09f79235dfda2bf13e56daf21997cfe54200d", "src/L2/BaseFeeVault.sol": "0xd8df28898799b80c370e77e9aad09f79235dfda2bf13e56daf21997cfe54200d",
"src/L2/GasPriceOracle.sol": "0xb7d8c4f3ea8db31900125e341aae42a862a2b7d3f1c1aa60c97dc2d0e022b7ba", "src/L2/GasPriceOracle.sol": "0xb7d8c4f3ea8db31900125e341aae42a862a2b7d3f1c1aa60c97dc2d0e022b7ba",
"src/L2/L1Block.sol": "0x38ea78a9611656a60ae4d58db75e96413a638e3ccb2e935052441f98a1fd3105", "src/L2/L1Block.sol": "0x38ea78a9611656a60ae4d58db75e96413a638e3ccb2e935052441f98a1fd3105",
......
...@@ -4,7 +4,6 @@ pragma solidity 0.8.19; ...@@ -4,7 +4,6 @@ pragma solidity 0.8.19;
import { EIP712 } from "@openzeppelin/contracts/utils/cryptography/draft-EIP712.sol"; import { EIP712 } from "@openzeppelin/contracts/utils/cryptography/draft-EIP712.sol";
import { ECDSA } from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; import { ECDSA } from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol";
// prettier-ignore
import { import {
AttestationRequest, AttestationRequest,
AttestationRequestData, AttestationRequestData,
......
...@@ -98,11 +98,11 @@ contract SystemConfig is OwnableUpgradeable, Semver { ...@@ -98,11 +98,11 @@ contract SystemConfig is OwnableUpgradeable, Semver {
/// @notice The block at which the op-node can start searching for logs from. /// @notice The block at which the op-node can start searching for logs from.
uint256 public startBlock; uint256 public startBlock;
/// @custom:semver 1.4.1 /// @custom:semver 1.5.0
/// @notice Constructs the SystemConfig contract. Cannot set /// @notice Constructs the SystemConfig contract. Cannot set
/// the owner to `address(0)` due to the Ownable contract's /// the owner to `address(0)` due to the Ownable contract's
/// implementation, so set it to `address(0xdEaD)` /// implementation, so set it to `address(0xdEaD)`
constructor() Semver(1, 4, 1) { constructor() Semver(1, 5, 0) {
initialize({ initialize({
_owner: address(0xdEaD), _owner: address(0xdEaD),
_overhead: 0, _overhead: 0,
...@@ -118,7 +118,7 @@ contract SystemConfig is OwnableUpgradeable, Semver { ...@@ -118,7 +118,7 @@ contract SystemConfig is OwnableUpgradeable, Semver {
systemTxMaxGas: 0, systemTxMaxGas: 0,
maximumBaseFee: 0 maximumBaseFee: 0
}), }),
_startBlock: 0, _startBlock: type(uint256).max,
_batchInbox: address(0), _batchInbox: address(0),
_addresses: SystemConfig.Addresses({ _addresses: SystemConfig.Addresses({
l1CrossDomainMessenger: address(0), l1CrossDomainMessenger: address(0),
......
...@@ -678,6 +678,38 @@ contract FFIInterface is Test { ...@@ -678,6 +678,38 @@ contract FFIInterface is Test {
return abi.decode(vm.ffi(cmds), (bytes32, bytes, bytes, bytes[])); return abi.decode(vm.ffi(cmds), (bytes32, bytes, bytes, bytes[]));
} }
function getCannonMemoryProof(uint32 pc, uint32 insn) external returns (bytes32, bytes memory) {
string[] memory cmds = new string[](4);
cmds[0] = "scripts/differential-testing/differential-testing";
cmds[1] = "cannonMemoryProof";
cmds[2] = vm.toString(pc);
cmds[3] = vm.toString(insn);
bytes memory result = vm.ffi(cmds);
(bytes32 memRoot, bytes memory proof) = abi.decode(result, (bytes32, bytes));
return (memRoot, proof);
}
function getCannonMemoryProof(
uint32 pc,
uint32 insn,
uint32 memAddr,
uint32 memVal
)
external
returns (bytes32, bytes memory)
{
string[] memory cmds = new string[](6);
cmds[0] = "scripts/differential-testing/differential-testing";
cmds[1] = "cannonMemoryProof";
cmds[2] = vm.toString(pc);
cmds[3] = vm.toString(insn);
cmds[4] = vm.toString(memAddr);
cmds[5] = vm.toString(memVal);
bytes memory result = vm.ffi(cmds);
(bytes32 memRoot, bytes memory proof) = abi.decode(result, (bytes32, bytes));
return (memRoot, proof);
}
} }
// Used for testing a future upgrade beyond the current implementations. // Used for testing a future upgrade beyond the current implementations.
......