Commit ebc6571b authored by mergify[bot], committed by GitHub

Merge branch 'develop' into feat/goerli-upgrade

parents 30c12ecf ebd642a8
@@ -705,6 +705,18 @@ jobs:
command: make fuzz
working_directory: op-node
fuzz-op-service:
docker:
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
steps:
- checkout
- check-changed:
patterns: op-service
- run:
name: Fuzz
command: make fuzz
working_directory: op-service
fuzz-op-chain-ops:
docker:
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
@@ -1241,6 +1253,7 @@ workflows:
- semgrep-scan
- go-mod-tidy
- fuzz-op-node
- fuzz-op-service
- fuzz-op-chain-ops
- fuzz-cannon
- bedrock-markdown
......
@@ -20,49 +20,10 @@ updates:
directory: "/"
schedule:
interval: daily
time: "16:30"
timezone: "America/New_York"
open-pull-requests-limit: 10
labels: versioning-strategy: auto
- dependabot
- package-ecosystem: npm
directory: "/packages/chain-mon"
schedule:
interval: daily
open-pull-requests-limit: 10
labels:
- dependabot
- package-ecosystem: npm
directory: "/packages/core-utils"
schedule:
interval: daily
open-pull-requests-limit: 10
labels:
- dependabot
- package-ecosystem: npm
directory: "/packages/contracts-ts"
schedule:
interval: daily
open-pull-requests-limit: 10
labels:
- dependabot
- package-ecosystem: npm
directory: "/packages/sdk"
schedule:
interval: daily
open-pull-requests-limit: 10
labels:
- dependabot
- package-ecosystem: npm
directory: "/packages/fee-estimation"
schedule:
interval: daily
open-pull-requests-limit: 10
labels:
- dependabot
- package-ecosystem: npm
directory: "/packages/common-ts"
schedule:
interval: daily
open-pull-requests-limit: 10
labels:
- dependabot
......
@@ -112,9 +112,17 @@ func NewEVMEnv(contracts *Contracts, addrs *Addresses) (*vm.EVM, *state.StateDB)
env := vm.NewEVM(blockContext, vm.TxContext{}, state, chainCfg, vmCfg)
// pre-deploy the contracts
env.StateDB.SetCode(addrs.MIPS, contracts.MIPS.DeployedBytecode.Object)
env.StateDB.SetCode(addrs.Oracle, contracts.Oracle.DeployedBytecode.Object)
env.StateDB.SetState(addrs.MIPS, common.Hash{}, addrs.Oracle.Hash())
var mipsCtorArgs [32]byte
copy(mipsCtorArgs[12:], addrs.Oracle[:])
mipsDeploy := append(hexutil.MustDecode(bindings.MIPSMetaData.Bin), mipsCtorArgs[:]...)
startingGas := uint64(30_000_000)
_, deployedMipsAddr, leftOverGas, err := env.Create(vm.AccountRef(addrs.Sender), mipsDeploy, startingGas, big.NewInt(0))
if err != nil {
panic(fmt.Errorf("failed to deploy MIPS contract: %w. took %d gas", err, startingGas-leftOverGas))
}
addrs.MIPS = deployedMipsAddr
rules := env.ChainConfig().Rules(header.Number, true, header.Time)
env.StateDB.Prepare(rules, addrs.Sender, addrs.FeeRecipient, &addrs.MIPS, vm.ActivePrecompiles(rules), nil)
......
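The MIPS contract is now deployed through its real constructor: the 20-byte Oracle address is packed into the low-order bytes of a 32-byte ABI word and appended to the creation bytecode. A standalone sketch of just that encoding step (the oracle address below is hypothetical, for illustration only):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical oracle address; in NewEVMEnv it comes from addrs.Oracle.
	oracle := common.HexToAddress("0x00000000000000000000000000000000000000aa")

	// Same packing as copy(mipsCtorArgs[12:], addrs.Oracle[:]): a single
	// `address` constructor argument is ABI-encoded as a 32-byte word with
	// the address right-aligned behind 12 zero bytes of padding.
	var ctorArgs [32]byte
	copy(ctorArgs[12:], oracle[:])

	fmt.Println(common.Bytes2Hex(ctorArgs[:]))
	// Equivalent to common.LeftPadBytes(oracle.Bytes(), 32).
	fmt.Println(common.Bytes2Hex(common.LeftPadBytes(oracle.Bytes(), 32)))
}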
@@ -165,6 +165,43 @@ func TestEVM(t *testing.T) {
}
}
func TestEVMSingleStep(t *testing.T) {
contracts, addrs := testContractsSetup(t)
var tracer vm.EVMLogger
//tracer = SourceMapTracer(t, contracts, addrs)
type testInput struct {
name string
pc uint32
nextPC uint32
insn uint32
}
cases := []testInput{
{"j MSB set target", 0, 4, 0x0A_00_00_02}, // j 0x02_00_00_02
{"j non-zero PC region", 0x10000000, 0x10000004, 0x08_00_00_02}, // j 0x2
{"jal MSB set target", 0, 4, 0x0E_00_00_02}, // jal 0x02_00_00_02
{"jal non-zero PC region", 0x10000000, 0x10000004, 0x0C_00_00_02}, // jal 0x2
}
for _, tt := range cases {
t.Run(tt.name, func(t *testing.T) {
state := &State{PC: tt.pc, NextPC: tt.nextPC, Memory: NewMemory()}
state.Memory.SetMemory(tt.pc, tt.insn)
us := NewInstrumentedState(state, nil, os.Stdout, os.Stderr)
stepWitness, err := us.Step(true)
require.NoError(t, err)
evm := NewMIPSEVM(contracts, addrs)
evm.SetTracer(tracer)
evmPost := evm.Step(t, stepWitness)
goPost := us.state.EncodeWitness()
require.Equal(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(),
"mipsevm produced different state than EVM")
})
}
}
func TestEVMFault(t *testing.T) {
contracts, addrs := testContractsSetup(t)
var tracer vm.EVMLogger // no-tracer by default, but see SourceMapTracer and MarkdownTracer
......
@@ -285,13 +285,13 @@ func (m *InstrumentedState) mipsStep() error {
// j-type j/jal
if opcode == 2 || opcode == 3 {
// TODO likely bug in original code: MIPS spec says this should be in the "current" region;
// a 256 MB aligned region (i.e. use top 4 bits of branch delay slot (pc+4))
linkReg := uint32(0)
if opcode == 3 {
linkReg = 31
}
return m.handleJump(linkReg, SE(insn&0x03FFFFFF, 26)<<2) // Take top 4 bits of the next PC (its 256 MB region), and concatenate with the 26-bit offset
target := (m.state.NextPC & 0xF0000000) | ((insn & 0x03FFFFFF) << 2)
return m.handleJump(linkReg, target)
}
// register fetch
@@ -396,7 +396,6 @@ func (m *InstrumentedState) mipsStep() error {
func execute(insn uint32, rs uint32, rt uint32, mem uint32) uint32 {
opcode := insn >> 26 // 6-bits
fun := insn & 0x3f // 6-bits
// TODO(CLI-4136): deref the immed into a register
if opcode < 0x20 {
// transform ArithLogI
......
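The rewritten jump handling places the shifted 26-bit immediate inside the 256 MB region selected by the top 4 bits of the next PC, which is what the removed TODO said the MIPS spec requires. A standalone sketch (not part of this diff) reproducing the computation for two of the TestEVMSingleStep vectors above:

package main

import "fmt"

// jumpTarget mirrors the j/jal target computation added to mipsStep:
// shift the 26-bit immediate left by 2 and keep the top 4 bits of the next PC.
func jumpTarget(nextPC, insn uint32) uint32 {
	return (nextPC & 0xF0000000) | ((insn & 0x03FFFFFF) << 2)
}

func main() {
	// "j non-zero PC region": pc=0x10000000, nextPC=0x10000004, insn=0x08000002 (j 0x2)
	fmt.Printf("%#x\n", jumpTarget(0x10000004, 0x08000002)) // 0x10000008
	// "j MSB set target": pc=0, nextPC=4, insn=0x0A000002 (j 0x02000002)
	fmt.Printf("%#x\n", jumpTarget(0x00000004, 0x0A000002)) // 0x8000008
}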
@@ -15,7 +15,6 @@ require (
github.com/google/go-cmp v0.5.9
github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru v1.0.2
github.com/hashicorp/golang-lru/v2 v2.0.2
@@ -24,7 +23,7 @@ require (
github.com/ipfs/go-ds-leveldb v0.5.0
github.com/jackc/pgtype v1.14.0
github.com/jackc/pgx/v5 v5.4.3
github.com/lib/pq v1.10.9 github.com/joho/godotenv v1.5.1
github.com/libp2p/go-libp2p v0.27.8
github.com/libp2p/go-libp2p-pubsub v0.9.3
github.com/libp2p/go-libp2p-testing v0.12.0
@@ -36,9 +35,7 @@ require (
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.7.0
github.com/prometheus/client_golang v1.14.0
github.com/rs/cors v1.9.0
github.com/stretchr/testify v1.8.4
github.com/urfave/cli v1.22.14
github.com/urfave/cli/v2 v2.25.7
golang.org/x/crypto v0.12.0
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df
@@ -46,7 +43,7 @@ require (
golang.org/x/term v0.11.0
golang.org/x/time v0.3.0
gorm.io/driver/postgres v1.5.2
gorm.io/gorm v1.25.2 gorm.io/gorm v1.25.3
)
require (
@@ -120,6 +117,7 @@ require (
github.com/koron/go-ssdp v0.0.4 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
@@ -166,12 +164,13 @@ require (
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/rs/cors v1.9.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b // indirect github.com/supranational/blst v0.3.11 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.5.0 // indirect
......
@@ -304,7 +304,6 @@ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
@@ -426,6 +425,8 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -763,8 +764,8 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b h1:u49mjRnygnB34h8OKbnNJFVUtWSKIKb1KukdV8bILUM= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI=
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
@@ -783,8 +784,6 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk=
github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
@@ -1112,8 +1111,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.2 h1:ytTDxxEv+MplXOfFe3Lzm7SjG09fcdb3Z/c056DTBx0=
gorm.io/driver/postgres v1.5.2/go.mod h1:fmpX0m2I1PKuR7mKZiEluwrP3hbs+ps7JIGMUBpCgl8=
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho= gorm.io/gorm v1.25.3 h1:zi4rHZj1anhZS2EuEODMhDisGy+Daq9jtPrNGgbQYD8=
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= gorm.io/gorm v1.25.3/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
......
docker-compose.dev.yml
.env
indexer
indexer-refresh
@@ -8,11 +8,8 @@ LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
indexer:
env GO111MODULE=on go build -v $(LDFLAGS) ./cmd/indexer
indexer-refresh:
env GO111MODULE=on go build -v $(LDFLAGS) ./cmd/indexer-refresh
clean:
rm indexer && rm indexer-refresh rm indexer
test:
go test -v ./...
@@ -22,7 +19,6 @@ lint:
.PHONY: \
indexer \
indexer-refresh \
bindings \
bindings-scc \
clean \
......
package api
import (
"encoding/json" "fmt"
"net/http" "net/http"
"github.com/ethereum-optimism/optimism/indexer/api/routes"
"github.com/ethereum-optimism/optimism/indexer/database" "github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log"
"github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5"
) )
type PaginationResponse struct { const ethereumAddressRegex = `^0x[a-fA-F0-9]{40}$`
// TODO type this better
Data interface{} `json:"data"`
Cursor string `json:"cursor"`
HasNextPage bool `json:"hasNextPage"`
}
func (a *Api) L1DepositsHandler(w http.ResponseWriter, r *http.Request) {
bv := a.BridgeTransfersView
address := common.HexToAddress(chi.URLParam(r, "address"))
// limit := getIntFromQuery(r, "limit", 10)
// cursor := r.URL.Query().Get("cursor")
// sortDirection := r.URL.Query().Get("sortDirection")
deposits, err := bv.L1BridgeDepositsByAddress(address)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// This is not the shape of the response we want!!!
// will add in the individual features in future prs 1 by 1
response := PaginationResponse{
Data: deposits,
// Cursor: nextCursor,
HasNextPage: false,
}
jsonResponse(w, response, http.StatusOK)
}
func (a *Api) L2WithdrawalsHandler(w http.ResponseWriter, r *http.Request) {
bv := a.BridgeTransfersView
address := common.HexToAddress(chi.URLParam(r, "address"))
// limit := getIntFromQuery(r, "limit", 10)
// cursor := r.URL.Query().Get("cursor")
// sortDirection := r.URL.Query().Get("sortDirection")
withdrawals, err := bv.L2BridgeWithdrawalsByAddress(address)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// This is not the shape of the response we want!!!
// will add in the individual features in future prs 1 by 1
response := PaginationResponse{
Data: withdrawals,
// Cursor: nextCursor,
HasNextPage: false,
}
jsonResponse(w, response, http.StatusOK)
}
func (a *Api) HealthzHandler(w http.ResponseWriter, r *http.Request) {
jsonResponse(w, "ok", http.StatusOK)
}
func jsonResponse(w http.ResponseWriter, data interface{}, statusCode int) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(statusCode)
if err := json.NewEncoder(w).Encode(data); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
type Api struct {
Router *chi.Mux
BridgeTransfersView database.BridgeTransfersView
}
func NewApi(bv database.BridgeTransfersView) *Api { func NewApi(bv database.BridgeTransfersView, logger log.Logger) *Api {
logger.Info("Initializing API...")
r := chi.NewRouter()
api := &Api{Router: r, BridgeTransfersView: bv} h := routes.NewRoutes(logger, bv)
// these regex are .+ because I wasn't sure what they should be api := &Api{Router: r}
// don't want a regex for addresses because would prefer to validate the address
// with go-ethereum and throw a friendly error message
r.Get("/api/v0/deposits/{address:.+}", api.L1DepositsHandler)
r.Get("/api/v0/withdrawals/{address:.+}", api.L2WithdrawalsHandler)
r.Get("/healthz", api.HealthzHandler)
return api r.Get("/healthz", h.HealthzHandler)
r.Get(fmt.Sprintf("/api/v0/deposits/{address:%s}", ethereumAddressRegex), h.L1DepositsHandler)
r.Get(fmt.Sprintf("/api/v0/withdrawals/{address:%s}", ethereumAddressRegex), h.L2WithdrawalsHandler)
return api
}
func (a *Api) Listen(port string) error {
......
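With the address pattern on the routes, a request whose {address} segment is not a 0x-prefixed 40-hex-character string never matches, so chi answers 404 rather than the handler silently coercing it through common.HexToAddress. An illustrative client sketch; the host and port below are assumptions, not taken from this diff:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func get(path string) {
	// Base URL is assumed for illustration; the real port comes from cfg.API.Port.
	resp, err := http.Get("http://127.0.0.1:8080" + path)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body))
}

func main() {
	get("/healthz")
	// Well-formed address: matches the regex-constrained route.
	get("/api/v0/deposits/0x4204204204204204204204204204204204204204")
	// Malformed address: no route matches, so the server returns 404.
	get("/api/v0/deposits/0x123")
}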
package api
import (
"fmt"
"math/big" "math/big"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"testing" "testing"
"github.com/ethereum-optimism/optimism/indexer/database" "github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
// MockBridgeTransfersView mocks the BridgeTransfersView interface // MockBridgeTransfersView mocks the BridgeTransfersView interface
type MockBridgeTransfersView struct{} type MockBridgeTransfersView struct{}
var mockAddress = "0x4204204204204204204204204204204204204204"
var (
deposit = database.L1BridgeDeposit{
TransactionSourceHash: common.HexToHash("abc"),
@@ -23,7 +28,7 @@ var (
}
withdrawal = database.L2BridgeWithdrawal{
TransactionWithdrawalHash: common.HexToHash("0x456"), TransactionWithdrawalHash: common.HexToHash("0x420"),
CrossDomainMessengerNonce: &database.U256{Int: big.NewInt(0)},
Tx: database.Transaction{},
TokenPair: database.TokenPair{},
@@ -65,7 +70,8 @@ func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalsByAddress(address common.
}
func TestHealthz(t *testing.T) {
api := NewApi(&MockBridgeTransfersView{}) logger := testlog.Logger(t, log.LvlInfo)
api := NewApi(&MockBridgeTransfersView{}, logger)
request, err := http.NewRequest("GET", "/healthz", nil) request, err := http.NewRequest("GET", "/healthz", nil)
assert.Nil(t, err) assert.Nil(t, err)
@@ -76,8 +82,9 @@ func TestHealthz(t *testing.T) {
}
func TestL1BridgeDepositsHandler(t *testing.T) {
api := NewApi(&MockBridgeTransfersView{}) logger := testlog.Logger(t, log.LvlInfo)
request, err := http.NewRequest("GET", "/api/v0/deposits/0x123", nil) api := NewApi(&MockBridgeTransfersView{}, logger)
request, err := http.NewRequest("GET", fmt.Sprintf("/api/v0/deposits/%s", mockAddress), nil)
assert.Nil(t, err)
responseRecorder := httptest.NewRecorder()
@@ -87,8 +94,9 @@ func TestL1BridgeDepositsHandler(t *testing.T) {
}
func TestL2BridgeWithdrawalsByAddressHandler(t *testing.T) {
api := NewApi(&MockBridgeTransfersView{}) logger := testlog.Logger(t, log.LvlInfo)
request, err := http.NewRequest("GET", "/api/v0/withdrawals/0x123", nil) api := NewApi(&MockBridgeTransfersView{}, logger)
request, err := http.NewRequest("GET", fmt.Sprintf("/api/v0/withdrawals/%s", mockAddress), nil)
assert.Nil(t, err)
responseRecorder := httptest.NewRecorder()
......
package routes
import (
"encoding/json"
"net/http"
"github.com/ethereum/go-ethereum/log"
)
// lazily typing numbers fixme
type Transaction struct {
Timestamp uint64 `json:"timestamp"`
TransactionHash string `json:"transactionHash"`
}
type Block struct {
BlockNumber int64 `json:"number"`
BlockHash string `json:"hash"`
// ParentBlockHash string `json:"parentHash"`
}
type Extensions struct {
OptimismBridgeAddress string `json:"OptimismBridgeAddress"`
}
type TokenInfo struct {
// TODO lazily typing ints go through them all with fine tooth comb once api is up
ChainId int `json:"chainId"`
Address string `json:"address"`
Name string `json:"name"`
Symbol string `json:"symbol"`
Decimals int `json:"decimals"`
Extensions Extensions `json:"extensions"`
}
func jsonResponse(w http.ResponseWriter, logger log.Logger, data interface{}, statusCode int) {
w.Header().Set("Content-Type", "application/json")
jsonData, err := json.Marshal(data)
if err != nil {
http.Error(w, "Internal server error", http.StatusInternalServerError)
logger.Error("Failed to marshal JSON: %v", err)
return
}
w.WriteHeader(statusCode)
_, err = w.Write(jsonData)
if err != nil {
http.Error(w, "Internal server error", http.StatusInternalServerError)
logger.Error("Failed to write JSON data", err)
return
}
}
package routes
import (
"net/http"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum/go-ethereum/common"
"github.com/go-chi/chi/v5"
)
type DepositItem struct {
Guid string `json:"guid"`
From string `json:"from"`
To string `json:"to"`
// TODO could consider OriginTx to be more generic to handling L2 to L2 deposits
// this seems more clear today though
Tx Transaction `json:"Tx"`
Block Block `json:"Block"`
Amount string `json:"amount"`
L1Token TokenInfo `json:"l1Token"`
L2Token TokenInfo `json:"l2Token"`
}
type DepositResponse struct {
Cursor string `json:"cursor"`
HasNextPage bool `json:"hasNextPage"`
Items []DepositItem `json:"items"`
}
// TODO this is original spec but maybe include the l2 block info too for the relayed tx
// FIXME make a pure function that returns a struct instead of newWithdrawalResponse
func newDepositResponse(deposits []*database.L1BridgeDepositWithTransactionHashes) DepositResponse {
items := make([]DepositItem, 0, len(deposits))
for _, deposit := range deposits {
item := DepositItem{
Guid: deposit.L1BridgeDeposit.TransactionSourceHash.String(),
Block: Block{
BlockNumber: 420420, // TODO
BlockHash: "0x420", // TODO
},
Tx: Transaction{
TransactionHash: "0x420", // TODO
Timestamp: deposit.L1BridgeDeposit.Tx.Timestamp,
},
From: deposit.L1BridgeDeposit.Tx.FromAddress.String(),
To: deposit.L1BridgeDeposit.Tx.ToAddress.String(),
Amount: deposit.L1BridgeDeposit.Tx.Amount.Int.String(),
L1Token: TokenInfo{
ChainId: 1,
Address: deposit.L1BridgeDeposit.TokenPair.L1TokenAddress.String(),
Name: "TODO",
Symbol: "TODO",
Decimals: 420,
Extensions: Extensions{
OptimismBridgeAddress: "0x420", // TODO
},
},
L2Token: TokenInfo{
ChainId: 10,
Address: deposit.L1BridgeDeposit.TokenPair.L2TokenAddress.String(),
Name: "TODO",
Symbol: "TODO",
Decimals: 420,
Extensions: Extensions{
OptimismBridgeAddress: "0x420", // TODO
},
},
}
items = append(items, item)
}
return DepositResponse{
Cursor: "42042042-4204-4204-4204-420420420420", // TODO
HasNextPage: false, // TODO
Items: items,
}
}
func (h Routes) L1DepositsHandler(w http.ResponseWriter, r *http.Request) {
address := common.HexToAddress(chi.URLParam(r, "address"))
deposits, err := h.BridgeTransfersView.L1BridgeDepositsByAddress(address)
if err != nil {
http.Error(w, "Internal server error reading deposits", http.StatusInternalServerError)
h.Logger.Error("Unable to read deposits from DB")
h.Logger.Error(err.Error())
return
}
response := newDepositResponse(deposits)
jsonResponse(w, h.Logger, response, http.StatusOK)
}
package routes
import (
"net/http"
)
func (h Routes) HealthzHandler(w http.ResponseWriter, r *http.Request) {
jsonResponse(w, h.Logger, "ok", http.StatusOK)
}
package routes
import (
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum/go-ethereum/log"
)
type Routes struct {
Logger log.Logger
BridgeTransfersView database.BridgeTransfersView
}
func NewRoutes(logger log.Logger, bv database.BridgeTransfersView) Routes {
return Routes{
Logger: logger,
BridgeTransfersView: bv,
}
}
package routes
import (
"net/http"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum/go-ethereum/common"
"github.com/go-chi/chi/v5"
)
type Proof struct {
TransactionHash string `json:"transactionHash"`
BlockTimestamp uint64 `json:"blockTimestamp"`
BlockNumber int `json:"blockNumber"`
}
type Claim struct {
TransactionHash string `json:"transactionHash"`
BlockTimestamp uint64 `json:"blockTimestamp"`
BlockNumber int `json:"blockNumber"`
}
type WithdrawalItem struct {
Guid string `json:"guid"`
Tx Transaction `json:"Tx"`
Block Block `json:"Block"`
From string `json:"from"`
To string `json:"to"`
TransactionHash string `json:"transactionHash"`
Amount string `json:"amount"`
Proof Proof `json:"proof"`
Claim Claim `json:"claim"`
WithdrawalState string `json:"withdrawalState"`
L1Token TokenInfo `json:"l1Token"`
L2Token TokenInfo `json:"l2Token"`
}
type WithdrawalResponse struct {
Cursor string `json:"cursor"`
HasNextPage bool `json:"hasNextPage"`
Items []WithdrawalItem `json:"items"`
}
// FIXME make a pure function that returns a struct instead of newWithdrawalResponse
func newWithdrawalResponse(withdrawals []*database.L2BridgeWithdrawalWithTransactionHashes) WithdrawalResponse {
items := make([]WithdrawalItem, 0, len(withdrawals))
for _, withdrawal := range withdrawals {
item := WithdrawalItem{
Guid: withdrawal.L2BridgeWithdrawal.TransactionWithdrawalHash.String(),
Block: Block{
BlockNumber: 420420, // TODO
BlockHash: "0x420", // TODO
},
Tx: Transaction{
TransactionHash: "0x420", // TODO
Timestamp: withdrawal.L2BridgeWithdrawal.Tx.Timestamp,
},
From: withdrawal.L2BridgeWithdrawal.Tx.FromAddress.String(),
To: withdrawal.L2BridgeWithdrawal.Tx.ToAddress.String(),
TransactionHash: withdrawal.L2TransactionHash.String(),
Amount: withdrawal.L2BridgeWithdrawal.Tx.Amount.Int.String(),
Proof: Proof{
TransactionHash: withdrawal.ProvenL1TransactionHash.String(),
BlockTimestamp: withdrawal.L2BridgeWithdrawal.Tx.Timestamp,
BlockNumber: 420, // TODO Block struct instead
},
Claim: Claim{
TransactionHash: withdrawal.FinalizedL1TransactionHash.String(),
BlockTimestamp: withdrawal.L2BridgeWithdrawal.Tx.Timestamp, // Using L2 timestamp for now, might need adjustment
BlockNumber: 420, // TODO block struct
},
WithdrawalState: "COMPLETE", // TODO
L1Token: TokenInfo{
ChainId: 1,
Address: withdrawal.L2BridgeWithdrawal.TokenPair.L1TokenAddress.String(),
Name: "Example", // TODO
Symbol: "EXAMPLE", // TODO
Decimals: 18, // TODO
Extensions: Extensions{
OptimismBridgeAddress: "0x636Af16bf2f682dD3109e60102b8E1A089FedAa8",
},
},
L2Token: TokenInfo{
ChainId: 10,
Address: withdrawal.L2BridgeWithdrawal.TokenPair.L2TokenAddress.String(),
Name: "Example", // TODO
Symbol: "EXAMPLE", // TODO
Decimals: 18, // TODO
Extensions: Extensions{
OptimismBridgeAddress: "0x36Af16bf2f682dD3109e60102b8E1A089FedAa86",
},
},
}
items = append(items, item)
}
return WithdrawalResponse{
Cursor: "42042042-0420-4204-2042-420420420420", // TODO
HasNextPage: true, // TODO
Items: items,
}
}
func (h Routes) L2WithdrawalsHandler(w http.ResponseWriter, r *http.Request) {
address := common.HexToAddress(chi.URLParam(r, "address"))
withdrawals, err := h.BridgeTransfersView.L2BridgeWithdrawalsByAddress(address)
if err != nil {
http.Error(w, "Internal server error fetching withdrawals", http.StatusInternalServerError)
h.Logger.Error("Unable to read deposits from DB")
h.Logger.Error(err.Error())
return
}
response := newWithdrawalResponse(withdrawals)
jsonResponse(w, h.Logger, response, http.StatusOK)
}
@@ -3,12 +3,14 @@ package cli
import (
"context"
"fmt"
"strconv"
"github.com/ethereum-optimism/optimism/indexer" "github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum-optimism/optimism/indexer/api"
"github.com/ethereum-optimism/optimism/indexer/config" "github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-service/log" "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/opio" "github.com/ethereum-optimism/optimism/op-service/opio"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
@@ -23,17 +25,28 @@ type Cli struct {
}
func runIndexer(ctx *cli.Context) error { func runIndexer(ctx *cli.Context) error {
logger := log.NewLogger(log.ReadCLIConfig(ctx)) logger := log.NewLogger(log.CLIConfig{
Level: "warn",
Color: false,
Format: "terminal",
})
configPath := ctx.String(ConfigFlag.Name)
cfg, err := config.LoadConfig(configPath) cfg, err := config.LoadConfig(logger, configPath)
if err != nil {
logger.Error("failed to load config", "err", err)
return err
}
cfg.Logger = logger logger = log.NewLogger(cfg.Logger)
indexer, err := indexer.NewIndexer(cfg)
db, err := database.NewDB(cfg.DB)
if err != nil {
return err
}
indexer, err := indexer.NewIndexer(cfg.Chain, cfg.RPCs, db, logger)
if err != nil {
return err
}
@@ -51,17 +64,21 @@ func runApi(ctx *cli.Context) error {
logger := log.NewLogger(log.ReadCLIConfig(ctx))
configPath := ctx.String(ConfigFlag.Name)
cfg, err := config.LoadConfig(configPath) cfg, err := config.LoadConfig(logger, configPath)
if err != nil {
logger.Error("failed to load config", "err", err)
return err
}
cfg.Logger = logger db, err := database.NewDB(cfg.DB)
fmt.Println(cfg)
if err != nil {
logger.Crit("Failed to connect to database", "err", err)
}
server := api.NewApi(db.BridgeTransfers, logger)
// finish me return server.Listen(strconv.Itoa(cfg.API.Port))
return err
}
var (
@@ -72,15 +89,6 @@ var (
Usage: "path to config file",
EnvVars: []string{"INDEXER_CONFIG"},
}
// Not used yet. Use this flag to run legacy app instead
// Remove me after indexer is released
IndexerRefreshFlag = &cli.BoolFlag{
Name: "indexer-refresh",
Value: false,
Aliases: []string{"i"},
Usage: "run new unreleased indexer by passing in flag",
EnvVars: []string{"INDEXER_REFRESH"},
}
)
// make an instance method on Cli called Run that runs the cli
@@ -90,7 +98,8 @@ func (c *Cli) Run(args []string) error {
}
func NewCli(GitVersion string, GitCommit string, GitDate string) *Cli {
flags := append([]cli.Flag{ConfigFlag}, log.CLIFlags("INDEXER")...) flags := []cli.Flag{ConfigFlag}
flags = append(flags, log.CLIFlags("INDEXER")...)
app := &cli.App{
Version: fmt.Sprintf("%s-%s", GitVersion, params.VersionWithCommit(GitCommit, GitDate)),
Description: "An indexer of all optimism events with a serving api layer",
......
# indexer/cmd/indexer-refresh
Entrypoint for the new WIP indexer. After the project is deployed and stable we will delete the [indexer/main.go](../indexer/main.go) file and move [indexer-refresh/main.go](./main.go) in its place.
package main
import (
"os"
"github.com/ethereum-optimism/optimism/indexer/cli"
"github.com/ethereum/go-ethereum/log"
)
var (
GitVersion = ""
GitCommit = ""
GitDate = ""
)
func main() {
app := cli.NewCli(GitVersion, GitCommit, GitDate)
if err := app.Run(os.Args); err != nil {
log.Crit("Application failed", "message", err)
}
}
package main
import (
"fmt"
"os" "os"
"github.com/ethereum-optimism/optimism/indexer/cli"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/indexer/flags"
"github.com/ethereum-optimism/optimism/indexer/legacy"
)
var (
@@ -19,26 +14,9 @@ var (
)
func main() {
// Set up logger with a default INFO level in case we fail to parse flags. app := cli.NewCli(GitVersion, GitCommit, GitDate)
// Otherwise the final crtiical log won't show what the parsing error was.
log.Root().SetHandler(
log.LvlFilterHandler(
log.LvlInfo,
log.StreamHandler(os.Stdout, log.TerminalFormat(true)),
),
)
app := cli.NewApp()
app.Flags = flags.Flags
app.Version = fmt.Sprintf("%s-%s", GitVersion, params.VersionWithCommit(GitCommit, GitDate))
app.Name = "indexer"
app.Usage = "Indexer Service"
app.Description = "Service for indexing deposits and withdrawals " +
"by account on L1 and L2"
app.Action = legacy.Main(GitVersion) if err := app.Run(os.Args); err != nil {
err := app.Run(os.Args)
if err != nil {
log.Crit("Application failed", "message", err) log.Crit("Application failed", "message", err)
} }
} }
package config
import (
"fmt"
"os" "os"
"reflect"
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
"github.com/ethereum-optimism/optimism/indexer/processor" "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/common"
geth_log "github.com/ethereum/go-ethereum/log"
"github.com/joho/godotenv"
)
// in future presets can just be onchain config and fetched on initialization
// Config represents the `indexer.toml` file used to configure the indexer
type Config struct {
Chain ChainConfig
@@ -16,14 +22,42 @@ type Config struct {
DB DBConfig
API APIConfig
Metrics MetricsConfig
Logger log.Logger `toml:"-"` Logger log.CLIConfig
}
// fetch this via onchain config from RPCsConfig and remove from config in future
type L1Contracts struct {
OptimismPortal common.Address
L2OutputOracle common.Address
L1CrossDomainMessenger common.Address
L1StandardBridge common.Address
L1ERC721Bridge common.Address
// Some more contracts -- ProxyAdmin, SystemConfig, etc.
// Ignore the auxiliary contracts?
// Legacy contracts? We'll add this in to index the legacy chain.
// Remove afterwards?
}
func (c L1Contracts) ToSlice() []common.Address {
fields := reflect.VisibleFields(reflect.TypeOf(c))
v := reflect.ValueOf(c)
contracts := make([]common.Address, len(fields))
for i, field := range fields {
contracts[i] = (v.FieldByName(field.Name).Interface()).(common.Address)
}
return contracts
} }
// ChainConfig configures the chain being indexed
type ChainConfig struct {
// Configure known chains with the l2 chain id
Preset int
L1Contracts processor.L1Contracts // Configure custom chains via providing the L1Contract addresses
L1Contracts L1Contracts
}
// RPCsConfig configures the RPC urls
@@ -54,22 +88,38 @@ type MetricsConfig struct {
}
// LoadConfig loads the `indexer.toml` config file from a given path
func LoadConfig(path string) (Config, error) { func LoadConfig(logger geth_log.Logger, path string) (Config, error) {
if err := godotenv.Load(); err != nil {
logger.Warn("Unable to load .env file", err)
logger.Info("Continuing without .env file")
} else {
logger.Info("Loaded .env file")
}
var conf Config
// Read the config file.
data, err := os.ReadFile(path)
if err != nil {
return conf, err
}
// Replace environment variables.
data = []byte(os.ExpandEnv(string(data)))
// Decode the TOML data.
if _, err := toml.Decode(string(data), &conf); err != nil {
logger.Info("Failed to decode config file", "message", err)
return conf, err
}
if conf.Chain.Preset != 0 {
knownContracts, ok := presetL1Contracts[conf.Chain.Preset]
if ok {
conf.Chain.L1Contracts = knownContracts
} else {
return conf, fmt.Errorf("unknown preset: %d", conf.Chain.Preset)
}
}
logger.Debug("Loaded config file", conf)
return conf, nil
}
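As a usage sketch of the new LoadConfig flow: ${...} references in indexer.toml are expanded with os.ExpandEnv before TOML decoding, and a known preset fills in the L1 contract addresses. Key and field names follow the tests in this diff; the INDEXER_L1_RPC variable name is an assumption used only here:

package main

import (
	"fmt"
	"os"

	"github.com/ethereum-optimism/optimism/indexer/config"
	"github.com/ethereum/go-ethereum/log"
)

func main() {
	// Hypothetical env var so the RPC URL can stay out of the config file.
	os.Setenv("INDEXER_L1_RPC", "https://l1.example.com")

	tomlData := `
[chain]
preset = 10

[rpcs]
l1-rpc = "${INDEXER_L1_RPC}"
l2-rpc = "https://l2.example.com"
`
	f, _ := os.CreateTemp("", "indexer.toml")
	defer os.Remove(f.Name())
	_, _ = f.WriteString(tomlData)
	_ = f.Close()

	// os.ExpandEnv substitutes ${INDEXER_L1_RPC}; preset 10 pulls the
	// OP Mainnet addresses from presetL1Contracts.
	cfg, err := config.LoadConfig(log.New(), f.Name())
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.RPCs.L1RPC, cfg.Chain.L1Contracts.OptimismPortal)
}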
package config
import (
"fmt"
"os" "os"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestLoadConfig(t *testing.T) { func TestLoadConfig(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
tmpfile, err := os.CreateTemp("", "test.toml") tmpfile, err := os.CreateTemp("", "test.toml")
require.NoError(t, err) require.NoError(t, err)
defer os.Remove(tmpfile.Name()) defer os.Remove(tmpfile.Name())
@@ -15,7 +20,7 @@ func TestLoadConfig(t *testing.T) {
testData := `
[chain]
preset = 1234 preset = 420
[rpcs]
l1-rpc = "https://l1.example.com"
@@ -26,6 +31,7 @@ func TestLoadConfig(t *testing.T) {
port = 5432
user = "postgres"
password = "postgres"
name = "indexer"
[api]
host = "127.0.0.1"
@@ -44,18 +50,90 @@ func TestLoadConfig(t *testing.T) {
err = tmpfile.Close()
require.NoError(t, err)
conf, err := LoadConfig(tmpfile.Name()) conf, err := LoadConfig(logger, tmpfile.Name())
require.NoError(t, err)
require.Equal(t, conf.Chain.Preset, 1234) require.Equal(t, conf.Chain.Preset, 420)
require.Equal(t, conf.Chain.L1Contracts.OptimismPortal.String(), presetL1Contracts[420].OptimismPortal.String())
require.Equal(t, conf.Chain.L1Contracts.L1CrossDomainMessenger.String(), presetL1Contracts[420].L1CrossDomainMessenger.String())
require.Equal(t, conf.Chain.L1Contracts.L1ERC721Bridge.String(), presetL1Contracts[420].L1ERC721Bridge.String())
require.Equal(t, conf.Chain.L1Contracts.L1StandardBridge.String(), presetL1Contracts[420].L1StandardBridge.String())
require.Equal(t, conf.Chain.L1Contracts.L2OutputOracle.String(), presetL1Contracts[420].L2OutputOracle.String())
require.Equal(t, conf.RPCs.L1RPC, "https://l1.example.com")
require.Equal(t, conf.RPCs.L2RPC, "https://l2.example.com")
require.Equal(t, conf.DB.Host, "127.0.0.1")
require.Equal(t, conf.DB.Port, 5432)
require.Equal(t, conf.DB.User, "postgres")
require.Equal(t, conf.DB.Password, "postgres")
require.Equal(t, conf.DB.Name, "indexer")
require.Equal(t, conf.API.Host, "127.0.0.1")
require.Equal(t, conf.API.Port, 8080)
require.Equal(t, conf.Metrics.Host, "127.0.0.1")
require.Equal(t, conf.Metrics.Port, 7300)
}
func TestLoadConfig_WithoutPreset(t *testing.T) {
tmpfile, err := os.CreateTemp("", "test_without_preset.toml")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
defer tmpfile.Close()
testData := `
[chain]
l1contracts = { OptimismPortal = "0x4205Fc579115071764c7423A4f12eDde41f106Ed", L2OutputOracle = "0x42097868233d1aa22e815a266982f2cf17685a27", L1CrossDomainMessenger = "0x420ce71c97B33Cc4729CF772ae268934F7ab5fA1", L1StandardBridge = "0x4209fc46f92E8a1c0deC1b1747d010903E884bE1", L1ERC721Bridge ="0x420749f83b81B301cAb5f48EB8516B986DAef23D" }
[rpcs]
l1-rpc = "https://l1.example.com"
l2-rpc = "https://l2.example.com"
`
data := []byte(testData)
err = os.WriteFile(tmpfile.Name(), data, 0644)
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
err = tmpfile.Close()
require.NoError(t, err)
logger := testlog.Logger(t, log.LvlInfo)
conf, err := LoadConfig(logger, tmpfile.Name())
require.NoError(t, err)
require.Equal(t, conf.Chain.L1Contracts.OptimismPortal.String(), common.HexToAddress("0x4205Fc579115071764c7423A4f12eDde41f106Ed").String())
require.Equal(t, conf.Chain.L1Contracts.L2OutputOracle.String(), common.HexToAddress("0x42097868233d1aa22e815a266982f2cf17685a27").String())
require.Equal(t, conf.Chain.L1Contracts.L1CrossDomainMessenger.String(), common.HexToAddress("0x420ce71c97B33Cc4729CF772ae268934F7ab5fA1").String())
require.Equal(t, conf.Chain.L1Contracts.L1StandardBridge.String(), common.HexToAddress("0x4209fc46f92E8a1c0deC1b1747d010903E884bE1").String())
require.Equal(t, conf.Chain.L1Contracts.L1ERC721Bridge.String(), common.HexToAddress("0x420749f83b81B301cAb5f48EB8516B986DAef23D").String())
require.Equal(t, conf.Chain.Preset, 0)
}
func TestLoadConfig_WithUnknownPreset(t *testing.T) {
tmpfile, err := os.CreateTemp("", "test_bad_preset.toml")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
defer tmpfile.Close()
testData := `
[chain]
preset = 1234567890 # this preset doesn't exist
[rpcs]
l1-rpc = "https://l1.example.com"
l2-rpc = "https://l2.example.com"
`
data := []byte(testData)
err = os.WriteFile(tmpfile.Name(), data, 0644)
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
err = tmpfile.Close()
require.NoError(t, err)
logger := testlog.Logger(t, log.LvlInfo)
conf, err := LoadConfig(logger, tmpfile.Name())
var faultyPreset = 1234567890
require.Equal(t, conf.Chain.Preset, faultyPreset)
require.Error(t, err)
require.Equal(t, fmt.Sprintf("unknown preset: %d", faultyPreset), err.Error())
}
package config
import (
"github.com/ethereum/go-ethereum/common"
)
// in future presets can just be onchain config and fetched on initialization
// Mapping of l2 chain ids to their preset chain configurations
var presetL1Contracts = map[int]L1Contracts{
// OP Mainnet
10: {
OptimismPortal: common.HexToAddress("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"),
L2OutputOracle: common.HexToAddress("0xdfe97868233d1aa22e815a266982f2cf17685a27"),
L1CrossDomainMessenger: common.HexToAddress("0x25ace71c97B33Cc4729CF772ae268934F7ab5fA1"),
L1StandardBridge: common.HexToAddress("0x99C9fc46f92E8a1c0deC1b1747d010903E884bE1"),
L1ERC721Bridge: common.HexToAddress("0x5a7749f83b81B301cAb5f48EB8516B986DAef23D"),
},
// OP Goerli
420: {
OptimismPortal: common.HexToAddress("0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383"),
L2OutputOracle: common.HexToAddress("0xE6Dfba0953616Bacab0c9A8ecb3a9BBa77FC15c0"),
L1CrossDomainMessenger: common.HexToAddress("0x5086d1eEF304eb5284A0f6720f79403b4e9bE294"),
L1StandardBridge: common.HexToAddress("0x636Af16bf2f682dD3109e60102b8E1A089FedAa8"),
L1ERC721Bridge: common.HexToAddress("0x8DD330DdE8D9898d43b4dc840Da27A07dF91b3c9"),
},
// Base Mainnet
8453: {
OptimismPortal: common.HexToAddress("0x49048044D57e1C92A77f79988d21Fa8fAF74E97e"),
L2OutputOracle: common.HexToAddress("0x56315b90c40730925ec5485cf004d835058518A0"),
L1CrossDomainMessenger: common.HexToAddress("0x866E82a600A1414e583f7F13623F1aC5d58b0Afa"),
L1StandardBridge: common.HexToAddress("0x3154Cf16ccdb4C6d922629664174b904d80F2C35"),
// FIXME update this to the correct address
L1ERC721Bridge: common.HexToAddress("0x0000000000000000000000000000000000000000"),
},
// Base Goerli
84531: {
OptimismPortal: common.HexToAddress("0xe93c8cD0D409341205A592f8c4Ac1A5fe5585cfA"),
L2OutputOracle: common.HexToAddress("0x2A35891ff30313CcFa6CE88dcf3858bb075A2298"),
L1CrossDomainMessenger: common.HexToAddress("0x8e5693140eA606bcEB98761d9beB1BC87383706D"),
L1StandardBridge: common.HexToAddress("0xfA6D8Ee5BE770F84FC001D098C4bD604Fe01284a"),
// FIXME update this to the correct address
L1ERC721Bridge: common.HexToAddress("0x0000000000000000000000000000000000000000"),
},
// Zora mainnet
7777777: {
OptimismPortal: common.HexToAddress("0x1a0ad011913A150f69f6A19DF447A0CfD9551054"),
L2OutputOracle: common.HexToAddress("0x9E6204F750cD866b299594e2aC9eA824E2e5f95c"),
L1CrossDomainMessenger: common.HexToAddress("0xdC40a14d9abd6F410226f1E6de71aE03441ca506"),
L1StandardBridge: common.HexToAddress("0x3e2Ea9B92B7E48A52296fD261dc26fd995284631"),
// FIXME update this to the correct address
L1ERC721Bridge: common.HexToAddress("0x0000000000000000000000000000000000000000"),
},
// Zora goerli
999: {
OptimismPortal: common.HexToAddress("0xDb9F51790365e7dc196e7D072728df39Be958ACe"),
L2OutputOracle: common.HexToAddress("0xdD292C9eEd00f6A32Ff5245d0BCd7f2a15f24e00"),
L1CrossDomainMessenger: common.HexToAddress("0xD87342e16352D33170557A7dA1e5fB966a60FafC"),
L1StandardBridge: common.HexToAddress("0x7CC09AC2452D6555d5e0C213Ab9E2d44eFbFc956"),
// FIXME update this to the correct address
L1ERC721Bridge: common.HexToAddress("0x0000000000000000000000000000000000000000"),
},
}
package indexer
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
)
// ParseAddress parses an ETH address from a hex string. This method will
// fail if the address is not a valid hexadecimal address.
func ParseAddress(address string) (common.Address, error) {
if common.IsHexAddress(address) {
return common.HexToAddress(address), nil
}
return common.Address{}, fmt.Errorf("invalid address: %v", address)
}
package indexer_test
import (
"bytes"
"errors"
"testing"
indexer "github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
// TestParseAddress asserts that ParseAddress correctly parses
// 40-character hexadecimal strings with optional 0x prefix into valid 20-byte
// addresses
func TestParseAddress(t *testing.T) {
tests := []struct {
name string
addr string
expErr error
expAddr common.Address
}{
{
name: "empty address",
addr: "",
expErr: errors.New("invalid address: "),
},
{
name: "only 0x",
addr: "0x",
expErr: errors.New("invalid address: 0x"),
},
{
name: "non hex character",
addr: "0xaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
expErr: errors.New("invalid address: 0xaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
{
name: "valid address",
addr: "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
expErr: nil,
expAddr: common.BytesToAddress(bytes.Repeat([]byte{170}, 20)),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
addr, err := indexer.ParseAddress(test.addr)
require.Equal(t, err, test.expErr)
if test.expErr != nil {
return
}
require.Equal(t, addr, test.expAddr)
})
}
}
@@ -23,17 +23,17 @@ type BlockHeader struct {
Number U256
Timestamp uint64
GethHeader *GethHeader `gorm:"serializer:rlp;column:rlp_bytes"` RLPHeader *RLPHeader `gorm:"serializer:rlp;column:rlp_bytes"`
}
func BlockHeaderFromGethHeader(header *types.Header) BlockHeader { func BlockHeaderFromHeader(header *types.Header) BlockHeader {
return BlockHeader{
Hash: header.Hash(),
ParentHash: header.ParentHash,
Number: U256{Int: header.Number},
Timestamp: header.Time,
GethHeader: (*GethHeader)(header), RLPHeader: (*RLPHeader)(header),
}
}
......
@@ -2,6 +2,7 @@ package database
import (
"errors"
"math/big"
"gorm.io/gorm" "gorm.io/gorm"
@@ -16,19 +17,23 @@ import (
*/
type ContractEvent struct {
GUID uuid.UUID `gorm:"primaryKey"`
// Some useful derived fields
BlockHash common.Hash `gorm:"serializer:json"`
ContractAddress common.Address `gorm:"serializer:json"`
TransactionHash common.Hash `gorm:"serializer:json"`
LogIndex uint64
EventSignature common.Hash `gorm:"serializer:json"`
LogIndex uint64
Timestamp uint64
GethLog *types.Log `gorm:"serializer:rlp;column:rlp_bytes"` // NOTE: NOT ALL THE DERIVED FIELDS ON `types.Log` ARE
// AVAILABLE. FIELDS LISTED ABOVE ARE FILLED IN
RLPLog *types.Log `gorm:"serializer:rlp;column:rlp_bytes"`
}
func ContractEventFromGethLog(log *types.Log, timestamp uint64) ContractEvent { func ContractEventFromLog(log *types.Log, timestamp uint64) ContractEvent {
eventSig := common.Hash{}
if len(log.Topics) > 0 {
eventSig = log.Topics[0]
@@ -38,18 +43,27 @@ func ContractEventFromGethLog(log *types.Log, timestamp uint64) ContractEvent {
GUID: uuid.New(),
BlockHash: log.BlockHash,
ContractAddress: log.Address,
TransactionHash: log.TxHash,
ContractAddress: log.Address,
EventSignature: eventSig,
LogIndex: uint64(log.Index),
Timestamp: timestamp,
GethLog: log, RLPLog: log,
}
}
func (c *ContractEvent) AfterFind(tx *gorm.DB) error {
// Fill in some of the derived fields that are not
// populated when decoding the RLPLog from RLP
c.RLPLog.BlockHash = c.BlockHash
c.RLPLog.TxHash = c.TransactionHash
c.RLPLog.Index = uint(c.LogIndex)
return nil
}
type L1ContractEvent struct {
ContractEvent `gorm:"embedded"`
}
@@ -61,9 +75,11 @@ type L2ContractEvent struct {
type ContractEventsView interface {
L1ContractEvent(uuid.UUID) (*L1ContractEvent, error)
L1ContractEventByTxLogIndex(common.Hash, uint64) (*L1ContractEvent, error)
L1ContractEventsWithFilter(ContractEvent, *big.Int, *big.Int) ([]L1ContractEvent, error)
L2ContractEvent(uuid.UUID) (*L2ContractEvent, error) L2ContractEvent(uuid.UUID) (*L2ContractEvent, error)
L2ContractEventByTxLogIndex(common.Hash, uint64) (*L2ContractEvent, error) L2ContractEventByTxLogIndex(common.Hash, uint64) (*L2ContractEvent, error)
L2ContractEventsWithFilter(ContractEvent, *big.Int, *big.Int) ([]L2ContractEvent, error)
} }
type ContractEventsDB interface { type ContractEventsDB interface {
...@@ -120,6 +136,30 @@ func (db *contractEventsDB) L1ContractEventByTxLogIndex(txHash common.Hash, logI ...@@ -120,6 +136,30 @@ func (db *contractEventsDB) L1ContractEventByTxLogIndex(txHash common.Hash, logI
return &l1ContractEvent, nil return &l1ContractEvent, nil
} }
func (db *contractEventsDB) L1ContractEventsWithFilter(filter ContractEvent, fromHeight, toHeight *big.Int) ([]L1ContractEvent, error) {
if fromHeight == nil {
fromHeight = big.NewInt(0)
}
query := db.gorm.Table("l1_contract_events").Where(&filter)
query = query.Joins("INNER JOIN l1_block_headers ON l1_contract_events.block_hash = l1_block_headers.hash")
query = query.Where("l1_block_headers.number >= ? AND l1_block_headers.number <= ?", fromHeight, toHeight)
query = query.Order("l1_block_headers.number ASC").Select("l1_contract_events.*")
// NOTE: We use `Find` here instead of `Scan` since `Scan` does not support
// model hooks like `ContractEvent#AfterFind`. Functionally they are the same.
var events []L1ContractEvent
result := query.Find(&events)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return events, nil
}
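For reference, a minimal usage sketch of the height-bounded filter above (hypothetical: `exampleFilterQuery`, the contract address, and the event signature are placeholders and not part of this change). gorm's struct-based `Where` ignores zero-valued fields, so only the populated fields of the filter constrain the query:

package database

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// exampleFilterQuery is a hypothetical sketch of calling L1ContractEventsWithFilter.
// Only the non-zero fields of the filter struct constrain the query; the big.Int
// bounds limit the L1 block height range joined against l1_block_headers.
func exampleFilterQuery(view ContractEventsView) error {
	filter := ContractEvent{
		// placeholder contract address and event signature
		ContractAddress: common.HexToAddress("0x4200000000000000000000000000000000000007"),
		EventSignature:  crypto.Keccak256Hash([]byte("SentMessage(address,address,bytes,uint256,uint256)")),
	}
	events, err := view.L1ContractEventsWithFilter(filter, big.NewInt(0), big.NewInt(1000))
	if err != nil {
		return err
	}
	for _, event := range events {
		// the derived fields on RLPLog are repopulated by the AfterFind hook
		fmt.Println(event.TransactionHash, event.LogIndex)
	}
	return nil
}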
// L2 // L2
func (db *contractEventsDB) StoreL2ContractEvents(events []*L2ContractEvent) error { func (db *contractEventsDB) StoreL2ContractEvents(events []*L2ContractEvent) error {
...@@ -154,3 +194,27 @@ func (db *contractEventsDB) L2ContractEventByTxLogIndex(txHash common.Hash, logI ...@@ -154,3 +194,27 @@ func (db *contractEventsDB) L2ContractEventByTxLogIndex(txHash common.Hash, logI
return &l2ContractEvent, nil return &l2ContractEvent, nil
} }
func (db *contractEventsDB) L2ContractEventsWithFilter(filter ContractEvent, fromHeight, toHeight *big.Int) ([]L2ContractEvent, error) {
if fromHeight == nil {
fromHeight = big.NewInt(0)
}
query := db.gorm.Table("l2_contract_events").Where(&filter)
query = query.Joins("INNER JOIN l2_block_headers ON l2_contract_events.block_hash = l2_block_headers.hash")
query = query.Where("l2_block_headers.number >= ? AND l2_block_headers.number <= ?", fromHeight, toHeight)
query = query.Order("l2_block_headers.number ASC").Select("l2_contract_events.*")
// NOTE: We use `Find` here instead of `Scan` since `Scan` does not support
// model hooks like `ContractEvent#AfterFind`. Functionally they are the same.
var events []L2ContractEvent
result := query.Find(&events)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return events, nil
}
...@@ -2,6 +2,9 @@ ...@@ -2,6 +2,9 @@
package database package database
import ( import (
"fmt"
"github.com/ethereum-optimism/optimism/indexer/config"
"gorm.io/driver/postgres" "gorm.io/driver/postgres"
"gorm.io/gorm" "gorm.io/gorm"
"gorm.io/gorm/logger" "gorm.io/gorm/logger"
...@@ -17,7 +20,14 @@ type DB struct { ...@@ -17,7 +20,14 @@ type DB struct {
BridgeTransactions BridgeTransactionsDB BridgeTransactions BridgeTransactionsDB
} }
func NewDB(dsn string) (*DB, error) { func NewDB(dbConfig config.DBConfig) (*DB, error) {
dsn := fmt.Sprintf("host=%s port=%d dbname=%s sslmode=disable", dbConfig.Host, dbConfig.Port, dbConfig.Name)
if dbConfig.User != "" {
dsn += fmt.Sprintf(" user=%s", dbConfig.User)
}
if dbConfig.Password != "" {
dsn += fmt.Sprintf(" password=%s", dbConfig.Password)
}
gorm, err := gorm.Open(postgres.Open(dsn), &gorm.Config{ gorm, err := gorm.Open(postgres.Open(dsn), &gorm.Config{
// The indexer will explicitly manage the transaction // The indexer will explicitly manage the transaction
// flow processing blocks // flow processing blocks
......
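A minimal sketch of wiring the new `NewDB` signature from a caller (hypothetical: the `main` package, host, and credentials below are placeholder values, not part of this change):

package main

import (
	"log"

	"github.com/ethereum-optimism/optimism/indexer/config"
	"github.com/ethereum-optimism/optimism/indexer/database"
)

func main() {
	// Placeholder connection settings. User and Password are optional and are
	// only appended to the DSN by NewDB when non-empty.
	dbCfg := config.DBConfig{
		Host:     "127.0.0.1",
		Port:     5432,
		Name:     "db_name",
		User:     "db_username",
		Password: "db_password",
	}
	db, err := database.NewDB(dbCfg)
	if err != nil {
		log.Fatalf("failed to connect to database: %v", err)
	}
	_ = db
}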
...@@ -68,13 +68,13 @@ func (u256 U256) Value() (driver.Value, error) { ...@@ -68,13 +68,13 @@ func (u256 U256) Value() (driver.Value, error) {
return numeric.Value() return numeric.Value()
} }
type GethHeader types.Header type RLPHeader types.Header
func (h *GethHeader) EncodeRLP(w io.Writer) error { func (h *RLPHeader) EncodeRLP(w io.Writer) error {
return types.NewBlockWithHeader((*types.Header)(h)).EncodeRLP(w) return types.NewBlockWithHeader((*types.Header)(h)).EncodeRLP(w)
} }
func (h *GethHeader) DecodeRLP(s *rlp.Stream) error { func (h *RLPHeader) DecodeRLP(s *rlp.Stream) error {
block := new(types.Block) block := new(types.Block)
err := block.DecodeRLP(s) err := block.DecodeRLP(s)
if err != nil { if err != nil {
...@@ -82,14 +82,14 @@ func (h *GethHeader) DecodeRLP(s *rlp.Stream) error { ...@@ -82,14 +82,14 @@ func (h *GethHeader) DecodeRLP(s *rlp.Stream) error {
} }
header := block.Header() header := block.Header()
*h = (GethHeader)(*header) *h = (RLPHeader)(*header)
return nil return nil
} }
func (h *GethHeader) Header() *types.Header { func (h *RLPHeader) Header() *types.Header {
return (*types.Header)(h) return (*types.Header)(h)
} }
func (h *GethHeader) Hash() common.Hash { func (h *RLPHeader) Hash() common.Hash {
return h.Header().Hash() return h.Header().Hash()
} }
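A short round-trip sketch of the renamed serializer (hypothetical helper; `roundTripHeader` is not part of this change). Encoding wraps the header in a block body, decoding unwraps it, and the recomputed hash should match the original:

package database

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

// roundTripHeader encodes a header through the RLPHeader wrapper and decodes it
// back, checking that the header hash survives the round trip.
func roundTripHeader(h *types.Header) error {
	encoded, err := rlp.EncodeToBytes((*RLPHeader)(h))
	if err != nil {
		return err
	}
	decoded := new(RLPHeader)
	if err := rlp.DecodeBytes(encoded, decoded); err != nil {
		return err
	}
	if decoded.Hash() != h.Hash() {
		return fmt.Errorf("hash mismatch after RLP round trip")
	}
	return nil
}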
package db
type Airdrop struct {
Address string `json:"address"`
VoterAmount string `json:"voterAmount"`
MultisigSignerAmount string `json:"multisigSignerAmount"`
GitcoinAmount string `json:"gitcoinAmount"`
ActiveBridgedAmount string `json:"activeBridgedAmount"`
OpUserAmount string `json:"opUserAmount"`
OpRepeatUserAmount string `json:"opRepeatUserAmount"`
BonusAmount string `json:"bonusAmount"`
TotalAmount string `json:"totalAmount"`
}
package db
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
)
// Deposit contains transaction data for deposits made via the L1 to L2 bridge.
type Deposit struct {
GUID string
TxHash common.Hash
L1Token common.Address
L2Token common.Address
FromAddress common.Address
ToAddress common.Address
Amount *big.Int
Data []byte
LogIndex uint
}
// String returns the tx hash for the deposit.
func (d Deposit) String() string {
return d.TxHash.String()
}
// DepositJSON contains Deposit data suitable for JSON serialization.
type DepositJSON struct {
GUID string `json:"guid"`
FromAddress string `json:"from"`
ToAddress string `json:"to"`
L1Token *Token `json:"l1Token"`
L2Token string `json:"l2Token"`
Amount string `json:"amount"`
Data []byte `json:"data"`
LogIndex uint64 `json:"logIndex"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp string `json:"blockTimestamp"`
TxHash string `json:"transactionHash"`
}
package db
import (
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/common"
)
var ETHL1Address common.Address
// ETHL1Token is a placeholder token for differentiating ETH transactions from
// ERC20 transactions on L1.
var ETHL1Token = &Token{
Address: ETHL1Address.String(),
Name: "Ethereum",
Symbol: "ETH",
Decimals: 18,
}
// ETHL2Token is a placeholder token for differentiating ETH transactions from
// ERC20 transactions on L2.
var ETHL2Token = &Token{
Address: predeploys.LegacyERC20ETH,
Name: "Ethereum",
Symbol: "ETH",
Decimals: 18,
}
package db
import "github.com/google/uuid"
// NewGUID returns a new guid.
func NewGUID() string {
return uuid.New().String()
}
package db
import (
"github.com/ethereum/go-ethereum/common"
)
// IndexedL1Block contains the L1 block including the deposits in it.
type IndexedL1Block struct {
Hash common.Hash
ParentHash common.Hash
Number uint64
Timestamp uint64
Deposits []Deposit
ProvenWithdrawals []ProvenWithdrawal
FinalizedWithdrawals []FinalizedWithdrawal
}
// String returns the block hash for the indexed l1 block.
func (b IndexedL1Block) String() string {
return b.Hash.String()
}
// IndexedL2Block contains the L2 block including the withdrawals in it.
type IndexedL2Block struct {
Hash common.Hash
ParentHash common.Hash
Number uint64
Timestamp uint64
Withdrawals []Withdrawal
}
// String returns the block hash for the indexed l2 block.
func (b IndexedL2Block) String() string {
return b.Hash.String()
}
package db
import (
"github.com/ethereum/go-ethereum/common"
)
// BlockLocator contains the block number and hash. It can
// uniquely identify an Ethereum block.
type BlockLocator struct {
Number uint64 `json:"number"`
Hash common.Hash `json:"hash"`
}
package db
// PaginationParam holds the pagination fields passed through by the REST
// middleware and queried by the database to page through deposits and
// withdrawals.
type PaginationParam struct {
Limit uint64 `json:"limit"`
Offset uint64 `json:"offset"`
Total uint64 `json:"total"`
}
type PaginatedDeposits struct {
Param *PaginationParam `json:"pagination"`
Deposits []DepositJSON `json:"items"`
}
type PaginatedWithdrawals struct {
Param *PaginationParam `json:"pagination"`
Withdrawals []WithdrawalJSON `json:"items"`
}
package db
const createL1BlocksTable = `
CREATE TABLE IF NOT EXISTS l1_blocks (
hash VARCHAR NOT NULL PRIMARY KEY,
parent_hash VARCHAR NOT NULL,
number INTEGER NOT NULL,
timestamp INTEGER NOT NULL
)
`
const createL2BlocksTable = `
CREATE TABLE IF NOT EXISTS l2_blocks (
hash VARCHAR NOT NULL PRIMARY KEY,
parent_hash VARCHAR NOT NULL,
number INTEGER NOT NULL,
timestamp INTEGER NOT NULL
)
`
const createDepositsTable = `
CREATE TABLE IF NOT EXISTS deposits (
guid VARCHAR PRIMARY KEY NOT NULL,
from_address VARCHAR NOT NULL,
to_address VARCHAR NOT NULL,
l1_token VARCHAR NOT NULL REFERENCES l1_tokens(address),
l2_token VARCHAR NOT NULL,
amount VARCHAR NOT NULL,
data BYTEA NOT NULL,
log_index INTEGER NOT NULL,
block_hash VARCHAR NOT NULL REFERENCES l1_blocks(hash),
tx_hash VARCHAR NOT NULL
)
`
const createL1TokensTable = `
CREATE TABLE IF NOT EXISTS l1_tokens (
address VARCHAR NOT NULL PRIMARY KEY,
name VARCHAR NOT NULL,
symbol VARCHAR NOT NULL,
decimals INTEGER NOT NULL
)
`
const createL2TokensTable = `
CREATE TABLE IF NOT EXISTS l2_tokens (
address TEXT NOT NULL PRIMARY KEY,
name TEXT NOT NULL,
symbol TEXT NOT NULL,
decimals INTEGER NOT NULL
)
`
const createStateBatchesTable = `
CREATE TABLE IF NOT EXISTS state_batches (
index INTEGER NOT NULL PRIMARY KEY,
root VARCHAR NOT NULL,
size INTEGER NOT NULL,
prev_total INTEGER NOT NULL,
extra_data BYTEA NOT NULL,
block_hash VARCHAR NOT NULL REFERENCES l1_blocks(hash)
);
CREATE INDEX IF NOT EXISTS state_batches_block_hash ON state_batches(block_hash);
CREATE INDEX IF NOT EXISTS state_batches_size ON state_batches(size);
CREATE INDEX IF NOT EXISTS state_batches_prev_total ON state_batches(prev_total);
`
const createWithdrawalsTable = `
CREATE TABLE IF NOT EXISTS withdrawals (
guid VARCHAR PRIMARY KEY NOT NULL,
from_address VARCHAR NOT NULL,
to_address VARCHAR NOT NULL,
l1_token VARCHAR NOT NULL,
l2_token VARCHAR NOT NULL REFERENCES l2_tokens(address),
amount VARCHAR NOT NULL,
data BYTEA NOT NULL,
log_index INTEGER NOT NULL,
block_hash VARCHAR NOT NULL REFERENCES l2_blocks(hash),
tx_hash VARCHAR NOT NULL,
state_batch INTEGER REFERENCES state_batches(index)
)
`
const insertETHL1Token = `
INSERT INTO l1_tokens
(address, name, symbol, decimals)
VALUES ('0x0000000000000000000000000000000000000000', 'Ethereum', 'ETH', 18)
ON CONFLICT (address) DO NOTHING;
`
// Earlier transactions used 0x0000000000000000000000000000000000000000 as the
// address of ETH, so insert both that address and
// 0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000.
const insertETHL2Token = `
INSERT INTO l2_tokens
(address, name, symbol, decimals)
VALUES ('0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000', 'Ethereum', 'ETH', 18)
ON CONFLICT (address) DO NOTHING;
INSERT INTO l2_tokens
(address, name, symbol, decimals)
VALUES ('0x0000000000000000000000000000000000000000', 'Ethereum', 'ETH', 18)
ON CONFLICT (address) DO NOTHING;
`
const createL1L2NumberIndex = `
CREATE UNIQUE INDEX IF NOT EXISTS l1_blocks_number ON l1_blocks(number);
CREATE UNIQUE INDEX IF NOT EXISTS l2_blocks_number ON l2_blocks(number);
`
const createAirdropsTable = `
CREATE TABLE IF NOT EXISTS airdrops (
address VARCHAR(42) PRIMARY KEY,
voter_amount VARCHAR NOT NULL DEFAULT '0' CHECK(voter_amount ~ '^\d+$'),
multisig_signer_amount VARCHAR NOT NULL DEFAULT '0' CHECK(multisig_signer_amount ~ '^\d+$'),
gitcoin_amount VARCHAR NOT NULL DEFAULT '0' CHECK(gitcoin_amount ~ '^\d+$'),
active_bridged_amount VARCHAR NOT NULL DEFAULT '0' CHECK(active_bridged_amount ~ '^\d+$'),
op_user_amount VARCHAR NOT NULL DEFAULT '0' CHECK(op_user_amount ~ '^\d+$'),
op_repeat_user_amount VARCHAR NOT NULL DEFAULT '0' CHECK(op_repeat_user_amount ~ '^\d+$'),
op_og_amount VARCHAR NOT NULL DEFAULT '0' CHECK(op_og_amount ~ '^\d+$'),
bonus_amount VARCHAR NOT NULL DEFAULT '0' CHECK(bonus_amount ~ '^\d+$'),
total_amount VARCHAR NOT NULL CHECK(total_amount ~ '^\d+$')
)
`
const updateWithdrawalsTable = `
ALTER TABLE withdrawals ADD COLUMN IF NOT EXISTS br_withdrawal_hash VARCHAR NULL;
ALTER TABLE withdrawals ADD COLUMN IF NOT EXISTS br_withdrawal_proven_tx_hash VARCHAR NULL;
ALTER TABLE withdrawals ADD COLUMN IF NOT EXISTS br_withdrawal_proven_log_index INTEGER NULL;
ALTER TABLE withdrawals ADD COLUMN IF NOT EXISTS br_withdrawal_finalized_tx_hash VARCHAR NULL;
ALTER TABLE withdrawals ADD COLUMN IF NOT EXISTS br_withdrawal_finalized_log_index INTEGER NULL;
ALTER TABLE withdrawals ADD COLUMN IF NOT EXISTS br_withdrawal_finalized_success BOOLEAN NULL;
CREATE INDEX IF NOT EXISTS withdrawals_br_withdrawal_hash ON withdrawals(br_withdrawal_hash);
`
var schema = []string{
createL1BlocksTable,
createL2BlocksTable,
createL1TokensTable,
createL2TokensTable,
createStateBatchesTable,
insertETHL1Token,
insertETHL2Token,
createDepositsTable,
createWithdrawalsTable,
createL1L2NumberIndex,
createAirdropsTable,
updateWithdrawalsTable,
}
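For illustration, one way the statements above might be applied in order (a hypothetical `applySchema` helper, not the indexer's actual bootstrap code), using the `txn` wrapper defined below so a partial migration rolls back:

package db

import "database/sql"

// applySchema runs every statement in the schema slice, in order, inside a
// single transaction. A failure on any statement rolls the whole batch back.
func applySchema(conn *sql.DB) error {
	return txn(conn, func(tx *sql.Tx) error {
		for _, stmt := range schema {
			if _, err := tx.Exec(stmt); err != nil {
				return err
			}
		}
		return nil
	})
}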
package db
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
)
// StateBatch is the state batch containing the merkle root of the withdrawals
// periodically written to L1.
type StateBatch struct {
Index *big.Int
Root common.Hash
Size *big.Int
PrevTotal *big.Int
ExtraData []byte
BlockHash common.Hash
}
// StateBatchJSON contains StateBatch data suitable for JSON serialization.
type StateBatchJSON struct {
Index uint64 `json:"index"`
Root string `json:"root"`
Size uint64 `json:"size"`
PrevTotal uint64 `json:"prevTotal"`
ExtraData []byte `json:"extraData"`
BlockHash string `json:"blockHash"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp uint64 `json:"blockTimestamp"`
}
package db
// Token contains the token details of the ERC20 contract at the given address.
// NOTE: The Token address will almost definitely be different on L1 and L2, so
// we need to track it on both chains when handling transactions.
type Token struct {
Address string `json:"address"`
Name string `json:"name"`
Symbol string `json:"symbol"`
Decimals uint8 `json:"decimals"`
}
package db
import "database/sql"
func txn(db *sql.DB, apply func(*sql.Tx) error) error {
tx, err := db.Begin()
if err != nil {
return err
}
defer func() {
if p := recover(); p != nil {
// Ignore since we're panicking anyway
_ = tx.Rollback()
panic(p)
}
}()
err = apply(tx)
if err != nil {
// Don't swallow application error
_ = tx.Rollback()
return err
}
return tx.Commit()
}
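A small usage sketch of the `txn` helper (hypothetical `insertToken` function; the statement mirrors the `l1_tokens` schema above). The closure runs inside a transaction that is committed on success and rolled back on error or panic:

package db

import "database/sql"

// insertToken upserts a token row inside a transaction managed by txn.
func insertToken(conn *sql.DB, t Token) error {
	return txn(conn, func(tx *sql.Tx) error {
		_, err := tx.Exec(
			`INSERT INTO l1_tokens (address, name, symbol, decimals)
			 VALUES ($1, $2, $3, $4)
			 ON CONFLICT (address) DO NOTHING`,
			t.Address, t.Name, t.Symbol, t.Decimals,
		)
		return err
	})
}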
package db
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
)
// Withdrawal contains transaction data for withdrawals made via the L2 to L1 bridge.
type Withdrawal struct {
GUID string
TxHash common.Hash
L1Token common.Address
L2Token common.Address
FromAddress common.Address
ToAddress common.Address
Amount *big.Int
Data []byte
LogIndex uint
BedrockHash *common.Hash
}
// String returns the tx hash for the withdrawal.
func (w Withdrawal) String() string {
return w.TxHash.String()
}
// WithdrawalJSON contains Withdrawal data suitable for JSON serialization.
type WithdrawalJSON struct {
GUID string `json:"guid"`
FromAddress string `json:"from"`
ToAddress string `json:"to"`
L1Token string `json:"l1Token"`
L2Token *Token `json:"l2Token"`
Amount string `json:"amount"`
Data []byte `json:"data"`
LogIndex uint64 `json:"logIndex"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp string `json:"blockTimestamp"`
TxHash string `json:"transactionHash"`
Batch *StateBatchJSON `json:"batch"`
BedrockWithdrawalHash *string `json:"bedrockWithdrawalHash"`
BedrockProvenTxHash *string `json:"bedrockProvenTxHash"`
BedrockProvenLogIndex *int `json:"bedrockProvenLogIndex"`
BedrockFinalizedTxHash *string `json:"bedrockFinalizedTxHash"`
BedrockFinalizedLogIndex *int `json:"bedrockFinalizedLogIndex"`
BedrockFinalizedSuccess *bool `json:"bedrockFinalizedSuccess"`
}
type FinalizationState int
const (
FinalizationStateAny FinalizationState = iota
FinalizationStateFinalized
FinalizationStateUnfinalized
)
func ParseFinalizationState(in string) FinalizationState {
switch in {
case "true":
return FinalizationStateFinalized
case "false":
return FinalizationStateUnfinalized
default:
return FinalizationStateAny
}
}
func (f FinalizationState) SQL() string {
switch f {
case FinalizationStateFinalized:
return "AND withdrawals.br_withdrawal_finalized_tx_hash IS NOT NULL"
case FinalizationStateUnfinalized:
return "AND withdrawals.br_withdrawal_finalized_tx_hash IS NULL"
}
return ""
}
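A hypothetical sketch of how the finalization filter might be folded into a query string (`withdrawalsQuery` is illustrative only): `ParseFinalizationState` maps the raw query parameter to the enum, and `SQL()` contributes the extra predicate, which is empty for `FinalizationStateAny`:

package db

import "fmt"

// withdrawalsQuery builds a withdrawals query with an optional finalization predicate.
func withdrawalsQuery(rawFinalized string) string {
	state := ParseFinalizationState(rawFinalized) // "true", "false", or anything else
	return fmt.Sprintf(
		"SELECT * FROM withdrawals WHERE withdrawals.from_address = $1 %s ORDER BY withdrawals.log_index",
		state.SQL(),
	)
}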
type ProvenWithdrawal struct {
From common.Address
To common.Address
WithdrawalHash common.Hash
TxHash common.Hash
LogIndex uint
}
type FinalizedWithdrawal struct {
WithdrawalHash common.Hash
TxHash common.Hash
Success bool
LogIndex uint
}
...@@ -17,6 +17,29 @@ services: ...@@ -17,6 +17,29 @@ services:
- postgres_data:/data/postgres - postgres_data:/data/postgres
indexer: indexer:
build:
context: ..
dockerfile: indexer/Dockerfile.refresh
command: ["indexer-refresh", "processor"]
# healthcheck:
# Add a healthcheck once we figure out a good way to do it,
# maybe after we add metrics?
ports:
- 8080:8080
environment:
- INDEXER_DB_PORT=5432
- INDEXER_DB_USER=db_username
- INDEXER_DB_PASSWORD=db_password
- INDEXER_DB_NAME=db_name
- INDEXER_DB_HOST=postgres
- INDEXER_CONFIG=/configs/indexer.toml
volumes:
- ./indexer.toml:/configs/indexer.toml
depends_on:
postgres:
condition: service_healthy
api:
build: build:
context: .. context: ..
dockerfile: indexer/Dockerfile dockerfile: indexer/Dockerfile
...@@ -75,5 +98,121 @@ services: ...@@ -75,5 +98,121 @@ services:
postgres: postgres:
condition: service_healthy condition: service_healthy
gateway-frontend:
command: pnpm nx start @gateway/frontend --host 0.0.0.0 --port 5173
# Change tag to `latest` after https://github.com/ethereum-optimism/gateway/pull/2541 merges
image: ethereumoptimism/gateway-frontend:0687c408e4f85cbe81acf7a197e861df8c7282df
ports:
- 5173:5173
healthcheck:
test: curl http://0.0.0.0:5173
environment:
- VITE_GROWTHBOOK=${VITE_GROWTHBOOK:-https://cdn.growthbook.io/api/features/dev_iGoAbSwtGOtEJONeHdVTosV0BD3TvTPttAccGyRxqsk}
- VITE_ENABLE_DEVNET=true
- VITE_RPC_URL_ETHEREUM_MAINNET=$VITE_RPC_URL_ETHEREUM_MAINNET
- VITE_RPC_URL_ETHEREUM_OPTIMISM_MAINNET=$VITE_RPC_URL_OPTIMISM_MAINNET
- VITE_RPC_URL_ETHEREUM_GOERLI=$VITE_RPC_URL_ETHEREUM_GOERLI
- VITE_RPC_URL_ETHEREUM_OPTIMISM_GOERLI=$VITE_RPC_URL_OPTIMISM_GOERLI
- VITE_BACKEND_URL_MAINNET=http://localhost:7421
- VITE_BACKEND_URL_GOERLI=http://localhost:7422
- VITE_ENABLE_ALL_FEATURES=true
backend-mainnet:
image: ethereumoptimism/gateway-backend:latest
environment:
# this enables the backend to proxy history requests to the indexer
- BRIDGE_INDEXER_URI=http://api
- HOST=0.0.0.0
- PORT=7300
- MIGRATE_APP_DB_USER=${MIGRATE_APP_DB_USER:-postgres}
- MIGRATE_APP_DB_PASSWORD=${MIGRATE_APP_DB_PASSWORD:-db_password}
- APP_DB_HOST=${APP_DB_HOST:-postgres-app}
- APP_DB_USER=${APP_DB_USER:-gateway-backend-mainnet@oplabs-local-web.iam}
- APP_DB_NAME=${APP_DB_NAME:-gateway}
- APP_DB_PORT=${APP_DB_PORT:-5432}
# This is for the legacy indexer, which won't be used, but the env variable is still required
- INDEXER_DB_HOST=postgres-mainnet
# This is for the legacy indexer, which won't be used, but the env variable is still required
- INDEXER_DB_USER=db_username
# This is for the legacy indexer, which won't be used, but the env variable is still required
- INDEXER_DB_PASS=db_password
# This is for the legacy indexer, which won't be used, but the env variable is still required
- INDEXER_DB_NAME=db_name
# This is for the legacy indexer, which won't be used, but the env variable is still required
- INDEXER_DB_PORT=5432
# This is for the legacy indexer, which won't be used, but the env variable is still required
- DATABASE_URL=postgres://db_username:db_password@postgres-mainnet:5432/db_name
- JSON_RPC_URLS_L1=$JSON_RPC_URLS_L1_MAINNET
- JSON_RPC_URLS_L2=$JSON_RPC_URLS_L2_MAINNET
- JSON_RPC_URLS_L2_GOERLI=$JSON_RPC_URLS_L2_GOERLI
# anvil[0] private key as a placeholder
- FAUCET_AUTH_ADMIN_WALLET_PRIVATE_KEY=${FAUCET_AUTH_ADMIN_WALLET_PRIVATE_KEY:-0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80}
- IRON_SESSION_SECRET=${IRON_SESSION_SECRET:-UNKNOWN_IRON_SESSION_PASSWORD_32}
- CHAIN_ID_L1=1
- CHAIN_ID_L2=10
- FLEEK_BUCKET_ADDRESS=34a609661-6774-441f-9fdb-453fdbb89931-bucket
- FLEEK_API_SECRET=$FLEEK_API_SECRET
- FLEEK_API_KEY=$FLEEK_API_KEY
- MOCK_MERKLE_PROOF=true
- LOOP_INTERVAL_MINUTES=.1
- GITHUB_CLIENT_ID=$GITHUB_CLIENT_ID
- GITHUB_SECRET=$GITHUB_SECRET
- MAINNET_BEDROCK=$MAINNET_BEDROCK
- TRM_API_KEY=$TRM_API_KEY
- GOOGLE_CLOUD_STORAGE_BUCKET_NAME=oplabs-dev-web-content
# Recommended to uncomment for local dev unless you need it
#- BYPASS_EVENT_LOG_POLLER_BOOTSTRAP=true
ports:
- 7421:7300
# overrides command in Dockerfile so we can hot reload the server in docker while developing
#command: ['pnpm', 'nx', 'run', '@gateway/backend:docker:watch']
healthcheck:
test: curl http://0.0.0.0:7300/api/v0/healthz
backend-goerli:
image: ethereumoptimism/gateway-backend:latest
environment:
# this enables the backend to proxy history requests to the indexer
- BRIDGE_INDEXER_URI=http://api
- HOST=0.0.0.0
- PORT=7300
- MIGRATE_APP_DB_USER=${MIGRATE_APP_DB_USER:-postgres}
- MIGRATE_APP_DB_PASSWORD=${MIGRATE_APP_DB_PASSWORD:-db_password}
- APP_DB_HOST=${APP_DB_HOST:-postgres-app}
- APP_DB_USER=${APP_DB_USER:-gateway-backend-goerli@oplabs-local-web.iam}
- APP_DB_NAME=${APP_DB_NAME:-gateway}
- APP_DB_PORT=${APP_DB_PORT:-5432}
- INDEXER_DB_HOST=${INDEXER_DB_HOST_GOERLI:-postgres-goerli}
- INDEXER_DB_USER=${INDEXER_DB_USER_GOERLI:-db_username}
- INDEXER_DB_PASS=${INDEXER_DB_PASSWORD_GOERLI:-db_password}
- INDEXER_DB_NAME=${INDEXER_DB_NAME_GOERLI:-db_name}
- INDEXER_DB_PORT=${INDEXER_DB_PORT_GOERLI:-5432}
- DATABASE_URL=${DATABASE_URL_GOERLI:-postgres://db_username:db_password@postgres-goerli:5432/db_name}
- JSON_RPC_URLS_L1=$JSON_RPC_URLS_L1_GOERLI
- JSON_RPC_URLS_L2=$JSON_RPC_URLS_L2_GOERLI
- JSON_RPC_URLS_L2_GOERLI=$JSON_RPC_URLS_L2_GOERLI
- FAUCET_AUTH_ADMIN_WALLET_PRIVATE_KEY=$FAUCET_AUTH_ADMIN_WALLET_PRIVATE_KEY
- IRON_SESSION_SECRET=${IRON_SESSION_SECRET:-UNKNOWN_IRON_SESSION_PASSWORD_32}
- CHAIN_ID_L1=5
- CHAIN_ID_L2=420
- FLEEK_BUCKET_ADDRESS=34a609661-6774-441f-9fdb-453fdbb89931-bucket
- FLEEK_API_SECRET=$FLEEK_API_SECRET
- FLEEK_API_KEY=$FLEEK_API_KEY
- MOCK_MERKLE_PROOF=true
- LOOP_INTERVAL_MINUTES=.1
- GITHUB_CLIENT_ID=$GITHUB_CLIENT_ID
- GITHUB_SECRET=$GITHUB_SECRET
- MAINNET_BEDROCK=true
- TRM_API_KEY=$TRM_API_KEY
- GOOGLE_CLOUD_STORAGE_BUCKET_NAME=oplabs-dev-web-content
# Recommended to uncomment for local dev unless you need it
#- BYPASS_EVENT_LOG_POLLER_BOOTSTRAP=true
ports:
- 7422:7300
# overrides command in Dockerfile so we can hot reload the server in docker while developing
#command: ['pnpm', 'nx', 'run', '@gateway/backend:docker:watch']
healthcheck:
test: curl http://0.0.0.0:7300/api/v0/healthz
volumes: volumes:
postgres_data: postgres_data:
...@@ -7,10 +7,9 @@ import ( ...@@ -7,10 +7,9 @@ import (
"time" "time"
"github.com/ethereum-optimism/optimism/indexer/node" "github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys" "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
...@@ -28,7 +27,7 @@ func TestE2EBlockHeaders(t *testing.T) { ...@@ -28,7 +27,7 @@ func TestE2EBlockHeaders(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// wait for at least 10 L2 blocks to be created & posted on L1 // wait for at least 10 L2 blocks to be created & posted on L1
require.NoError(t, utils.WaitFor(context.Background(), time.Second, func() (bool, error) { require.NoError(t, wait.For(context.Background(), time.Second, func() (bool, error) {
l2Height, err := l2OutputOracle.LatestBlockNumber(&bind.CallOpts{Context: context.Background()}) l2Height, err := l2OutputOracle.LatestBlockNumber(&bind.CallOpts{Context: context.Background()})
return l2Height != nil && l2Height.Uint64() >= 9, err return l2Height != nil && l2Height.Uint64() >= 9, err
})) }))
...@@ -36,7 +35,7 @@ func TestE2EBlockHeaders(t *testing.T) { ...@@ -36,7 +35,7 @@ func TestE2EBlockHeaders(t *testing.T) {
// ensure the processors are caught up to this state // ensure the processors are caught up to this state
l1Height, err := testSuite.L1Client.BlockNumber(context.Background()) l1Height, err := testSuite.L1Client.BlockNumber(context.Background())
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, utils.WaitFor(context.Background(), time.Second, func() (bool, error) { require.NoError(t, wait.For(context.Background(), time.Second, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader() l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader() l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return (l1Header != nil && l1Header.Number.Uint64() >= l1Height) && (l2Header != nil && l2Header.Number.Uint64() >= 9), nil return (l1Header != nil && l1Header.Number.Uint64() >= l1Height) && (l2Header != nil && l2Header.Number.Uint64() >= 9), nil
...@@ -65,7 +64,7 @@ func TestE2EBlockHeaders(t *testing.T) { ...@@ -65,7 +64,7 @@ func TestE2EBlockHeaders(t *testing.T) {
require.Equal(t, header.Time, indexedHeader.Timestamp) require.Equal(t, header.Time, indexedHeader.Timestamp)
// ensure the right rlp encoding is stored. checking the hashes suffices // ensure the right rlp encoding is stored. checking the hashes suffices
require.Equal(t, header.Hash(), indexedHeader.GethHeader.Hash()) require.Equal(t, header.Hash(), indexedHeader.RLPHeader.Hash())
} }
}) })
...@@ -127,7 +126,7 @@ func TestE2EBlockHeaders(t *testing.T) { ...@@ -127,7 +126,7 @@ func TestE2EBlockHeaders(t *testing.T) {
// ensure the right rlp encoding of the contract log is stored // ensure the right rlp encoding of the contract log is stored
logRlp, err := rlp.EncodeToBytes(&log) logRlp, err := rlp.EncodeToBytes(&log)
require.NoError(t, err) require.NoError(t, err)
contractEventRlp, err := rlp.EncodeToBytes(contractEvent.GethLog) contractEventRlp, err := rlp.EncodeToBytes(contractEvent.RLPLog)
require.NoError(t, err) require.NoError(t, err)
require.ElementsMatch(t, logRlp, contractEventRlp) require.ElementsMatch(t, logRlp, contractEventRlp)
...@@ -146,7 +145,7 @@ func TestE2EBlockHeaders(t *testing.T) { ...@@ -146,7 +145,7 @@ func TestE2EBlockHeaders(t *testing.T) {
// ensure the right rlp encoding is stored. checking the hashes // ensure the right rlp encoding is stored. checking the hashes
// suffices as it is based on the rlp bytes of the header // suffices as it is based on the rlp bytes of the header
require.Equal(t, block.Hash(), l1BlockHeader.GethHeader.Hash()) require.Equal(t, block.Hash(), l1BlockHeader.RLPHeader.Hash())
} }
}) })
} }
...@@ -10,9 +10,8 @@ import ( ...@@ -10,9 +10,8 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys" "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e" op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/withdrawals" "github.com/ethereum-optimism/optimism/op-node/withdrawals"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
...@@ -39,14 +38,14 @@ func TestE2EBridgeL1CrossDomainMessenger(t *testing.T) { ...@@ -39,14 +38,14 @@ func TestE2EBridgeL1CrossDomainMessenger(t *testing.T) {
// (1) Send the Message // (1) Send the Message
sentMsgTx, err := l1CrossDomainMessenger.SendMessage(l1Opts, aliceAddr, calldata, 100_000) sentMsgTx, err := l1CrossDomainMessenger.SendMessage(l1Opts, aliceAddr, calldata, 100_000)
require.NoError(t, err) require.NoError(t, err)
sentMsgReceipt, err := utils.WaitReceiptOK(context.Background(), testSuite.L1Client, sentMsgTx.Hash()) sentMsgReceipt, err := wait.ForReceiptOK(context.Background(), testSuite.L1Client, sentMsgTx.Hash())
require.NoError(t, err) require.NoError(t, err)
depositInfo, err := e2etest_utils.ParseDepositInfo(sentMsgReceipt) depositInfo, err := e2etest_utils.ParseDepositInfo(sentMsgReceipt)
require.NoError(t, err) require.NoError(t, err)
// wait for processor catchup // wait for processor catchup
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader() l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= sentMsgReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= sentMsgReceipt.BlockNumber.Uint64(), nil
})) }))
...@@ -77,9 +76,9 @@ func TestE2EBridgeL1CrossDomainMessenger(t *testing.T) { ...@@ -77,9 +76,9 @@ func TestE2EBridgeL1CrossDomainMessenger(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// wait for processor catchup // wait for processor catchup
depositReceipt, err := utils.WaitReceiptOK(context.Background(), testSuite.L2Client, transaction.L2TransactionHash) depositReceipt, err := wait.ForReceiptOK(context.Background(), testSuite.L2Client, transaction.L2TransactionHash)
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader() l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return l2Header != nil && l2Header.Number.Uint64() >= depositReceipt.BlockNumber.Uint64(), nil return l2Header != nil && l2Header.Number.Uint64() >= depositReceipt.BlockNumber.Uint64(), nil
})) }))
...@@ -117,13 +116,13 @@ func TestE2EBridgeL2CrossDomainMessenger(t *testing.T) { ...@@ -117,13 +116,13 @@ func TestE2EBridgeL2CrossDomainMessenger(t *testing.T) {
l1Opts.Value = l2Opts.Value l1Opts.Value = l2Opts.Value
depositTx, err := optimismPortal.Receive(l1Opts) depositTx, err := optimismPortal.Receive(l1Opts)
require.NoError(t, err) require.NoError(t, err)
_, err = utils.WaitReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash()) _, err = wait.ForReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash())
require.NoError(t, err) require.NoError(t, err)
// (1) Send the Message // (1) Send the Message
sentMsgTx, err := l2CrossDomainMessenger.SendMessage(l2Opts, aliceAddr, calldata, 100_000) sentMsgTx, err := l2CrossDomainMessenger.SendMessage(l2Opts, aliceAddr, calldata, 100_000)
require.NoError(t, err) require.NoError(t, err)
sentMsgReceipt, err := utils.WaitReceiptOK(context.Background(), testSuite.L2Client, sentMsgTx.Hash()) sentMsgReceipt, err := wait.ForReceiptOK(context.Background(), testSuite.L2Client, sentMsgTx.Hash())
require.NoError(t, err) require.NoError(t, err)
msgPassed, err := withdrawals.ParseMessagePassed(sentMsgReceipt) msgPassed, err := withdrawals.ParseMessagePassed(sentMsgReceipt)
...@@ -132,7 +131,7 @@ func TestE2EBridgeL2CrossDomainMessenger(t *testing.T) { ...@@ -132,7 +131,7 @@ func TestE2EBridgeL2CrossDomainMessenger(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// wait for processor catchup // wait for processor catchup
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader() l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return l2Header != nil && l2Header.Number.Uint64() >= sentMsgReceipt.BlockNumber.Uint64(), nil return l2Header != nil && l2Header.Number.Uint64() >= sentMsgReceipt.BlockNumber.Uint64(), nil
})) }))
...@@ -161,7 +160,7 @@ func TestE2EBridgeL2CrossDomainMessenger(t *testing.T) { ...@@ -161,7 +160,7 @@ func TestE2EBridgeL2CrossDomainMessenger(t *testing.T) {
_, finalizedReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, sentMsgReceipt) _, finalizedReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, sentMsgReceipt)
// wait for processor catchup // wait for processor catchup
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader() l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= finalizedReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= finalizedReceipt.BlockNumber.Uint64(), nil
})) }))
......
...@@ -7,13 +7,12 @@ import ( ...@@ -7,13 +7,12 @@ import (
"time" "time"
e2etest_utils "github.com/ethereum-optimism/optimism/indexer/e2e_tests/utils" e2etest_utils "github.com/ethereum-optimism/optimism/indexer/e2e_tests/utils"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys" "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e" op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-node/withdrawals" "github.com/ethereum-optimism/optimism/op-node/withdrawals"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
...@@ -37,7 +36,7 @@ func TestE2EBridgeTransactionsOptimismPortalDeposits(t *testing.T) { ...@@ -37,7 +36,7 @@ func TestE2EBridgeTransactionsOptimismPortalDeposits(t *testing.T) {
depositTx, err := optimismPortal.DepositTransaction(l1Opts, aliceAddr, l1Opts.Value, 100_000, false, calldata) depositTx, err := optimismPortal.DepositTransaction(l1Opts, aliceAddr, l1Opts.Value, 100_000, false, calldata)
require.NoError(t, err) require.NoError(t, err)
depositReceipt, err := utils.WaitReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash()) depositReceipt, err := wait.ForReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash())
require.NoError(t, err) require.NoError(t, err)
depositInfo, err := e2etest_utils.ParseDepositInfo(depositReceipt) depositInfo, err := e2etest_utils.ParseDepositInfo(depositReceipt)
...@@ -46,7 +45,7 @@ func TestE2EBridgeTransactionsOptimismPortalDeposits(t *testing.T) { ...@@ -46,7 +45,7 @@ func TestE2EBridgeTransactionsOptimismPortalDeposits(t *testing.T) {
depositL2TxHash := types.NewTx(depositInfo.DepositTx).Hash() depositL2TxHash := types.NewTx(depositInfo.DepositTx).Hash()
// wait for processor catchup // wait for processor catchup
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader() l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= depositReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= depositReceipt.BlockNumber.Uint64(), nil
})) }))
...@@ -94,16 +93,16 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserWithdrawal(t *testing.T) { ...@@ -94,16 +93,16 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserWithdrawal(t *testing.T) {
l1Opts.Value = l2Opts.Value l1Opts.Value = l2Opts.Value
depositTx, err := optimismPortal.Receive(l1Opts) depositTx, err := optimismPortal.Receive(l1Opts)
require.NoError(t, err) require.NoError(t, err)
_, err = utils.WaitReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash()) _, err = wait.ForReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash())
require.NoError(t, err) require.NoError(t, err)
withdrawTx, err := l2ToL1MessagePasser.InitiateWithdrawal(l2Opts, aliceAddr, big.NewInt(100_000), calldata) withdrawTx, err := l2ToL1MessagePasser.InitiateWithdrawal(l2Opts, aliceAddr, big.NewInt(100_000), calldata)
require.NoError(t, err) require.NoError(t, err)
withdrawReceipt, err := utils.WaitReceiptOK(context.Background(), testSuite.L2Client, withdrawTx.Hash()) withdrawReceipt, err := wait.ForReceiptOK(context.Background(), testSuite.L2Client, withdrawTx.Hash())
require.NoError(t, err) require.NoError(t, err)
// wait for processor catchup // wait for processor catchup
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader() l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return l2Header != nil && l2Header.Number.Uint64() >= withdrawReceipt.BlockNumber.Uint64(), nil return l2Header != nil && l2Header.Number.Uint64() >= withdrawReceipt.BlockNumber.Uint64(), nil
})) }))
...@@ -132,7 +131,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserWithdrawal(t *testing.T) { ...@@ -132,7 +131,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserWithdrawal(t *testing.T) {
require.Nil(t, withdraw.FinalizedL1EventGUID) require.Nil(t, withdraw.FinalizedL1EventGUID)
withdrawParams, proveReceipt := op_e2e.ProveWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt) withdrawParams, proveReceipt := op_e2e.ProveWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt)
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader() l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= proveReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= proveReceipt.BlockNumber.Uint64(), nil
})) }))
...@@ -150,7 +149,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserWithdrawal(t *testing.T) { ...@@ -150,7 +149,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserWithdrawal(t *testing.T) {
require.Nil(t, withdraw.FinalizedL1EventGUID) require.Nil(t, withdraw.FinalizedL1EventGUID)
finalizeReceipt := op_e2e.FinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpCfg.Secrets.Alice, proveReceipt, withdrawParams) finalizeReceipt := op_e2e.FinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpCfg.Secrets.Alice, proveReceipt, withdrawParams)
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader() l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil
})) }))
...@@ -182,7 +181,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserFailedWithdrawal(t *testing.T) ...@@ -182,7 +181,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserFailedWithdrawal(t *testing.T)
withdrawTx, err := l2ToL1MessagePasser.InitiateWithdrawal(l2Opts, aliceAddr, big.NewInt(100_000), nil) withdrawTx, err := l2ToL1MessagePasser.InitiateWithdrawal(l2Opts, aliceAddr, big.NewInt(100_000), nil)
require.NoError(t, err) require.NoError(t, err)
withdrawReceipt, err := utils.WaitReceiptOK(context.Background(), testSuite.L2Client, withdrawTx.Hash()) withdrawReceipt, err := wait.ForReceiptOK(context.Background(), testSuite.L2Client, withdrawTx.Hash())
require.NoError(t, err) require.NoError(t, err)
msgPassed, err := withdrawals.ParseMessagePassed(withdrawReceipt) msgPassed, err := withdrawals.ParseMessagePassed(withdrawReceipt)
...@@ -192,7 +191,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserFailedWithdrawal(t *testing.T) ...@@ -192,7 +191,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserFailedWithdrawal(t *testing.T)
// Prove&Finalize withdrawal // Prove&Finalize withdrawal
_, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt) _, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt)
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader() l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil
})) }))
......
...@@ -9,12 +9,11 @@ import ( ...@@ -9,12 +9,11 @@ import (
e2etest_utils "github.com/ethereum-optimism/optimism/indexer/e2e_tests/utils" e2etest_utils "github.com/ethereum-optimism/optimism/indexer/e2e_tests/utils"
"github.com/ethereum-optimism/optimism/indexer/processor" "github.com/ethereum-optimism/optimism/indexer/processor"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e" op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/withdrawals" "github.com/ethereum-optimism/optimism/op-node/withdrawals"
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys" "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
...@@ -37,14 +36,14 @@ func TestE2EBridgeTransfersStandardBridgeETHDeposit(t *testing.T) { ...@@ -37,14 +36,14 @@ func TestE2EBridgeTransfersStandardBridgeETHDeposit(t *testing.T) {
// (1) Test Deposit Initiation // (1) Test Deposit Initiation
depositTx, err := l1StandardBridge.DepositETH(l1Opts, 200_000, []byte{byte(1)}) depositTx, err := l1StandardBridge.DepositETH(l1Opts, 200_000, []byte{byte(1)})
require.NoError(t, err) require.NoError(t, err)
depositReceipt, err := utils.WaitReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash()) depositReceipt, err := wait.ForReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash())
require.NoError(t, err) require.NoError(t, err)
depositInfo, err := e2etest_utils.ParseDepositInfo(depositReceipt) depositInfo, err := e2etest_utils.ParseDepositInfo(depositReceipt)
require.NoError(t, err) require.NoError(t, err)
// wait for processor catchup // wait for processor catchup
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader() l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= depositReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= depositReceipt.BlockNumber.Uint64(), nil
})) }))
...@@ -71,9 +70,9 @@ func TestE2EBridgeTransfersStandardBridgeETHDeposit(t *testing.T) { ...@@ -71,9 +70,9 @@ func TestE2EBridgeTransfersStandardBridgeETHDeposit(t *testing.T) {
require.Zero(t, nonce.Uint64()) require.Zero(t, nonce.Uint64())
// (2) Test Deposit Finalization via CrossDomainMessenger relayed message // (2) Test Deposit Finalization via CrossDomainMessenger relayed message
depositReceipt, err = utils.WaitReceiptOK(context.Background(), testSuite.L2Client, types.NewTx(depositInfo.DepositTx).Hash()) depositReceipt, err = wait.ForReceiptOK(context.Background(), testSuite.L2Client, types.NewTx(depositInfo.DepositTx).Hash())
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader() l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return l2Header != nil && l2Header.Number.Uint64() >= depositReceipt.BlockNumber.Uint64(), nil return l2Header != nil && l2Header.Number.Uint64() >= depositReceipt.BlockNumber.Uint64(), nil
})) }))
...@@ -99,14 +98,14 @@ func TestE2EBridgeTransfersOptimismPortalETHReceive(t *testing.T) { ...@@ -99,14 +98,14 @@ func TestE2EBridgeTransfersOptimismPortalETHReceive(t *testing.T) {
// (1) Test Deposit Initiation // (1) Test Deposit Initiation
portalDepositTx, err := optimismPortal.Receive(l1Opts) portalDepositTx, err := optimismPortal.Receive(l1Opts)
require.NoError(t, err) require.NoError(t, err)
portalDepositReceipt, err := utils.WaitReceiptOK(context.Background(), testSuite.L1Client, portalDepositTx.Hash()) portalDepositReceipt, err := wait.ForReceiptOK(context.Background(), testSuite.L1Client, portalDepositTx.Hash())
require.NoError(t, err) require.NoError(t, err)
depositInfo, err := e2etest_utils.ParseDepositInfo(portalDepositReceipt) depositInfo, err := e2etest_utils.ParseDepositInfo(portalDepositReceipt)
require.NoError(t, err) require.NoError(t, err)
// wait for processor catchup // wait for processor catchup
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader() l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= portalDepositReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= portalDepositReceipt.BlockNumber.Uint64(), nil
})) }))
...@@ -152,17 +151,17 @@ func TestE2EBridgeTransfersStandardBridgeETHWithdrawal(t *testing.T) { ...@@ -152,17 +151,17 @@ func TestE2EBridgeTransfersStandardBridgeETHWithdrawal(t *testing.T) {
l1Opts.Value = l2Opts.Value l1Opts.Value = l2Opts.Value
depositTx, err := optimismPortal.Receive(l1Opts) depositTx, err := optimismPortal.Receive(l1Opts)
require.NoError(t, err) require.NoError(t, err)
_, err = utils.WaitReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash()) _, err = wait.ForReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash())
require.NoError(t, err) require.NoError(t, err)
// (1) Test Withdrawal Initiation // (1) Test Withdrawal Initiation
withdrawTx, err := l2StandardBridge.Withdraw(l2Opts, predeploys.LegacyERC20ETHAddr, l2Opts.Value, 200_000, []byte{byte(1)}) withdrawTx, err := l2StandardBridge.Withdraw(l2Opts, predeploys.LegacyERC20ETHAddr, l2Opts.Value, 200_000, []byte{byte(1)})
require.NoError(t, err) require.NoError(t, err)
withdrawReceipt, err := utils.WaitReceiptOK(context.Background(), testSuite.L2Client, withdrawTx.Hash()) withdrawReceipt, err := wait.ForReceiptOK(context.Background(), testSuite.L2Client, withdrawTx.Hash())
require.NoError(t, err) require.NoError(t, err)
// wait for processor catchup // wait for processor catchup
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader() l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return l2Header != nil && l2Header.Number.Uint64() >= withdrawReceipt.BlockNumber.Uint64(), nil return l2Header != nil && l2Header.Number.Uint64() >= withdrawReceipt.BlockNumber.Uint64(), nil
})) }))
...@@ -198,7 +197,7 @@ func TestE2EBridgeTransfersStandardBridgeETHWithdrawal(t *testing.T) { ...@@ -198,7 +197,7 @@ func TestE2EBridgeTransfersStandardBridgeETHWithdrawal(t *testing.T) {
// wait for processor catchup // wait for processor catchup
proveReceipt, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt) proveReceipt, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt)
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader() l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil
})) }))
...@@ -229,17 +228,17 @@ func TestE2EBridgeTransfersL2ToL1MessagePasserReceive(t *testing.T) { ...@@ -229,17 +228,17 @@ func TestE2EBridgeTransfersL2ToL1MessagePasserReceive(t *testing.T) {
l1Opts.Value = l2Opts.Value l1Opts.Value = l2Opts.Value
depositTx, err := optimismPortal.Receive(l1Opts) depositTx, err := optimismPortal.Receive(l1Opts)
require.NoError(t, err) require.NoError(t, err)
_, err = utils.WaitReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash()) _, err = wait.ForReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash())
require.NoError(t, err) require.NoError(t, err)
// (1) Test Withdrawal Initiation // (1) Test Withdrawal Initiation
l2ToL1MessagePasserWithdrawTx, err := l2ToL1MessagePasser.Receive(l2Opts) l2ToL1MessagePasserWithdrawTx, err := l2ToL1MessagePasser.Receive(l2Opts)
require.NoError(t, err) require.NoError(t, err)
l2ToL1WithdrawReceipt, err := utils.WaitReceiptOK(context.Background(), testSuite.L2Client, l2ToL1MessagePasserWithdrawTx.Hash()) l2ToL1WithdrawReceipt, err := wait.ForReceiptOK(context.Background(), testSuite.L2Client, l2ToL1MessagePasserWithdrawTx.Hash())
require.NoError(t, err) require.NoError(t, err)
// wait for processor catchup // wait for processor catchup
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader() l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return l2Header != nil && l2Header.Number.Uint64() >= l2ToL1WithdrawReceipt.BlockNumber.Uint64(), nil return l2Header != nil && l2Header.Number.Uint64() >= l2ToL1WithdrawReceipt.BlockNumber.Uint64(), nil
})) }))
...@@ -271,7 +270,7 @@ func TestE2EBridgeTransfersL2ToL1MessagePasserReceive(t *testing.T) { ...@@ -271,7 +270,7 @@ func TestE2EBridgeTransfersL2ToL1MessagePasserReceive(t *testing.T) {
// wait for processor catchup // wait for processor catchup
proveReceipt, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, l2ToL1WithdrawReceipt) proveReceipt, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, l2ToL1WithdrawReceipt)
require.NoError(t, utils.WaitFor(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader() l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil
})) }))
......
...@@ -13,10 +13,10 @@ import ( ...@@ -13,10 +13,10 @@ import (
"github.com/ethereum-optimism/optimism/indexer" "github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum-optimism/optimism/indexer/config" "github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database" "github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/processor"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e" op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
op_log "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
...@@ -59,7 +59,10 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite { ...@@ -59,7 +59,10 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
// Indexer Configuration and Start // Indexer Configuration and Start
indexerCfg := config.Config{ indexerCfg := config.Config{
Logger: logger,
Logger: op_log.CLIConfig{
Level: "warn",
},
DB: config.DBConfig{ DB: config.DBConfig{
Host: "127.0.0.1", Host: "127.0.0.1",
Port: 5432, Port: 5432,
...@@ -71,7 +74,7 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite { ...@@ -71,7 +74,7 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
L2RPC: opSys.Nodes["sequencer"].HTTPEndpoint(), L2RPC: opSys.Nodes["sequencer"].HTTPEndpoint(),
}, },
Chain: config.ChainConfig{ Chain: config.ChainConfig{
L1Contracts: processor.L1Contracts{ L1Contracts: config.L1Contracts{
OptimismPortal: opCfg.L1Deployments.OptimismPortalProxy, OptimismPortal: opCfg.L1Deployments.OptimismPortalProxy,
L2OutputOracle: opCfg.L1Deployments.L2OutputOracleProxy, L2OutputOracle: opCfg.L1Deployments.L2OutputOracleProxy,
L1CrossDomainMessenger: opCfg.L1Deployments.L1CrossDomainMessengerProxy, L1CrossDomainMessenger: opCfg.L1Deployments.L1CrossDomainMessengerProxy,
...@@ -81,9 +84,14 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite { ...@@ -81,9 +84,14 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
}, },
} }
db, err := database.NewDB(fmt.Sprintf("postgres://%s@localhost:5432/%s?sslmode=disable", dbUser, dbName)) db, err := database.NewDB(indexerCfg.DB)
require.NoError(t, err) require.NoError(t, err)
indexer, err := indexer.NewIndexer(indexerCfg) indexer, err := indexer.NewIndexer(
indexerCfg.Chain,
indexerCfg.RPCs,
db,
logger,
)
require.NoError(t, err) require.NoError(t, err)
indexerStoppedCh := make(chan interface{}, 1) indexerStoppedCh := make(chan interface{}, 1)
......
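A minimal sketch of the constructor shape this diff moves to (the helper name is hypothetical; the config and logger are assumed to be built elsewhere, e.g. from the TOML file and CLI flags):

package example

import (
	"github.com/ethereum-optimism/optimism/indexer"
	"github.com/ethereum-optimism/optimism/indexer/config"
	"github.com/ethereum-optimism/optimism/indexer/database"
	"github.com/ethereum/go-ethereum/log"
)

// newIndexerFromConfig mirrors the wiring introduced by this change: the DB is
// opened from the config's [db] section and handed to the indexer together with
// the chain and RPC sections plus a logger. Starting and stopping the indexer
// is left to the caller.
func newIndexerFromConfig(cfg config.Config, logger log.Logger) (*indexer.Indexer, error) {
	db, err := database.NewDB(cfg.DB)
	if err != nil {
		return nil, err
	}
	return indexer.NewIndexer(cfg.Chain, cfg.RPCs, db, logger)
}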
...@@ -2,8 +2,5 @@ ...@@ -2,8 +2,5 @@
INDEXER_L1_ETH_RPC=FILL_ME_IN INDEXER_L1_ETH_RPC=FILL_ME_IN
INDEXER_L2_ETH_RPC=FILL_ME_IN INDEXER_L2_ETH_RPC=FILL_ME_IN
# temporary env variable to enable to new indexer
INDEXER_REFRESH=1
# Fill in to use prisma studio ui with a db other than the default # Fill in to use prisma studio ui with a db other than the default
# DATABASE_URL=FILL_ME_IN # DATABASE_URL=FILL_ME_IN
package flags
import (
"time"
"github.com/urfave/cli"
)
const envVarPrefix = "INDEXER_"
func prefixEnvVar(name string) string {
return envVarPrefix + name
}
var (
/* Required Flags */
BuildEnvFlag = cli.StringFlag{
Name: "build-env",
Usage: "Build environment for which the binary is produced, " +
"e.g. production or development",
Required: true,
EnvVar: prefixEnvVar("BUILD_ENV"),
}
ChainIDFlag = cli.StringFlag{
Name: "chain-id",
Usage: "Ethereum chain ID",
Required: true,
EnvVar: prefixEnvVar("CHAIN_ID"),
}
L1EthRPCFlag = cli.StringFlag{
Name: "l1-eth-rpc",
Usage: "HTTP provider URL for L1",
Required: true,
EnvVar: prefixEnvVar("L1_ETH_RPC"),
}
L2EthRPCFlag = cli.StringFlag{
Name: "l2-eth-rpc",
Usage: "HTTP provider URL for L2",
Required: true,
EnvVar: prefixEnvVar("L2_ETH_RPC"),
}
L1AddressManagerAddressFlag = cli.StringFlag{
Name: "l1-address-manager-address",
Usage: "Address of the L1 address manager",
Required: true,
EnvVar: prefixEnvVar("L1_ADDRESS_MANAGER_ADDRESS"),
}
DBHostFlag = cli.StringFlag{
Name: "db-host",
Usage: "Hostname of the database connection",
Required: true,
EnvVar: prefixEnvVar("DB_HOST"),
}
DBPortFlag = cli.Uint64Flag{
Name: "db-port",
Usage: "Port of the database connection",
Required: true,
EnvVar: prefixEnvVar("DB_PORT"),
}
DBUserFlag = cli.StringFlag{
Name: "db-user",
Usage: "Username of the database connection",
Required: true,
EnvVar: prefixEnvVar("DB_USER"),
}
DBPasswordFlag = cli.StringFlag{
Name: "db-password",
Usage: "Password of the database connection",
Required: true,
EnvVar: prefixEnvVar("DB_PASSWORD"),
}
DBNameFlag = cli.StringFlag{
Name: "db-name",
Usage: "Database name of the database connection",
Required: true,
EnvVar: prefixEnvVar("DB_NAME"),
}
/* Bedrock Flags */
BedrockFlag = cli.BoolFlag{
Name: "bedrock",
Usage: "Whether or not this indexer should operate in Bedrock mode",
EnvVar: prefixEnvVar("BEDROCK"),
}
BedrockL1StandardBridgeAddress = cli.StringFlag{
Name: "bedrock.l1-standard-bridge-address",
Usage: "Address of the L1 standard bridge",
EnvVar: prefixEnvVar("BEDROCK_L1_STANDARD_BRIDGE"),
}
BedrockOptimismPortalAddress = cli.StringFlag{
Name: "bedrock.portal-address",
Usage: "Address of the portal",
EnvVar: prefixEnvVar("BEDROCK_OPTIMISM_PORTAL"),
}
/* Optional Flags */
DisableIndexer = cli.BoolFlag{
Name: "disable-indexer",
Usage: "Whether or not to enable the indexer on this instance",
Required: false,
EnvVar: prefixEnvVar("DISABLE_INDEXER"),
}
LogLevelFlag = cli.StringFlag{
Name: "log-level",
Usage: "The lowest log level that will be output",
Value: "info",
EnvVar: prefixEnvVar("LOG_LEVEL"),
}
LogTerminalFlag = cli.BoolFlag{
Name: "log-terminal",
Usage: "If true, outputs logs in terminal format, otherwise prints " +
"in JSON format. If SENTRY_ENABLE is set to true, this flag is " +
"ignored and logs are printed using JSON",
EnvVar: prefixEnvVar("LOG_TERMINAL"),
}
SentryEnableFlag = cli.BoolFlag{
Name: "sentry-enable",
Usage: "Whether or not to enable Sentry. If true, sentry-dsn must also be set",
EnvVar: prefixEnvVar("SENTRY_ENABLE"),
}
SentryDsnFlag = cli.StringFlag{
Name: "sentry-dsn",
Usage: "Sentry data source name",
EnvVar: prefixEnvVar("SENTRY_DSN"),
}
SentryTraceRateFlag = cli.DurationFlag{
Name: "sentry-trace-rate",
Usage: "Sentry trace rate",
Value: 50 * time.Millisecond,
EnvVar: prefixEnvVar("SENTRY_TRACE_RATE"),
}
L1StartBlockNumberFlag = cli.Uint64Flag{
Name: "start-block-number",
Usage: "The block number to start indexing from. Must be use together with start block hash",
Value: 0,
EnvVar: prefixEnvVar("START_BLOCK_NUMBER"),
}
L1ConfDepthFlag = cli.Uint64Flag{
Name: "l1-conf-depth",
Usage: "The number of confirmations after which headers are considered confirmed on L1",
Value: 20,
EnvVar: prefixEnvVar("L1_CONF_DEPTH"),
}
L2ConfDepthFlag = cli.Uint64Flag{
Name: "l2-conf-depth",
Usage: "The number of confirmations after which headers are considered confirmed on L1",
Value: 24,
EnvVar: prefixEnvVar("L2_CONF_DEPTH"),
}
MaxHeaderBatchSizeFlag = cli.Uint64Flag{
Name: "max-header-batch-size",
Usage: "The maximum number of headers to request as a batch",
Value: 2000,
EnvVar: prefixEnvVar("MAX_HEADER_BATCH_SIZE"),
}
RESTHostnameFlag = cli.StringFlag{
Name: "rest-hostname",
Usage: "The hostname of the REST server",
Value: "127.0.0.1",
EnvVar: prefixEnvVar("REST_HOSTNAME"),
}
RESTPortFlag = cli.Uint64Flag{
Name: "rest-port",
Usage: "The port of the REST server",
Value: 8080,
EnvVar: prefixEnvVar("REST_PORT"),
}
MetricsServerEnableFlag = cli.BoolFlag{
Name: "metrics-server-enable",
Usage: "Whether or not to run the embedded metrics server",
EnvVar: prefixEnvVar("METRICS_SERVER_ENABLE"),
}
MetricsHostnameFlag = cli.StringFlag{
Name: "metrics-hostname",
Usage: "The hostname of the metrics server",
Value: "127.0.0.1",
EnvVar: prefixEnvVar("METRICS_HOSTNAME"),
}
MetricsPortFlag = cli.Uint64Flag{
Name: "metrics-port",
Usage: "The port of the metrics server",
Value: 7300,
EnvVar: prefixEnvVar("METRICS_PORT"),
}
)
var requiredFlags = []cli.Flag{
BuildEnvFlag,
ChainIDFlag,
L1EthRPCFlag,
L2EthRPCFlag,
L1AddressManagerAddressFlag,
DBHostFlag,
DBPortFlag,
DBUserFlag,
DBPasswordFlag,
DBNameFlag,
}
var optionalFlags = []cli.Flag{
BedrockFlag,
BedrockL1StandardBridgeAddress,
BedrockOptimismPortalAddress,
DisableIndexer,
LogLevelFlag,
LogTerminalFlag,
SentryEnableFlag,
SentryDsnFlag,
SentryTraceRateFlag,
L1ConfDepthFlag,
L2ConfDepthFlag,
MaxHeaderBatchSizeFlag,
L1StartBlockNumberFlag,
RESTHostnameFlag,
RESTPortFlag,
MetricsServerEnableFlag,
MetricsHostnameFlag,
MetricsPortFlag,
}
// Flags contains the list of configuration options available to the binary.
var Flags = append(requiredFlags, optionalFlags...)
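As an illustrative sketch (not code from this repository), these definitions are meant to be handed to a urfave/cli v1 app and read back through the Global* accessors; the app name and the subset of flags read below are assumptions for the example.

package main

import (
	"fmt"
	"os"

	"github.com/ethereum-optimism/optimism/indexer/flags"
	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "indexer-example" // hypothetical name, for illustration only
	app.Flags = flags.Flags      // registers the required and optional flags defined above

	app.Action = func(ctx *cli.Context) error {
		// Flags marked Required: true are checked by the cli library before this runs.
		l1RPC := ctx.GlobalString(flags.L1EthRPCFlag.Name)
		confDepth := ctx.GlobalUint64(flags.L1ConfDepthFlag.Name)
		fmt.Printf("indexing %s with confirmation depth %d\n", l1RPC, confDepth)
		return nil
	}

	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}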
package flags
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/urfave/cli"
)
// TestRequiredFlagsSetRequired asserts that all flags deemed required properly
// have the Required field set to true.
func TestRequiredFlagsSetRequired(t *testing.T) {
for _, flag := range requiredFlags {
reqFlag, ok := flag.(cli.RequiredFlag)
require.True(t, ok)
require.True(t, reqFlag.IsRequired())
}
}
// TestOptionalFlagsDontSetRequired asserts that all flags deemed optional set
// the Required field to false.
func TestOptionalFlagsDontSetRequired(t *testing.T) {
for _, flag := range optionalFlags {
reqFlag, ok := flag.(cli.RequiredFlag)
require.True(t, ok)
require.False(t, reqFlag.IsRequired())
}
}
...@@ -24,44 +24,31 @@ type Indexer struct { ...@@ -24,44 +24,31 @@ type Indexer struct {
} }
// NewIndexer initializes an instance of the Indexer // NewIndexer initializes an instance of the Indexer
func NewIndexer(cfg config.Config) (*Indexer, error) { func NewIndexer(chainConfig config.ChainConfig, rpcsConfig config.RPCsConfig, db *database.DB, logger log.Logger) (*Indexer, error) {
dsn := fmt.Sprintf("host=%s port=%d dbname=%s sslmode=disable", cfg.DB.Host, cfg.DB.Port, cfg.DB.Name) l1Contracts := chainConfig.L1Contracts
if cfg.DB.User != "" { l1EthClient, err := node.DialEthClient(rpcsConfig.L1RPC)
dsn += fmt.Sprintf(" user=%s", cfg.DB.User)
}
if cfg.DB.Password != "" {
dsn += fmt.Sprintf(" password=%s", cfg.DB.Password)
}
db, err := database.NewDB(dsn)
if err != nil {
return nil, err
}
l1Contracts := cfg.Chain.L1Contracts
l1EthClient, err := node.DialEthClient(cfg.RPCs.L1RPC)
if err != nil { if err != nil {
return nil, err return nil, err
} }
l1Processor, err := processor.NewL1Processor(cfg.Logger, l1EthClient, db, l1Contracts) l1Processor, err := processor.NewL1Processor(logger, l1EthClient, db, l1Contracts)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// L2Processor (predeploys). Although most likely the right setting, make this configurable? // L2Processor (predeploys). Although most likely the right setting, make this configurable?
l2Contracts := processor.L2ContractPredeploys() l2Contracts := processor.L2ContractPredeploys()
l2EthClient, err := node.DialEthClient(cfg.RPCs.L2RPC) l2EthClient, err := node.DialEthClient(rpcsConfig.L2RPC)
if err != nil { if err != nil {
return nil, err return nil, err
} }
l2Processor, err := processor.NewL2Processor(cfg.Logger, l2EthClient, db, l2Contracts) l2Processor, err := processor.NewL2Processor(logger, l2EthClient, db, l2Contracts)
if err != nil { if err != nil {
return nil, err return nil, err
} }
indexer := &Indexer{ indexer := &Indexer{
db: db, db: db,
log: cfg.Logger, log: logger,
L1Processor: l1Processor, L1Processor: l1Processor,
L2Processor: l2Processor, L2Processor: l2Processor,
} }
......
# Chain configures the L1 chain addresses
# They can be set manually or derived from a preset L2 chain ID for known chains, including OP Mainnet, OP Goerli, Base, Base Goerli, Zora, and Zora Goerli
[chain] [chain]
# OP Goerli
preset = 420 preset = 420
[rpcs] [rpcs]
l1-rpc = "${INDEXER_RPC_URL_L1}" l1-rpc = "${INDEXER_RPC_URL_L1}"
l2-rpc = "${INDEXER_RPC_URL_L2}" l2-rpc = "${INDEXER_RPC_URL_L2}"
...@@ -10,6 +12,7 @@ host = "127.0.0.1" ...@@ -10,6 +12,7 @@ host = "127.0.0.1"
port = 5432 port = 5432
user = "postgres" user = "postgres"
password = "postgres" password = "postgres"
name = "indexer"
[api] [api]
host = "127.0.0.1" host = "127.0.0.1"
...@@ -19,3 +22,11 @@ port = 8080 ...@@ -19,3 +22,11 @@ port = 8080
host = "127.0.0.1" host = "127.0.0.1"
port = 7300 port = 7300
[logger]
# Log level: trace, debug, info, warn, error, crit. Capitals are accepted too.
level = "info"
# Color the log output. Defaults to true if terminal is detected.
color = true
# Format the log output. Supported formats: 'text', 'terminal', 'logfmt', 'json', 'json-pretty'
format = "terminal"
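A minimal sketch, assuming the BurntSushi/toml decoder and a hand-rolled struct (the indexer's real config loader may differ), of how a file like the one above can be read with ${VAR} expansion for the RPC URLs:

package main

import (
	"fmt"
	"os"

	"github.com/BurntSushi/toml"
)

// exampleConfig mirrors only the pieces of the file shown above; the field
// names and tags are assumptions for this sketch, not the indexer's real types.
type exampleConfig struct {
	Chain struct {
		Preset int `toml:"preset"`
	} `toml:"chain"`
	RPCs struct {
		L1RPC string `toml:"l1-rpc"`
		L2RPC string `toml:"l2-rpc"`
	} `toml:"rpcs"`
	Logger struct {
		Level string `toml:"level"`
	} `toml:"logger"`
}

func loadExampleConfig(path string) (exampleConfig, error) {
	var cfg exampleConfig
	data, err := os.ReadFile(path)
	if err != nil {
		return cfg, err
	}
	// Substitute ${INDEXER_RPC_URL_L1}-style references from the environment
	// before decoding, so secrets stay out of the checked-in file.
	if _, err := toml.Decode(os.ExpandEnv(string(data)), &cfg); err != nil {
		return cfg, err
	}
	return cfg, nil
}

func main() {
	cfg, err := loadExampleConfig("indexer.toml")
	if err != nil {
		panic(err)
	}
	fmt.Println("preset:", cfg.Chain.Preset, "l1:", cfg.RPCs.L1RPC)
}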
package integration_tests
import (
"database/sql"
"encoding/json"
"fmt"
"math/big"
"net/http"
"os"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/indexer/db"
"github.com/ethereum-optimism/optimism/indexer/legacy"
"github.com/ethereum-optimism/optimism/indexer/services/l1"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-e2e/config"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/withdrawals"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
_ "github.com/lib/pq"
)
func TestBedrockIndexer(t *testing.T) {
dbParams := createTestDB(t)
cfg := op_e2e.DefaultSystemConfig(t)
cfg.DeployConfig.FinalizationPeriodSeconds = 2
sys, err := cfg.Start()
require.NoError(t, err)
defer sys.Close()
l1Client := sys.Clients["l1"]
l2Client := sys.Clients["sequencer"]
fromAddr := cfg.Secrets.Addresses().Alice
// wait a couple of blocks
require.NoError(t, utils.WaitBlock(e2eutils.TimeoutCtx(t, 30*time.Second), l2Client, 10))
l1SB, err := bindings.NewL1StandardBridge(cfg.L1Deployments.L1StandardBridgeProxy, l1Client)
require.NoError(t, err)
l2SB, err := bindings.NewL2StandardBridge(predeploys.L2StandardBridgeAddr, l2Client)
require.NoError(t, err)
l1Opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Alice, cfg.L1ChainIDBig())
require.NoError(t, err)
l2Opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Alice, cfg.L2ChainIDBig())
require.NoError(t, err)
idxrCfg := legacy.Config{
ChainID: cfg.DeployConfig.L1ChainID,
L1EthRpc: sys.Nodes["l1"].HTTPEndpoint(),
L2EthRpc: sys.Nodes["sequencer"].HTTPEndpoint(),
PollInterval: time.Second,
DBHost: dbParams.Host,
DBPort: dbParams.Port,
DBUser: dbParams.User,
DBPassword: dbParams.Password,
DBName: dbParams.Name,
LogLevel: "info",
LogTerminal: true,
L1StartBlockNumber: 0,
L1ConfDepth: 1,
L2ConfDepth: 1,
MaxHeaderBatchSize: 2,
RESTHostname: "127.0.0.1",
RESTPort: 7980,
DisableIndexer: false,
Bedrock: true,
BedrockL1StandardBridgeAddress: cfg.DeployConfig.L1StandardBridgeProxy,
BedrockOptimismPortalAddress: cfg.DeployConfig.OptimismPortalProxy,
}
idxr, err := legacy.NewIndexer(idxrCfg)
require.NoError(t, err)
errCh := make(chan error, 1)
go func() {
errCh <- idxr.Start()
}()
t.Cleanup(func() {
idxr.Stop()
require.NoError(t, <-errCh)
})
makeURL := func(path string) string {
return fmt.Sprintf("http://%s:%d/%s", idxrCfg.RESTHostname, idxrCfg.RESTPort, path)
}
t.Run("deposit ETH", func(t *testing.T) {
l1Opts.Value = big.NewInt(params.Ether)
depTx, err := l1SB.DepositETH(l1Opts, 200_000, nil)
require.NoError(t, err)
depReceipt, err := utils.WaitReceiptOK(e2eutils.TimeoutCtx(t, 10*time.Second), l1Client, depTx.Hash())
require.NoError(t, err)
require.Greaterf(t, len(depReceipt.Logs), 0, "must have logs")
var l2Hash common.Hash
for _, eLog := range depReceipt.Logs {
if len(eLog.Topics) == 0 || eLog.Topics[0] != derive.DepositEventABIHash {
continue
}
depLog, err := derive.UnmarshalDepositLogEvent(eLog)
require.NoError(t, err)
tx := types.NewTx(depLog)
l2Hash = tx.Hash()
}
require.NotEqual(t, common.Hash{}, l2Hash)
_, err = utils.WaitReceiptOK(e2eutils.TimeoutCtx(t, 15*time.Second), l2Client, l2Hash)
require.NoError(t, err)
// Poll for indexer deposit
var depPage *db.PaginatedDeposits
require.NoError(t, utils.WaitFor(e2eutils.TimeoutCtx(t, 30*time.Second), 100*time.Millisecond, func() (bool, error) {
res := new(db.PaginatedDeposits)
err := getJSON(makeURL(fmt.Sprintf("v1/deposits/%s", fromAddr)), res)
if err != nil {
return false, err
}
if len(res.Deposits) == 0 {
return false, nil
}
depPage = res
return true, nil
}))
// Make sure deposit is what we expect
require.Equal(t, 1, len(depPage.Deposits))
deposit := depPage.Deposits[0]
require.Equal(t, big.NewInt(params.Ether).String(), deposit.Amount)
require.Equal(t, depTx.Hash().String(), deposit.TxHash)
require.Equal(t, depReceipt.BlockNumber.Uint64(), deposit.BlockNumber)
require.Equal(t, fromAddr.String(), deposit.FromAddress)
require.Equal(t, fromAddr.String(), deposit.ToAddress)
require.EqualValues(t, db.ETHL1Token, deposit.L1Token)
require.Equal(t, l1.ZeroAddress.String(), deposit.L2Token)
require.NotEmpty(t, deposit.GUID)
// Perform withdrawal through bridge
l2Opts.Value = big.NewInt(0.5 * params.Ether)
wdTx, err := l2SB.Withdraw(l2Opts, predeploys.LegacyERC20ETHAddr, big.NewInt(0.5*params.Ether), 0, nil)
require.NoError(t, err)
wdReceipt, err := utils.WaitReceiptOK(e2eutils.TimeoutCtx(t, 30*time.Second), l2Client, wdTx.Hash())
require.NoError(t, err)
var wdPage *db.PaginatedWithdrawals
require.NoError(t, utils.WaitFor(e2eutils.TimeoutCtx(t, 30*time.Second), 100*time.Millisecond, func() (bool, error) {
res := new(db.PaginatedWithdrawals)
err := getJSON(makeURL(fmt.Sprintf("v1/withdrawals/%s", fromAddr)), res)
if err != nil {
return false, err
}
if len(res.Withdrawals) == 0 {
return false, nil
}
wdPage = res
return true, nil
}))
require.Equal(t, 1, len(wdPage.Withdrawals))
withdrawal := wdPage.Withdrawals[0]
require.Nil(t, withdrawal.BedrockProvenTxHash)
require.Nil(t, withdrawal.BedrockFinalizedTxHash)
require.Equal(t, big.NewInt(0.5*params.Ether).String(), withdrawal.Amount)
require.Equal(t, wdTx.Hash().String(), withdrawal.TxHash)
require.Equal(t, wdReceipt.BlockNumber.Uint64(), withdrawal.BlockNumber)
// use fromaddr twice here because the user is withdrawing
// to themselves
require.Equal(t, fromAddr.String(), withdrawal.FromAddress)
require.Equal(t, fromAddr.String(), withdrawal.ToAddress)
require.EqualValues(t, l1.ZeroAddress.String(), withdrawal.L1Token)
require.Equal(t, db.ETHL2Token, withdrawal.L2Token)
require.NotEmpty(t, withdrawal.GUID)
// Prove our withdrawal
wdParams, proveReceipt := op_e2e.ProveWithdrawal(t, cfg, l1Client, sys.Nodes["sequencer"], cfg.Secrets.Alice, wdReceipt)
wdPage = nil
require.NoError(t, utils.WaitFor(e2eutils.TimeoutCtx(t, 30*time.Second), 100*time.Millisecond, func() (bool, error) {
res := new(db.PaginatedWithdrawals)
err := getJSON(makeURL(fmt.Sprintf("v1/withdrawals/%s", fromAddr)), res)
if err != nil {
return false, err
}
if res.Withdrawals[0].BedrockProvenTxHash == nil {
return false, nil
}
wdPage = res
return true, nil
}))
wd := wdPage.Withdrawals[0]
require.Equal(t, proveReceipt.TxHash.String(), *wd.BedrockProvenTxHash)
require.Nil(t, wd.BedrockFinalizedTxHash)
// Finalize withdrawal
err = withdrawals.WaitForFinalizationPeriod(e2eutils.TimeoutCtx(t, 30*time.Second), l1Client, proveReceipt.BlockNumber, config.L1Deployments.L2OutputOracleProxy)
require.Nil(t, err)
finReceipt := op_e2e.FinalizeWithdrawal(t, cfg, l1Client, cfg.Secrets.Alice, wdReceipt, wdParams)
wdPage = nil
require.NoError(t, utils.WaitFor(e2eutils.TimeoutCtx(t, 30*time.Second), 100*time.Millisecond, func() (bool, error) {
res := new(db.PaginatedWithdrawals)
err := getJSON(makeURL(fmt.Sprintf("v1/withdrawals/%s", fromAddr)), res)
if err != nil {
return false, err
}
if res.Withdrawals[0].BedrockFinalizedTxHash == nil {
return false, nil
}
wdPage = res
return true, nil
}))
wd = wdPage.Withdrawals[0]
require.Equal(t, proveReceipt.TxHash.String(), *wd.BedrockProvenTxHash)
require.Equal(t, finReceipt.TxHash.String(), *wd.BedrockFinalizedTxHash)
require.True(t, *wd.BedrockFinalizedSuccess)
wdPage = new(db.PaginatedWithdrawals)
err = getJSON(makeURL(fmt.Sprintf("v1/withdrawals/%s?finalized=false", fromAddr)), wdPage)
require.NoError(t, err)
require.Equal(t, 0, len(wdPage.Withdrawals))
})
}
type testDBParams struct {
Host string
Port uint64
User string
Password string
Name string
}
func createTestDB(t *testing.T) *testDBParams {
user := os.Getenv("DB_USER")
name := fmt.Sprintf("indexer_test_%d", time.Now().Unix())
dsn := "postgres://"
if user != "" {
dsn += user
dsn += "@"
}
dsn += "localhost:5432?sslmode=disable"
pg, err := sql.Open(
"postgres",
dsn,
)
require.NoError(t, err)
_, err = pg.Exec("CREATE DATABASE " + name)
require.NoError(t, err)
t.Cleanup(func() {
_, err = pg.Exec("DROP DATABASE " + name)
require.NoError(t, err)
pg.Close()
})
return &testDBParams{
Host: "localhost",
Port: 5432,
Name: name,
User: user,
}
}
func getJSON(url string, out interface{}) error {
res, err := http.Get(url)
if err != nil {
return err
}
if res.StatusCode != 200 {
return fmt.Errorf("non-200 status code %d", res.StatusCode)
}
defer res.Body.Close()
dec := json.NewDecoder(res.Body)
return dec.Decode(out)
}
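A small usage sketch for getJSON, to be placed inside a test like TestBedrockIndexer above; the status endpoint's response shape is not specified here, so it is decoded into a generic map.

// Check the legacy indexer's L1 sync status via the REST API registered by Serve.
status := make(map[string]interface{})
require.NoError(t, getJSON(makeURL("v1/l1/status"), &status))
t.Logf("l1 indexer status: %v", status)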
package legacy
import (
"errors"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/indexer/flags"
)
var (
// ErrSentryDSNNotSet signals that no Data Source Name was provided
// with which to configure Sentry logging.
ErrSentryDSNNotSet = errors.New("sentry-dsn must be set if use-sentry " +
"is true")
)
type Config struct {
/* Required Params */
// ChainID identifies the chain being indexed.
ChainID uint64
// L1EthRpc is the HTTP provider URL for L1.
L1EthRpc string
// L2EthRpc is the HTTP provider URL for L2.
L2EthRpc string
// L1AddressManagerAddress is the address of the address manager for L1.
L1AddressManagerAddress string
// PollInterval is the delay between querying L2 for more transactions
// and creating a new batch.
PollInterval time.Duration
// Hostname of the database connection.
DBHost string
// Port of the database connection.
DBPort uint64
// Username of the database connection.
DBUser string
// Password of the database connection.
DBPassword string
// Database name of the database connection.
DBName string
/* Optional Params */
// LogLevel is the lowest log level that will be output.
LogLevel string
// LogTerminal if true, prints to stdout in terminal format, otherwise
// prints using JSON. If SentryEnable is true this flag is ignored, and logs
// are printed using JSON.
LogTerminal bool
// L1StartBlockNumber is the block number to start indexing L1 from.
L1StartBlockNumber uint64
// L1ConfDepth is the number of confirmations after which headers are
// considered confirmed on L1.
L1ConfDepth uint64
// L2ConfDepth is the number of confirmations after which headers are
// considered confirmed on L2.
L2ConfDepth uint64
// MaxHeaderBatchSize is the maximum number of headers to request as a
// batch.
MaxHeaderBatchSize uint64
// RESTHostname is the hostname at which the REST server is running.
RESTHostname string
// RESTPort is the port at which the REST server is running.
RESTPort uint64
// MetricsServerEnable if true, will create a metrics client and log to
// Prometheus.
MetricsServerEnable bool
// MetricsHostname is the hostname at which the metrics server is running.
MetricsHostname string
// MetricsPort is the port at which the metrics server is running.
MetricsPort uint64
// DisableIndexer enables/disables the indexer.
DisableIndexer bool
// Bedrock enables Bedrock indexing.
Bedrock bool
BedrockL1StandardBridgeAddress common.Address
BedrockOptimismPortalAddress common.Address
}
// NewConfig parses the Config from the provided flags or environment variables.
// This method fails if ValidateConfig deems the configuration to be malformed.
func NewConfig(ctx *cli.Context) (Config, error) {
cfg := Config{
/* Required Flags */
ChainID: ctx.GlobalUint64(flags.ChainIDFlag.Name),
L1EthRpc: ctx.GlobalString(flags.L1EthRPCFlag.Name),
L2EthRpc: ctx.GlobalString(flags.L2EthRPCFlag.Name),
L1AddressManagerAddress: ctx.GlobalString(flags.L1AddressManagerAddressFlag.Name),
DBHost: ctx.GlobalString(flags.DBHostFlag.Name),
DBPort: ctx.GlobalUint64(flags.DBPortFlag.Name),
DBUser: ctx.GlobalString(flags.DBUserFlag.Name),
DBPassword: ctx.GlobalString(flags.DBPasswordFlag.Name),
DBName: ctx.GlobalString(flags.DBNameFlag.Name),
/* Optional Flags */
Bedrock: ctx.GlobalBool(flags.BedrockFlag.Name),
BedrockL1StandardBridgeAddress: common.HexToAddress(ctx.GlobalString(flags.BedrockL1StandardBridgeAddress.Name)),
BedrockOptimismPortalAddress: common.HexToAddress(ctx.GlobalString(flags.BedrockOptimismPortalAddress.Name)),
DisableIndexer: ctx.GlobalBool(flags.DisableIndexer.Name),
LogLevel: ctx.GlobalString(flags.LogLevelFlag.Name),
LogTerminal: ctx.GlobalBool(flags.LogTerminalFlag.Name),
L1StartBlockNumber: ctx.GlobalUint64(flags.L1StartBlockNumberFlag.Name),
L1ConfDepth: ctx.GlobalUint64(flags.L1ConfDepthFlag.Name),
L2ConfDepth: ctx.GlobalUint64(flags.L2ConfDepthFlag.Name),
MaxHeaderBatchSize: ctx.GlobalUint64(flags.MaxHeaderBatchSizeFlag.Name),
MetricsServerEnable: ctx.GlobalBool(flags.MetricsServerEnableFlag.Name),
RESTHostname: ctx.GlobalString(flags.RESTHostnameFlag.Name),
RESTPort: ctx.GlobalUint64(flags.RESTPortFlag.Name),
MetricsHostname: ctx.GlobalString(flags.MetricsHostnameFlag.Name),
MetricsPort: ctx.GlobalUint64(flags.MetricsPortFlag.Name),
}
err := ValidateConfig(&cfg)
if err != nil {
return Config{}, err
}
return cfg, nil
}
// ValidateConfig ensures additional constraints on the parsed configuration to
// ensure that it is well-formed.
func ValidateConfig(cfg *Config) error {
// Sanity check log level.
if cfg.LogLevel == "" {
cfg.LogLevel = "debug"
}
_, err := log.LvlFromString(cfg.LogLevel)
if err != nil {
return err
}
if cfg.Bedrock && (cfg.BedrockL1StandardBridgeAddress == common.Address{} || cfg.BedrockOptimismPortalAddress == common.Address{}) {
return errors.New("must specify l1 standard bridge and optimism portal addresses in bedrock mode")
}
return nil
}
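A minimal sketch (values are placeholders, not a recommended configuration) of constructing a Config directly and running it through ValidateConfig:

package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/indexer/legacy"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	cfg := legacy.Config{
		ChainID:  5, // placeholder chain ID for the sketch
		L1EthRpc: "http://localhost:8545",
		L2EthRpc: "http://localhost:9545",
		DBHost:   "127.0.0.1",
		DBPort:   5432,
		DBUser:   "postgres",
		DBName:   "indexer",
		LogLevel: "info",
		Bedrock:  true,
		// Placeholder addresses; in Bedrock mode both must be non-zero.
		BedrockL1StandardBridgeAddress: common.HexToAddress("0x0000000000000000000000000000000000000001"),
		BedrockOptimismPortalAddress:   common.HexToAddress("0x0000000000000000000000000000000000000002"),
	}
	if err := legacy.ValidateConfig(&cfg); err != nil {
		panic(fmt.Errorf("invalid indexer config: %w", err))
	}
	fmt.Println("config is well-formed")
}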
package legacy_test
import (
"fmt"
"testing"
legacy "github.com/ethereum-optimism/optimism/indexer/legacy"
"github.com/stretchr/testify/require"
)
var validateConfigTests = []struct {
name string
cfg legacy.Config
expErr error
}{
{
name: "bad log level",
cfg: legacy.Config{
LogLevel: "unknown",
},
expErr: fmt.Errorf("unknown level: unknown"),
},
}
// TestValidateConfig asserts the behavior of ValidateConfig by testing expected
// error and success configurations.
func TestValidateConfig(t *testing.T) {
for _, test := range validateConfigTests {
t.Run(test.name, func(t *testing.T) {
err := legacy.ValidateConfig(&test.cfg)
require.Equal(t, err, test.expErr)
})
}
}
package legacy
import (
"context"
"fmt"
"math/big"
"net"
"net/http"
"os"
"strconv"
"time"
"github.com/ethereum-optimism/optimism/indexer/services"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/indexer/metrics"
"github.com/ethereum-optimism/optimism/indexer/server"
"github.com/rs/cors"
database "github.com/ethereum-optimism/optimism/indexer/db"
"github.com/ethereum-optimism/optimism/indexer/services/l1"
"github.com/ethereum-optimism/optimism/indexer/services/l2"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/gorilla/mux"
"github.com/urfave/cli"
)
const (
// defaultDialTimeout is default duration the service will wait on
// startup to make a connection to either the L1 or L2 backends.
defaultDialTimeout = 5 * time.Second
)
// Main is the entrypoint into the indexer service. This method returns
// a closure that executes the service and blocks until the service exits. The
// use of a closure allows the parameters bound to the top-level main package,
// e.g. GitVersion, to be captured and used once the function is executed.
func Main(gitVersion string) func(ctx *cli.Context) error {
return func(ctx *cli.Context) error {
cfg, err := NewConfig(ctx)
if err != nil {
return err
}
log.Info("Initializing indexer")
indexer, err := NewIndexer(cfg)
if err != nil {
log.Error("Unable to create indexer", "error", err)
return err
}
log.Info("Starting indexer")
if err := indexer.Start(); err != nil {
return err
}
defer indexer.Stop()
log.Info("Indexer started")
<-(chan struct{})(nil)
return nil
}
}
// Indexer is a service that configures the necessary resources for
// running the L1 and L2 indexing sub-services.
type Indexer struct {
ctx context.Context
cfg Config
l1Client *ethclient.Client
l2Client *ethclient.Client
l1IndexingService *l1.Service
l2IndexingService *l2.Service
airdropService *services.Airdrop
router *mux.Router
metrics *metrics.Metrics
db *database.Database
server *http.Server
}
// NewIndexer initializes the Indexer, gathering any resources
// that will be needed by the L1 and L2 indexing sub-services.
func NewIndexer(cfg Config) (*Indexer, error) {
ctx := context.Background()
var logHandler log.Handler
if cfg.LogTerminal {
logHandler = log.StreamHandler(os.Stdout, log.TerminalFormat(true))
} else {
logHandler = log.StreamHandler(os.Stdout, log.JSONFormat())
}
logLevel, err := log.LvlFromString(cfg.LogLevel)
if err != nil {
return nil, err
}
log.Root().SetHandler(log.LvlFilterHandler(logLevel, logHandler))
// Connect to L1 and L2 providers. Perform these last since they are the
// most expensive.
l1Client, rawl1Client, err := dialEthClientWithTimeout(ctx, cfg.L1EthRpc)
if err != nil {
return nil, err
}
l2Client, l2RPC, err := dialEthClientWithTimeout(ctx, cfg.L2EthRpc)
if err != nil {
return nil, err
}
m := metrics.NewMetrics(nil)
if cfg.MetricsServerEnable {
go func() {
_, err := m.Serve(cfg.MetricsHostname, cfg.MetricsPort)
if err != nil {
log.Error("metrics server failed to start", "err", err)
}
}()
log.Info("metrics server enabled", "host", cfg.MetricsHostname, "port", cfg.MetricsPort)
}
dsn := fmt.Sprintf("host=%s port=%d dbname=%s sslmode=disable",
cfg.DBHost, cfg.DBPort, cfg.DBName)
if cfg.DBUser != "" {
dsn += fmt.Sprintf(" user=%s", cfg.DBUser)
}
if cfg.DBPassword != "" {
dsn += fmt.Sprintf(" password=%s", cfg.DBPassword)
}
db, err := database.NewDatabase(dsn)
if err != nil {
return nil, err
}
var addrManager services.AddressManager
if cfg.Bedrock {
addrManager, err = services.NewBedrockAddresses(
l1Client,
cfg.BedrockL1StandardBridgeAddress,
cfg.BedrockOptimismPortalAddress,
)
} else {
addrManager, err = services.NewLegacyAddresses(l1Client, common.HexToAddress(cfg.L1AddressManagerAddress))
}
if err != nil {
return nil, err
}
l1IndexingService, err := l1.NewService(l1.ServiceConfig{
Context: ctx,
Metrics: m,
L1Client: l1Client,
RawL1Client: rawl1Client,
ChainID: new(big.Int).SetUint64(cfg.ChainID),
AddressManager: addrManager,
DB: db,
ConfDepth: cfg.L1ConfDepth,
MaxHeaderBatchSize: cfg.MaxHeaderBatchSize,
StartBlockNumber: cfg.L1StartBlockNumber,
Bedrock: cfg.Bedrock,
})
if err != nil {
return nil, err
}
l2IndexingService, err := l2.NewService(l2.ServiceConfig{
Context: ctx,
Metrics: m,
L2RPC: l2RPC,
L2Client: l2Client,
DB: db,
ConfDepth: cfg.L2ConfDepth,
MaxHeaderBatchSize: cfg.MaxHeaderBatchSize,
StartBlockNumber: uint64(0),
Bedrock: cfg.Bedrock,
})
if err != nil {
return nil, err
}
return &Indexer{
ctx: ctx,
cfg: cfg,
l1Client: l1Client,
l2Client: l2Client,
l1IndexingService: l1IndexingService,
l2IndexingService: l2IndexingService,
airdropService: services.NewAirdrop(db, m),
router: mux.NewRouter(),
metrics: m,
db: db,
}, nil
}
// Serve spins up a REST API server at the given hostname and port.
func (b *Indexer) Serve() error {
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
})
b.router.HandleFunc("/v1/l1/status", b.l1IndexingService.GetIndexerStatus).Methods("GET")
b.router.HandleFunc("/v1/l2/status", b.l2IndexingService.GetIndexerStatus).Methods("GET")
b.router.HandleFunc("/v1/deposits/0x{address:[a-fA-F0-9]{40}}", b.l1IndexingService.GetDeposits).Methods("GET")
b.router.HandleFunc("/v1/withdrawal/0x{hash:[a-fA-F0-9]{64}}", b.l2IndexingService.GetWithdrawalBatch).Methods("GET")
b.router.HandleFunc("/v1/withdrawals/0x{address:[a-fA-F0-9]{40}}", b.l2IndexingService.GetWithdrawals).Methods("GET")
b.router.HandleFunc("/v1/airdrops/0x{address:[a-fA-F0-9]{40}}", b.airdropService.GetAirdrop)
b.router.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
_, err := w.Write([]byte("OK"))
if err != nil {
log.Error("Error handling /healthz", "error", err)
}
})
middleware := server.LoggingMiddleware(b.metrics, log.New("service", "server"))
port := strconv.FormatUint(b.cfg.RESTPort, 10)
addr := net.JoinHostPort(b.cfg.RESTHostname, port)
b.server = &http.Server{
Addr: addr,
Handler: middleware(c.Handler(b.router)),
}
errCh := make(chan error, 1)
go func() {
errCh <- b.server.ListenAndServe()
}()
// Capture server startup errors
<-time.After(10 * time.Millisecond)
select {
case err := <-errCh:
return err
default:
log.Info("indexer REST server listening on", "addr", addr)
return nil
}
}
// Start starts the indexing service on the L1 and L2 chains and also
// starts the REST server.
func (b *Indexer) Start() error {
if b.cfg.DisableIndexer {
log.Info("indexer disabled, only serving data")
} else {
err := b.l1IndexingService.Start()
if err != nil {
return err
}
err = b.l2IndexingService.Start()
if err != nil {
return err
}
}
return b.Serve()
}
// Stop stops the indexing service on L1 and L2 chains.
func (b *Indexer) Stop() {
b.db.Close()
if b.server != nil {
// background context here so it waits for
// conns to close
_ = b.server.Shutdown(context.Background())
}
if !b.cfg.DisableIndexer {
b.l1IndexingService.Stop()
b.l2IndexingService.Stop()
}
}
// dialEthClientWithTimeout attempts to dial the provider using the
// provided URL. If the dial doesn't complete within defaultDialTimeout,
// this method will return an error.
func dialEthClientWithTimeout(ctx context.Context, url string) (
*ethclient.Client, *rpc.Client, error) {
ctxt, cancel := context.WithTimeout(ctx, defaultDialTimeout)
defer cancel()
c, err := rpc.DialContext(ctxt, url)
if err != nil {
return nil, nil, err
}
return ethclient.NewClient(c), c, nil
}
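Once Serve is listening, the REST API can be probed with plain net/http; a minimal sketch (the host and port are the defaults from the flags above, so adjust them to your configuration):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// /healthz is registered by Indexer.Serve and returns "OK" with status 200.
	resp, err := http.Get("http://127.0.0.1:8080/healthz")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("healthz: %d %s\n", resp.StatusCode, body)
}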
package metrics
import (
"net"
"net/http"
"strconv"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const metricsNamespace = "indexer"
type Metrics struct {
SyncHeight *prometheus.GaugeVec
DepositsCount *prometheus.CounterVec
WithdrawalsCount *prometheus.CounterVec
StateBatchesCount prometheus.Counter
L1CatchingUp prometheus.Gauge
L2CatchingUp prometheus.Gauge
SyncPercent *prometheus.GaugeVec
UpdateDuration *prometheus.SummaryVec
CachedTokensCount *prometheus.CounterVec
HTTPRequestsCount prometheus.Counter
HTTPResponsesCount *prometheus.CounterVec
HTTPRequestDurationSecs prometheus.Summary
tokenAddrs map[string]string
}
func NewMetrics(monitoredTokens map[string]string) *Metrics {
mts := make(map[string]string)
mts["0x0000000000000000000000000000000000000000"] = "ETH"
for addr, symbol := range monitoredTokens {
mts[addr] = symbol
}
return &Metrics{
SyncHeight: promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "sync_height",
Help: "The max height of the indexer's last batch of L1/L1 blocks.",
Namespace: metricsNamespace,
}, []string{
"chain",
}),
DepositsCount: promauto.NewCounterVec(prometheus.CounterOpts{
Name: "deposits_count",
Help: "The number of deposits indexed.",
Namespace: metricsNamespace,
}, []string{
"symbol",
}),
WithdrawalsCount: promauto.NewCounterVec(prometheus.CounterOpts{
Name: "withdrawals_count",
Help: "The number of withdrawals indexed.",
Namespace: metricsNamespace,
}, []string{
"symbol",
}),
StateBatchesCount: promauto.NewCounter(prometheus.CounterOpts{
Name: "state_batches_count",
Help: "The number of state batches indexed.",
Namespace: metricsNamespace,
}),
L1CatchingUp: promauto.NewGauge(prometheus.GaugeOpts{
Name: "l1_catching_up",
Help: "Whether or not L1 is far behind the chain tip.",
Namespace: metricsNamespace,
}),
L2CatchingUp: promauto.NewGauge(prometheus.GaugeOpts{
Name: "l2_catching_up",
Help: "Whether or not L2 is far behind the chain tip.",
Namespace: metricsNamespace,
}),
SyncPercent: promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "sync_percent",
Help: "Sync percentage for each chain.",
Namespace: metricsNamespace,
}, []string{
"chain",
}),
UpdateDuration: promauto.NewSummaryVec(prometheus.SummaryOpts{
Name: "update_duration_seconds",
Help: "How long each update took.",
Namespace: metricsNamespace,
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
}, []string{
"chain",
}),
CachedTokensCount: promauto.NewCounterVec(prometheus.CounterOpts{
Name: "cached_tokens_count",
Help: "How many tokens are in the cache",
Namespace: metricsNamespace,
}, []string{
"chain",
}),
HTTPRequestsCount: promauto.NewCounter(prometheus.CounterOpts{
Name: "http_requests_count",
Help: "How many HTTP requests this instance has seen",
Namespace: metricsNamespace,
}),
HTTPResponsesCount: promauto.NewCounterVec(prometheus.CounterOpts{
Name: "http_responses_count",
Help: "How many HTTP responses this instance has served",
Namespace: metricsNamespace,
}, []string{
"status_code",
}),
HTTPRequestDurationSecs: promauto.NewSummary(prometheus.SummaryOpts{
Name: "http_request_duration_secs",
Help: "How long each HTTP request took",
Namespace: metricsNamespace,
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
}),
tokenAddrs: mts,
}
}
func (m *Metrics) SetL1SyncHeight(height uint64) {
m.SyncHeight.WithLabelValues("l1").Set(float64(height))
}
func (m *Metrics) SetL2SyncHeight(height uint64) {
m.SyncHeight.WithLabelValues("l2").Set(float64(height))
}
func (m *Metrics) RecordDeposit(addr common.Address) {
sym := m.tokenAddrs[addr.String()]
if sym == "" {
sym = "UNKNOWN"
}
m.DepositsCount.WithLabelValues(sym).Inc()
}
func (m *Metrics) RecordWithdrawal(addr common.Address) {
sym := m.tokenAddrs[addr.String()]
if sym == "" {
sym = "UNKNOWN"
}
m.WithdrawalsCount.WithLabelValues(sym).Inc()
}
func (m *Metrics) RecordStateBatches(count int) {
m.StateBatchesCount.Add(float64(count))
}
func (m *Metrics) SetL1CatchingUp(state bool) {
var catchingUp float64
if state {
catchingUp = 1
}
m.L1CatchingUp.Set(catchingUp)
}
func (m *Metrics) SetL2CatchingUp(state bool) {
var catchingUp float64
if state {
catchingUp = 1
}
m.L2CatchingUp.Set(catchingUp)
}
func (m *Metrics) SetL1SyncPercent(height uint64, head uint64) {
m.SyncPercent.WithLabelValues("l1").Set(float64(height) / float64(head))
}
func (m *Metrics) SetL2SyncPercent(height uint64, head uint64) {
m.SyncPercent.WithLabelValues("l2").Set(float64(height) / float64(head))
}
func (m *Metrics) IncL1CachedTokensCount() {
m.CachedTokensCount.WithLabelValues("l1").Inc()
}
func (m *Metrics) IncL2CachedTokensCount() {
m.CachedTokensCount.WithLabelValues("l2").Inc()
}
func (m *Metrics) RecordHTTPRequest() {
m.HTTPRequestsCount.Inc()
}
func (m *Metrics) RecordHTTPResponse(code int, dur time.Duration) {
m.HTTPResponsesCount.WithLabelValues(strconv.Itoa(code)).Inc()
m.HTTPRequestDurationSecs.Observe(float64(dur) / float64(time.Second))
}
func (m *Metrics) Serve(hostname string, port uint64) (*http.Server, error) {
mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.Handler())
srv := new(http.Server)
srv.Addr = net.JoinHostPort(hostname, strconv.FormatUint(port, 10))
srv.Handler = mux
err := srv.ListenAndServe()
return srv, err
}
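A minimal usage sketch of this metrics package (the token map and port are illustrative; Serve blocks, so it runs in a goroutine here):

package main

import (
	"time"

	"github.com/ethereum-optimism/optimism/indexer/metrics"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

func main() {
	// Track ETH (added by default) plus one monitored token symbol (illustrative address).
	m := metrics.NewMetrics(map[string]string{
		"0x4200000000000000000000000000000000000042": "OP",
	})

	go func() {
		if _, err := m.Serve("127.0.0.1", 7300); err != nil {
			log.Error("metrics server stopped", "err", err)
		}
	}()

	// Record some sample observations; Prometheus scrapes them from /metrics.
	m.SetL1SyncHeight(1_000_000)
	m.RecordDeposit(common.HexToAddress("0x0000000000000000000000000000000000000000")) // counted as ETH
	m.RecordHTTPResponse(200, 42*time.Millisecond)

	select {} // keep the example process alive
}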
...@@ -31,16 +31,16 @@ func NewProcessedContractEvents() *ProcessedContractEvents { ...@@ -31,16 +31,16 @@ func NewProcessedContractEvents() *ProcessedContractEvents {
} }
func (p *ProcessedContractEvents) AddLog(log *types.Log, time uint64) *database.ContractEvent { func (p *ProcessedContractEvents) AddLog(log *types.Log, time uint64) *database.ContractEvent {
contractEvent := database.ContractEventFromGethLog(log, time) event := database.ContractEventFromLog(log, time)
emptyHash := common.Hash{} emptyHash := common.Hash{}
p.events = append(p.events, &contractEvent) p.events = append(p.events, &event)
p.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index}] = &contractEvent p.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index}] = &event
if contractEvent.EventSignature != emptyHash { // ignore anon events if event.EventSignature != emptyHash {
p.eventsBySignature[contractEvent.EventSignature] = append(p.eventsBySignature[contractEvent.EventSignature], &contractEvent) p.eventsBySignature[event.EventSignature] = append(p.eventsBySignature[event.EventSignature], &event)
} }
return &contractEvent return &event
} }
func UnpackLog(out interface{}, log *types.Log, name string, contractAbi *abi.ABI) error { func UnpackLog(out interface{}, log *types.Log, name string, contractAbi *abi.ABI) error {
......
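Inside the processor package, this helper is typically filled from an eth_getLogs batch and then queried by event signature; a minimal sketch under that assumption (logs, timestamps, and sentMessageEventAbi are placeholder names, not identifiers defined here):

// logs []types.Log fetched via a log filter; timestamps maps block hash -> block timestamp
processed := NewProcessedContractEvents()
for i := range logs {
	processed.AddLog(&logs[i], timestamps[logs[i].BlockHash])
}

// later, collect every occurrence of one event by its ABI ID,
// e.g. the CrossDomainMessenger SentMessage event
sentMessageEvents := processed.eventsBySignature[sentMessageEventAbi.ID]
_ = sentMessageEvents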
...@@ -40,12 +40,12 @@ type CrossDomainMessengerSentMessageEvent struct { ...@@ -40,12 +40,12 @@ type CrossDomainMessengerSentMessageEvent struct {
Value *big.Int Value *big.Int
MessageHash common.Hash MessageHash common.Hash
RawEvent *database.ContractEvent Event *database.ContractEvent
} }
type CrossDomainMessengerRelayedMessageEvent struct { type CrossDomainMessengerRelayedMessageEvent struct {
*bindings.CrossDomainMessengerRelayedMessage *bindings.CrossDomainMessengerRelayedMessage
RawEvent *database.ContractEvent Event *database.ContractEvent
} }
func CrossDomainMessengerSentMessageEvents(events *ProcessedContractEvents) ([]CrossDomainMessengerSentMessageEvent, error) { func CrossDomainMessengerSentMessageEvents(events *ProcessedContractEvents) ([]CrossDomainMessengerSentMessageEvent, error) {
...@@ -60,7 +60,7 @@ func CrossDomainMessengerSentMessageEvents(events *ProcessedContractEvents) ([]C ...@@ -60,7 +60,7 @@ func CrossDomainMessengerSentMessageEvents(events *ProcessedContractEvents) ([]C
processedSentMessageEvents := events.eventsBySignature[sentMessageEventAbi.ID] processedSentMessageEvents := events.eventsBySignature[sentMessageEventAbi.ID]
crossDomainMessageEvents := make([]CrossDomainMessengerSentMessageEvent, len(processedSentMessageEvents)) crossDomainMessageEvents := make([]CrossDomainMessengerSentMessageEvent, len(processedSentMessageEvents))
for i, sentMessageEvent := range processedSentMessageEvents { for i, sentMessageEvent := range processedSentMessageEvents {
log := sentMessageEvent.GethLog log := sentMessageEvent.RLPLog
var sentMsgData bindings.CrossDomainMessengerSentMessage var sentMsgData bindings.CrossDomainMessengerSentMessage
sentMsgData.Raw = *log sentMsgData.Raw = *log
...@@ -70,7 +70,7 @@ func CrossDomainMessengerSentMessageEvents(events *ProcessedContractEvents) ([]C ...@@ -70,7 +70,7 @@ func CrossDomainMessengerSentMessageEvents(events *ProcessedContractEvents) ([]C
} }
var sentMsgExtensionData bindings.CrossDomainMessengerSentMessageExtension1 var sentMsgExtensionData bindings.CrossDomainMessengerSentMessageExtension1
extensionLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].GethLog extensionLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].RLPLog
sentMsgExtensionData.Raw = *extensionLog sentMsgExtensionData.Raw = *extensionLog
err = UnpackLog(&sentMsgExtensionData, extensionLog, sentMessageEventExtensionAbi.Name, crossDomainMessengerABI) err = UnpackLog(&sentMsgExtensionData, extensionLog, sentMessageEventExtensionAbi.Name, crossDomainMessengerABI)
if err != nil { if err != nil {
...@@ -86,7 +86,7 @@ func CrossDomainMessengerSentMessageEvents(events *ProcessedContractEvents) ([]C ...@@ -86,7 +86,7 @@ func CrossDomainMessengerSentMessageEvents(events *ProcessedContractEvents) ([]C
CrossDomainMessengerSentMessage: &sentMsgData, CrossDomainMessengerSentMessage: &sentMsgData,
Value: sentMsgExtensionData.Value, Value: sentMsgExtensionData.Value,
MessageHash: msgHash, MessageHash: msgHash,
RawEvent: sentMessageEvent, Event: sentMessageEvent,
} }
} }
...@@ -103,7 +103,7 @@ func CrossDomainMessengerRelayedMessageEvents(events *ProcessedContractEvents) ( ...@@ -103,7 +103,7 @@ func CrossDomainMessengerRelayedMessageEvents(events *ProcessedContractEvents) (
processedRelayedMessageEvents := events.eventsBySignature[relayedMessageEventAbi.ID] processedRelayedMessageEvents := events.eventsBySignature[relayedMessageEventAbi.ID]
crossDomainMessageEvents := make([]CrossDomainMessengerRelayedMessageEvent, len(processedRelayedMessageEvents)) crossDomainMessageEvents := make([]CrossDomainMessengerRelayedMessageEvent, len(processedRelayedMessageEvents))
for i, relayedMessageEvent := range processedRelayedMessageEvents { for i, relayedMessageEvent := range processedRelayedMessageEvents {
log := relayedMessageEvent.GethLog log := relayedMessageEvent.RLPLog
var relayedMsgData bindings.CrossDomainMessengerRelayedMessage var relayedMsgData bindings.CrossDomainMessengerRelayedMessage
relayedMsgData.Raw = *log relayedMsgData.Raw = *log
...@@ -114,7 +114,7 @@ func CrossDomainMessengerRelayedMessageEvents(events *ProcessedContractEvents) ( ...@@ -114,7 +114,7 @@ func CrossDomainMessengerRelayedMessageEvents(events *ProcessedContractEvents) (
crossDomainMessageEvents[i] = CrossDomainMessengerRelayedMessageEvent{ crossDomainMessageEvents[i] = CrossDomainMessengerRelayedMessageEvent{
CrossDomainMessengerRelayedMessage: &relayedMsgData, CrossDomainMessengerRelayedMessage: &relayedMsgData,
RawEvent: relayedMessageEvent, Event: relayedMessageEvent,
} }
} }
......
...@@ -6,8 +6,8 @@ import ( ...@@ -6,8 +6,8 @@ import (
"errors" "errors"
"fmt" "fmt"
"math/big" "math/big"
"reflect"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database" "github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/node" "github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
...@@ -23,32 +23,6 @@ import ( ...@@ -23,32 +23,6 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
type L1Contracts struct {
OptimismPortal common.Address
L2OutputOracle common.Address
L1CrossDomainMessenger common.Address
L1StandardBridge common.Address
L1ERC721Bridge common.Address
// Some more contracts -- ProxyAdmin, SystemConfig, etcc
// Ignore the auxiliary contracts?
// Legacy contracts? We'll add this in to index the legacy chain.
// Remove afterwards?
}
func (c L1Contracts) ToSlice() []common.Address {
fields := reflect.VisibleFields(reflect.TypeOf(c))
v := reflect.ValueOf(c)
contracts := make([]common.Address, len(fields))
for i, field := range fields {
contracts[i] = (v.FieldByName(field.Name).Interface()).(common.Address)
}
return contracts
}
type checkpointAbi struct { type checkpointAbi struct {
l2OutputOracle *abi.ABI l2OutputOracle *abi.ABI
legacyStateCommitmentChain *abi.ABI legacyStateCommitmentChain *abi.ABI
...@@ -58,7 +32,7 @@ type L1Processor struct { ...@@ -58,7 +32,7 @@ type L1Processor struct {
processor processor
} }
func NewL1Processor(logger log.Logger, ethClient node.EthClient, db *database.DB, l1Contracts L1Contracts) (*L1Processor, error) { func NewL1Processor(logger log.Logger, ethClient node.EthClient, db *database.DB, l1Contracts config.L1Contracts) (*L1Processor, error) {
l1ProcessLog := logger.New("processor", "l1") l1ProcessLog := logger.New("processor", "l1")
l1ProcessLog.Info("initializing processor") l1ProcessLog.Info("initializing processor")
...@@ -107,7 +81,7 @@ func NewL1Processor(logger log.Logger, ethClient node.EthClient, db *database.DB ...@@ -107,7 +81,7 @@ func NewL1Processor(logger log.Logger, ethClient node.EthClient, db *database.DB
return l1Processor, nil return l1Processor, nil
} }
func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1Contracts, checkpointAbi checkpointAbi) ProcessFn { func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts config.L1Contracts, checkpointAbi checkpointAbi) ProcessFn {
rawEthClient := ethclient.NewClient(ethClient.RawRpcClient()) rawEthClient := ethclient.NewClient(ethClient.RawRpcClient())
contractAddrs := l1Contracts.ToSlice() contractAddrs := l1Contracts.ToSlice()
...@@ -197,7 +171,7 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1 ...@@ -197,7 +171,7 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1
continue continue
} }
indexedL1Headers = append(indexedL1Headers, &database.L1BlockHeader{BlockHeader: database.BlockHeaderFromGethHeader(header)}) indexedL1Headers = append(indexedL1Headers, &database.L1BlockHeader{BlockHeader: database.BlockHeaderFromHeader(header)})
} }
/** Update Database **/ /** Update Database **/
...@@ -261,7 +235,7 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1 ...@@ -261,7 +235,7 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1
} }
} }
func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *database.DB, l1Contracts L1Contracts, events *ProcessedContractEvents) error { func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *database.DB, l1Contracts config.L1Contracts, events *ProcessedContractEvents) error {
// (1) Process New Deposits // (1) Process New Deposits
portalDeposits, err := OptimismPortalTransactionDepositEvents(events) portalDeposits, err := OptimismPortalTransactionDepositEvents(events)
if err != nil { if err != nil {
...@@ -275,7 +249,7 @@ func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa ...@@ -275,7 +249,7 @@ func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa
transactionDeposits[i] = &database.L1TransactionDeposit{ transactionDeposits[i] = &database.L1TransactionDeposit{
SourceHash: depositTx.SourceHash, SourceHash: depositTx.SourceHash,
L2TransactionHash: types.NewTx(depositTx).Hash(), L2TransactionHash: types.NewTx(depositTx).Hash(),
InitiatedL1EventGUID: depositEvent.RawEvent.GUID, InitiatedL1EventGUID: depositEvent.Event.GUID,
Version: database.U256{Int: depositEvent.Version}, Version: database.U256{Int: depositEvent.Version},
OpaqueData: depositEvent.OpaqueData, OpaqueData: depositEvent.OpaqueData,
GasLimit: database.U256{Int: new(big.Int).SetUint64(depositTx.Gas)}, GasLimit: database.U256{Int: new(big.Int).SetUint64(depositTx.Gas)},
...@@ -284,7 +258,7 @@ func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa ...@@ -284,7 +258,7 @@ func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa
ToAddress: depositTx.From, ToAddress: depositTx.From,
Amount: database.U256{Int: depositTx.Value}, Amount: database.U256{Int: depositTx.Value},
Data: depositTx.Data, Data: depositTx.Data,
Timestamp: depositEvent.RawEvent.Timestamp, Timestamp: depositEvent.Event.Timestamp,
}, },
} }
...@@ -294,6 +268,7 @@ func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa ...@@ -294,6 +268,7 @@ func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa
TransactionSourceHash: depositTx.SourceHash, TransactionSourceHash: depositTx.SourceHash,
Tx: transactionDeposits[i].Tx, Tx: transactionDeposits[i].Tx,
TokenPair: database.TokenPair{ TokenPair: database.TokenPair{
// TODO index eth token if it doesn't exist
L1TokenAddress: predeploys.LegacyERC20ETHAddr, L1TokenAddress: predeploys.LegacyERC20ETHAddr,
L2TokenAddress: predeploys.LegacyERC20ETHAddr, L2TokenAddress: predeploys.LegacyERC20ETHAddr,
}, },
...@@ -340,7 +315,7 @@ func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa ...@@ -340,7 +315,7 @@ func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa
if withdrawal == nil { if withdrawal == nil {
// We need to ensure we are in a caught up state before claiming a missing event. Since L2 timestamps // We need to ensure we are in a caught up state before claiming a missing event. Since L2 timestamps
// are derived from L1, we can simply compare the timestamp of this event with the latest L2 header. // are derived from L1, we can simply compare the timestamp of this event with the latest L2 header.
if provenWithdrawal.RawEvent.Timestamp > latestL2Header.Timestamp { if provenWithdrawal.Event.Timestamp > latestL2Header.Timestamp {
processLog.Warn("behind on indexed L2 withdrawals") processLog.Warn("behind on indexed L2 withdrawals")
return errors.New("waiting for L2Processor to catch up") return errors.New("waiting for L2Processor to catch up")
} else { } else {
...@@ -349,7 +324,7 @@ func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa ...@@ -349,7 +324,7 @@ func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa
} }
} }
err = db.BridgeTransactions.MarkL2TransactionWithdrawalProvenEvent(withdrawalHash, provenWithdrawal.RawEvent.GUID) err = db.BridgeTransactions.MarkL2TransactionWithdrawalProvenEvent(withdrawalHash, provenWithdrawal.Event.GUID)
if err != nil { if err != nil {
return err return err
} }
...@@ -376,7 +351,7 @@ func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa ...@@ -376,7 +351,7 @@ func l1ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa
return errors.New("withdrawal missing!") return errors.New("withdrawal missing!")
} }
err = db.BridgeTransactions.MarkL2TransactionWithdrawalFinalizedEvent(withdrawalHash, finalizedWithdrawal.RawEvent.GUID, finalizedWithdrawal.Success) err = db.BridgeTransactions.MarkL2TransactionWithdrawalFinalizedEvent(withdrawalHash, finalizedWithdrawal.Event.GUID, finalizedWithdrawal.Success)
if err != nil { if err != nil {
return err return err
} }
...@@ -399,10 +374,10 @@ func l1ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db ...@@ -399,10 +374,10 @@ func l1ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db
sentMessages := make([]*database.L1BridgeMessage, len(sentMessageEvents)) sentMessages := make([]*database.L1BridgeMessage, len(sentMessageEvents))
for i, sentMessageEvent := range sentMessageEvents { for i, sentMessageEvent := range sentMessageEvents {
log := sentMessageEvent.RawEvent.GethLog log := sentMessageEvent.Event.RLPLog
// extract the deposit hash from the previous TransactionDepositedEvent // extract the deposit hash from the previous TransactionDepositedEvent
transactionDepositedLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index - 1}].GethLog transactionDepositedLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index - 1}].RLPLog
depositTx, err := derive.UnmarshalDepositLogEvent(transactionDepositedLog) depositTx, err := derive.UnmarshalDepositLogEvent(transactionDepositedLog)
if err != nil { if err != nil {
return err return err
...@@ -413,14 +388,14 @@ func l1ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db ...@@ -413,14 +388,14 @@ func l1ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db
BridgeMessage: database.BridgeMessage{ BridgeMessage: database.BridgeMessage{
Nonce: database.U256{Int: sentMessageEvent.MessageNonce}, Nonce: database.U256{Int: sentMessageEvent.MessageNonce},
MessageHash: sentMessageEvent.MessageHash, MessageHash: sentMessageEvent.MessageHash,
SentMessageEventGUID: sentMessageEvent.RawEvent.GUID, SentMessageEventGUID: sentMessageEvent.Event.GUID,
GasLimit: database.U256{Int: sentMessageEvent.GasLimit}, GasLimit: database.U256{Int: sentMessageEvent.GasLimit},
Tx: database.Transaction{ Tx: database.Transaction{
FromAddress: sentMessageEvent.Sender, FromAddress: sentMessageEvent.Sender,
ToAddress: sentMessageEvent.Target, ToAddress: sentMessageEvent.Target,
Amount: database.U256{Int: sentMessageEvent.Value}, Amount: database.U256{Int: sentMessageEvent.Value},
Data: sentMessageEvent.Message, Data: sentMessageEvent.Message,
Timestamp: sentMessageEvent.RawEvent.Timestamp, Timestamp: sentMessageEvent.Event.Timestamp,
}, },
}, },
} }
...@@ -454,7 +429,7 @@ func l1ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db ...@@ -454,7 +429,7 @@ func l1ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db
return fmt.Errorf("missing indexed L2CrossDomainMessager mesesage: 0x%x", relayedMessage.MsgHash) return fmt.Errorf("missing indexed L2CrossDomainMessager mesesage: 0x%x", relayedMessage.MsgHash)
} }
err = db.BridgeMessages.MarkRelayedL2BridgeMessage(relayedMessage.MsgHash, relayedMessage.RawEvent.GUID) err = db.BridgeMessages.MarkRelayedL2BridgeMessage(relayedMessage.MsgHash, relayedMessage.Event.GUID)
if err != nil { if err != nil {
return err return err
} }
...@@ -479,11 +454,12 @@ func l1ProcessContractEventsStandardBridge(processLog log.Logger, db *database.D ...@@ -479,11 +454,12 @@ func l1ProcessContractEventsStandardBridge(processLog log.Logger, db *database.D
deposits := make([]*database.L1BridgeDeposit, len(initiatedDepositEvents)) deposits := make([]*database.L1BridgeDeposit, len(initiatedDepositEvents))
for i, initiatedBridgeEvent := range initiatedDepositEvents { for i, initiatedBridgeEvent := range initiatedDepositEvents {
log := initiatedBridgeEvent.RawEvent.GethLog log := initiatedBridgeEvent.Event.RLPLog
// extract the deposit hash from the following TransactionDeposited event // extract the deposit hash from the following TransactionDeposited event. The `BlockHash` and `LogIndex`
transactionDepositedLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].GethLog // fields are filled in for `RLPLog` which is required for `DepositTx#SourceHash` to be computed correctly
depositTx, err := derive.UnmarshalDepositLogEvent(transactionDepositedLog) transactionDepositedRLPLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].RLPLog
depositTx, err := derive.UnmarshalDepositLogEvent(transactionDepositedRLPLog)
if err != nil { if err != nil {
return err return err
} }
...@@ -491,13 +467,14 @@ func l1ProcessContractEventsStandardBridge(processLog log.Logger, db *database.D ...@@ -491,13 +467,14 @@ func l1ProcessContractEventsStandardBridge(processLog log.Logger, db *database.D
deposits[i] = &database.L1BridgeDeposit{ deposits[i] = &database.L1BridgeDeposit{
TransactionSourceHash: depositTx.SourceHash, TransactionSourceHash: depositTx.SourceHash,
CrossDomainMessengerNonce: &database.U256{Int: initiatedBridgeEvent.CrossDomainMessengerNonce}, CrossDomainMessengerNonce: &database.U256{Int: initiatedBridgeEvent.CrossDomainMessengerNonce},
TokenPair: database.TokenPair{L1TokenAddress: initiatedBridgeEvent.LocalToken, L2TokenAddress: initiatedBridgeEvent.RemoteToken}, // TODO index the tokens pairs if they don't exist
TokenPair: database.TokenPair{L1TokenAddress: initiatedBridgeEvent.LocalToken, L2TokenAddress: initiatedBridgeEvent.RemoteToken},
Tx: database.Transaction{ Tx: database.Transaction{
FromAddress: initiatedBridgeEvent.From, FromAddress: initiatedBridgeEvent.From,
ToAddress: initiatedBridgeEvent.To, ToAddress: initiatedBridgeEvent.To,
Amount: database.U256{Int: initiatedBridgeEvent.Amount}, Amount: database.U256{Int: initiatedBridgeEvent.Amount},
Data: initiatedBridgeEvent.ExtraData, Data: initiatedBridgeEvent.ExtraData,
Timestamp: initiatedBridgeEvent.RawEvent.Timestamp, Timestamp: initiatedBridgeEvent.Event.Timestamp,
}, },
} }
} }
......
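The comment in the hunk above explains the adjacency trick used here: the StandardBridge deposit log is paired with the OptimismPortal TransactionDeposited log that follows it in the same block, looked up by (BlockHash, LogIndex + 1), and the lookup must return the RLPLog so that BlockHash and Index are populated for DepositTx#SourceHash. A minimal sketch of that lookup as a standalone helper, assuming it lives in the same processor package and that the eventByLogIndex values expose an RLPLog field as shown in the diff (the helper name is illustrative):

// lookupFollowingRLPLog returns the raw log emitted immediately after bridgeLog
// in the same block. For L1 deposits this is the OptimismPortal
// TransactionDeposited event; RLPLog keeps BlockHash and Index populated so
// derive.UnmarshalDepositLogEvent can compute DepositTx#SourceHash correctly.
func lookupFollowingRLPLog(events *ProcessedContractEvents, bridgeLog *types.Log) (*types.Log, error) {
	key := ProcessedContractEventLogIndexKey{bridgeLog.BlockHash, bridgeLog.Index + 1}
	event, ok := events.eventByLogIndex[key]
	if !ok {
		return nil, fmt.Errorf("no adjacent log in block %s at index %d", bridgeLog.BlockHash, bridgeLog.Index+1)
	}
	return event.RLPLog, nil
}

The L2 cross-domain processor further below performs the mirror-image lookup at LogIndex - 1 to pair a SentMessage with the MessagePassed event that precedes it.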
...@@ -106,7 +106,7 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2 ...@@ -106,7 +106,7 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2
l2Headers := make([]*database.L2BlockHeader, len(headers)) l2Headers := make([]*database.L2BlockHeader, len(headers))
l2HeaderMap := make(map[common.Hash]*types.Header) l2HeaderMap := make(map[common.Hash]*types.Header)
for i, header := range headers { for i, header := range headers {
l2Headers[i] = &database.L2BlockHeader{BlockHeader: database.BlockHeaderFromGethHeader(header)} l2Headers[i] = &database.L2BlockHeader{BlockHeader: database.BlockHeaderFromHeader(header)}
l2HeaderMap[l2Headers[i].Hash] = header l2HeaderMap[l2Headers[i].Hash] = header
} }
...@@ -183,7 +183,7 @@ func l2ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa ...@@ -183,7 +183,7 @@ func l2ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa
for i, withdrawalEvent := range messagesPassed { for i, withdrawalEvent := range messagesPassed {
transactionWithdrawals[i] = &database.L2TransactionWithdrawal{ transactionWithdrawals[i] = &database.L2TransactionWithdrawal{
WithdrawalHash: withdrawalEvent.WithdrawalHash, WithdrawalHash: withdrawalEvent.WithdrawalHash,
InitiatedL2EventGUID: withdrawalEvent.RawEvent.GUID, InitiatedL2EventGUID: withdrawalEvent.Event.GUID,
Nonce: database.U256{Int: withdrawalEvent.Nonce}, Nonce: database.U256{Int: withdrawalEvent.Nonce},
GasLimit: database.U256{Int: withdrawalEvent.GasLimit}, GasLimit: database.U256{Int: withdrawalEvent.GasLimit},
Tx: database.Transaction{ Tx: database.Transaction{
...@@ -191,7 +191,7 @@ func l2ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa ...@@ -191,7 +191,7 @@ func l2ProcessContractEventsBridgeTransactions(processLog log.Logger, db *databa
ToAddress: withdrawalEvent.Target, ToAddress: withdrawalEvent.Target,
Amount: database.U256{Int: withdrawalEvent.Value}, Amount: database.U256{Int: withdrawalEvent.Value},
Data: withdrawalEvent.Data, Data: withdrawalEvent.Data,
Timestamp: withdrawalEvent.RawEvent.Timestamp, Timestamp: withdrawalEvent.Event.Timestamp,
}, },
} }
...@@ -246,10 +246,10 @@ func l2ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db ...@@ -246,10 +246,10 @@ func l2ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db
sentMessages := make([]*database.L2BridgeMessage, len(sentMessageEvents)) sentMessages := make([]*database.L2BridgeMessage, len(sentMessageEvents))
for i, sentMessageEvent := range sentMessageEvents { for i, sentMessageEvent := range sentMessageEvents {
log := sentMessageEvent.RawEvent.GethLog log := sentMessageEvent.Event.RLPLog
// extract the withdrawal hash from the previous MessagePassed event // extract the withdrawal hash from the previous MessagePassed event
msgPassedLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index - 1}].GethLog msgPassedLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index - 1}].RLPLog
msgPassedEvent, err := l2ToL1MessagePasserABI.ParseMessagePassed(*msgPassedLog) msgPassedEvent, err := l2ToL1MessagePasserABI.ParseMessagePassed(*msgPassedLog)
if err != nil { if err != nil {
return err return err
...@@ -260,14 +260,14 @@ func l2ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db ...@@ -260,14 +260,14 @@ func l2ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db
BridgeMessage: database.BridgeMessage{ BridgeMessage: database.BridgeMessage{
Nonce: database.U256{Int: sentMessageEvent.MessageNonce}, Nonce: database.U256{Int: sentMessageEvent.MessageNonce},
MessageHash: sentMessageEvent.MessageHash, MessageHash: sentMessageEvent.MessageHash,
SentMessageEventGUID: sentMessageEvent.RawEvent.GUID, SentMessageEventGUID: sentMessageEvent.Event.GUID,
GasLimit: database.U256{Int: sentMessageEvent.GasLimit}, GasLimit: database.U256{Int: sentMessageEvent.GasLimit},
Tx: database.Transaction{ Tx: database.Transaction{
FromAddress: sentMessageEvent.Sender, FromAddress: sentMessageEvent.Sender,
ToAddress: sentMessageEvent.Target, ToAddress: sentMessageEvent.Target,
Amount: database.U256{Int: sentMessageEvent.Value}, Amount: database.U256{Int: sentMessageEvent.Value},
Data: sentMessageEvent.Message, Data: sentMessageEvent.Message,
Timestamp: sentMessageEvent.RawEvent.Timestamp, Timestamp: sentMessageEvent.Event.Timestamp,
}, },
}, },
} }
...@@ -307,7 +307,7 @@ func l2ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db ...@@ -307,7 +307,7 @@ func l2ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db
// Since the transaction processor running prior does not ensure the deposit inclusion, we need to // Since the transaction processor running prior does not ensure the deposit inclusion, we need to
// ensure we are in a caught up state before claiming a missing event. Since L2 timestamps are derived // ensure we are in a caught up state before claiming a missing event. Since L2 timestamps are derived
// from L1, we can simply compare the timestamp of this event with the latest L1 header. // from L1, we can simply compare the timestamp of this event with the latest L1 header.
if latestL1Header == nil || relayedMessage.RawEvent.Timestamp > latestL1Header.Timestamp { if latestL1Header == nil || relayedMessage.Event.Timestamp > latestL1Header.Timestamp {
processLog.Warn("waiting for L1Processor to catch up on L1CrossDomainMessages") processLog.Warn("waiting for L1Processor to catch up on L1CrossDomainMessages")
return errors.New("waiting for L1Processor to catch up") return errors.New("waiting for L1Processor to catch up")
} else { } else {
...@@ -316,7 +316,7 @@ func l2ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db ...@@ -316,7 +316,7 @@ func l2ProcessContractEventsBridgeCrossDomainMessages(processLog log.Logger, db
} }
} }
err = db.BridgeMessages.MarkRelayedL1BridgeMessage(relayedMessage.MsgHash, relayedMessage.RawEvent.GUID) err = db.BridgeMessages.MarkRelayedL1BridgeMessage(relayedMessage.MsgHash, relayedMessage.Event.GUID)
if err != nil { if err != nil {
return err return err
} }
...@@ -346,10 +346,10 @@ func l2ProcessContractEventsStandardBridge(processLog log.Logger, db *database.D ...@@ -346,10 +346,10 @@ func l2ProcessContractEventsStandardBridge(processLog log.Logger, db *database.D
withdrawals := make([]*database.L2BridgeWithdrawal, len(initiatedWithdrawalEvents)) withdrawals := make([]*database.L2BridgeWithdrawal, len(initiatedWithdrawalEvents))
for i, initiatedBridgeEvent := range initiatedWithdrawalEvents { for i, initiatedBridgeEvent := range initiatedWithdrawalEvents {
log := initiatedBridgeEvent.RawEvent.GethLog log := initiatedBridgeEvent.Event.RLPLog
// extract the withdrawal hash from the following MessagePassed event // extract the withdrawal hash from the following MessagePassed event
msgPassedLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].GethLog msgPassedLog := events.eventByLogIndex[ProcessedContractEventLogIndexKey{log.BlockHash, log.Index + 1}].RLPLog
msgPassedEvent, err := l2ToL1MessagePasserABI.ParseMessagePassed(*msgPassedLog) msgPassedEvent, err := l2ToL1MessagePasserABI.ParseMessagePassed(*msgPassedLog)
if err != nil { if err != nil {
return err return err
...@@ -364,7 +364,7 @@ func l2ProcessContractEventsStandardBridge(processLog log.Logger, db *database.D ...@@ -364,7 +364,7 @@ func l2ProcessContractEventsStandardBridge(processLog log.Logger, db *database.D
ToAddress: initiatedBridgeEvent.To, ToAddress: initiatedBridgeEvent.To,
Amount: database.U256{Int: initiatedBridgeEvent.Amount}, Amount: database.U256{Int: initiatedBridgeEvent.Amount},
Data: initiatedBridgeEvent.ExtraData, Data: initiatedBridgeEvent.ExtraData,
Timestamp: initiatedBridgeEvent.RawEvent.Timestamp, Timestamp: initiatedBridgeEvent.Event.Timestamp,
}, },
} }
} }
......
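The guard in the relayed-message hunk relies on the fact that L2 timestamps are derived from L1: an L2 event whose timestamp exceeds the latest indexed L1 header means the L1 processor is still catching up, not that the sent message is truly missing. A hedged sketch of that predicate, assuming a database.L1BlockHeader type with a uint64 Timestamp field mirroring the L2 header type used above:

// isL1ProcessorBehind reports whether the indexed L1 chain has not yet reached
// the timestamp of an L2 event. Because L2 timestamps are derived from L1, an
// L2 event newer than the latest indexed L1 header means indexing simply has
// not reached that point yet, so a "missing" message should not be treated as
// an error.
func isL1ProcessorBehind(latestL1Header *database.L1BlockHeader, l2EventTimestamp uint64) bool {
	return latestL1Header == nil || l2EventTimestamp > latestL1Header.Timestamp
}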
...@@ -7,7 +7,7 @@ import ( ...@@ -7,7 +7,7 @@ import (
type L2ToL1MessagePasserMessagePassed struct { type L2ToL1MessagePasserMessagePassed struct {
*bindings.L2ToL1MessagePasserMessagePassed *bindings.L2ToL1MessagePasserMessagePassed
RawEvent *database.ContractEvent Event *database.ContractEvent
} }
func L2ToL1MessagePasserMessagesPassed(events *ProcessedContractEvents) ([]L2ToL1MessagePasserMessagePassed, error) { func L2ToL1MessagePasserMessagesPassed(events *ProcessedContractEvents) ([]L2ToL1MessagePasserMessagePassed, error) {
...@@ -20,7 +20,7 @@ func L2ToL1MessagePasserMessagesPassed(events *ProcessedContractEvents) ([]L2ToL ...@@ -20,7 +20,7 @@ func L2ToL1MessagePasserMessagesPassed(events *ProcessedContractEvents) ([]L2ToL
processedMessagePassedEvents := events.eventsBySignature[l2ToL1MessagePasserAbi.Events[eventName].ID] processedMessagePassedEvents := events.eventsBySignature[l2ToL1MessagePasserAbi.Events[eventName].ID]
messagesPassed := make([]L2ToL1MessagePasserMessagePassed, len(processedMessagePassedEvents)) messagesPassed := make([]L2ToL1MessagePasserMessagePassed, len(processedMessagePassedEvents))
for i, messagePassedEvent := range processedMessagePassedEvents { for i, messagePassedEvent := range processedMessagePassedEvents {
log := messagePassedEvent.GethLog log := messagePassedEvent.RLPLog
var messagePassed bindings.L2ToL1MessagePasserMessagePassed var messagePassed bindings.L2ToL1MessagePasserMessagePassed
messagePassed.Raw = *log messagePassed.Raw = *log
...@@ -31,7 +31,7 @@ func L2ToL1MessagePasserMessagesPassed(events *ProcessedContractEvents) ([]L2ToL ...@@ -31,7 +31,7 @@ func L2ToL1MessagePasserMessagesPassed(events *ProcessedContractEvents) ([]L2ToL
messagesPassed[i] = L2ToL1MessagePasserMessagePassed{ messagesPassed[i] = L2ToL1MessagePasserMessagePassed{
L2ToL1MessagePasserMessagePassed: &messagePassed, L2ToL1MessagePasserMessagePassed: &messagePassed,
RawEvent: messagePassedEvent, Event: messagePassedEvent,
} }
} }
......
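The wrapper above pairs the ABI-decoded MessagePassed fields with the indexed ContractEvent they were extracted from, so callers can read both without re-fetching logs. A brief usage sketch under those assumptions (processLog and the surrounding error handling are illustrative):

// Hypothetical caller inside the L2 processor: decode every MessagePassed log
// from a processed batch and read both the unpacked event fields and the
// indexed metadata carried on Event.
messagesPassed, err := L2ToL1MessagePasserMessagesPassed(events)
if err != nil {
	return err
}
for _, messagePassed := range messagesPassed {
	processLog.Info("withdrawal initiated",
		"withdrawalHash", messagePassed.WithdrawalHash, // from the embedded bindings struct
		"eventGUID", messagePassed.Event.GUID)          // from the indexed ContractEvent
}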
package bridge

import "time"

const (
	// DefaultConnectionTimeout is the default timeout for establishing a connection.
	DefaultConnectionTimeout = 60 * time.Second
)
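Presumably this constant bounds how long the bridge service waits when first connecting to its RPC endpoints. A hedged sketch of one way it might be applied with a context deadline; the dial helper and its caller are illustrative, not part of the diff:

package bridge

import (
	"context"

	"github.com/ethereum/go-ethereum/ethclient"
)

// dialWithTimeout is a hypothetical helper showing DefaultConnectionTimeout
// bounding the initial RPC connection attempt.
func dialWithTimeout(rpcURL string) (*ethclient.Client, error) {
	ctx, cancel := context.WithTimeout(context.Background(), DefaultConnectionTimeout)
	defer cancel()
	return ethclient.DialContext(ctx, rpcURL)
}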
package bridge

import "github.com/ethereum/go-ethereum/log"

// logger is the package-level logger for the L1 side of the bridge service.
var logger = log.New("service", "l1-bridge")
package bridge

import "github.com/ethereum/go-ethereum/log"

// logger is the package-level logger for the L2 side of the bridge service.
var logger = log.New("service", "l2-bridge")