Commit f5fd582f authored by mergify[bot], committed by GitHub

Merge branch 'develop' into refcell/contract-caller

parents 6786de09 836ec280
......@@ -3,7 +3,7 @@
"changelog": ["@changesets/changelog-github", { "repo": "ethereum-optimism/optimism" }],
"commit": false,
"fixed": [],
"linked": [],
"linked": [["contracts-bedrock", "contracts-ts"]],
"access": "public",
"baseBranch": "develop",
"updateInternalDependencies": "patch",
......
......@@ -143,6 +143,7 @@ jobs:
- "packages/fault-detector/node_modules"
- "packages/replica-healthcheck/node_modules"
- "packages/sdk/node_modules"
- "packages/contracts-ts/node_modules"
- run:
name: print forge version
command: forge --version
......@@ -709,6 +710,24 @@ jobs:
name: Upload coverage
command: codecov --verbose --clean --flags <<parameters.coverage_flag>>
contracts-ts-tests:
docker:
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
resource_class: large
steps:
- checkout
- attach_workspace: { at: "." }
- restore_cache:
name: Restore pnpm Package Cache
keys:
- pnpm-packages-v2-{{ checksum "pnpm-lock.yaml" }}
- check-changed:
patterns: sdk,contracts-bedrock,contracts
- run:
name: Check generated and build
command: pnpm generate:check
working_directory: packages/contracts-ts
sdk-next-tests:
docker:
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
......@@ -1008,7 +1027,7 @@ jobs:
steps:
- checkout
- check-changed:
patterns: op-(.+),packages
patterns: op-(.+),packages,ops-bedrock
- run:
name: Install latest golang
command: |
......@@ -1336,6 +1355,13 @@ workflows:
dependencies: "(common-ts|core-utils|sdk)"
requires:
- pnpm-monorepo
- js-lint-test:
name: contracts-ts-tests
coverage_flag: contracts-ts-tests
package_name: contracts-ts
dependencies: '(contracts-bedrock|contracts-ts)'
requires:
- pnpm-monorepo
- js-lint-test:
name: sdk-next-tests
coverage_flag: sdk-next-tests
......
......@@ -49,7 +49,8 @@ jobs:
id: changesets
with:
createGithubReleases: false
publish: pnpm release
publish: pnpm release:publish
version: pnpm release:version
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
......
......@@ -56,6 +56,9 @@
"inputs": ["default", "testing", "^production"],
"dependsOn": ["^build"]
},
"generate": {
"dependsOn": ["^build"]
},
"build:contracts": {
"inputs": [
"configsProject",
......
......@@ -28,3 +28,7 @@ $ make devtools
```
The geth docs for `abigen` can be found [here](https://geth.ethereum.org/docs/dapp/native-bindings).
## See also
TypeScript bindings are also generated in [@eth-optimism/contracts-ts](../packages/contracts-ts/)
......@@ -2,6 +2,9 @@ package predeploys
import "github.com/ethereum/go-ethereum/common"
// TODO - we should get a single TOML, YAML, or JSON file as the source of truth in the @eth-optimism/bedrock package
// This needs to be kept in sync with @eth-optimism/contracts-ts/wagmi.config.ts, which also specifies these addresses
// To improve robustness and maintainability, contracts-bedrock should export all addresses
const (
L2ToL1MessagePasser = "0x4200000000000000000000000000000000000016"
DeployerWhitelist = "0x4200000000000000000000000000000000000002"
......
......@@ -31,6 +31,7 @@ func setupFaultDisputeGame() (common.Address, *bind.TransactOpts, *backends.Simu
backend,
[32]byte{0x01},
big.NewInt(15),
uint64(604800), // 7 days
common.Address{0xdd},
)
if err != nil {
......
package op_e2e
import (
"time"
"github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
)
// fakePoS is a testing-only utility to attach to Geth,
// to build a fake proof-of-stake L1 chain with fixed block time and basic lagging safe/finalized blocks.
type fakePoS struct {
clock clock.Clock
eth *eth.Ethereum
log log.Logger
blockTime uint64
finalizedDistance uint64
safeDistance uint64
engineAPI *catalyst.ConsensusAPI
sub ethereum.Subscription
}
func (f *fakePoS) Start() error {
if advancing, ok := f.clock.(*clock.AdvancingClock); ok {
advancing.Start()
}
f.sub = event.NewSubscription(func(quit <-chan struct{}) error {
// poll every half a second: enough to catch up with any block time when ticks are missed
t := f.clock.NewTicker(time.Second / 2)
for {
select {
case now := <-t.Ch():
chain := f.eth.BlockChain()
head := chain.CurrentBlock()
finalized := chain.CurrentFinalBlock()
if finalized == nil { // fallback to genesis if nothing is finalized
finalized = chain.Genesis().Header()
}
safe := chain.CurrentSafeBlock()
if safe == nil { // fallback to finalized if nothing is safe
safe = finalized
}
if head.Number.Uint64() > f.finalizedDistance { // progress finalized block, if we can
finalized = f.eth.BlockChain().GetHeaderByNumber(head.Number.Uint64() - f.finalizedDistance)
}
if head.Number.Uint64() > f.safeDistance { // progress safe block, if we can
safe = f.eth.BlockChain().GetHeaderByNumber(head.Number.Uint64() - f.safeDistance)
}
// start building the block as soon as we are past the current head time
if head.Time >= uint64(now.Unix()) {
continue
}
newBlockTime := head.Time + f.blockTime
if time.Unix(int64(newBlockTime), 0).Add(5 * time.Minute).Before(f.clock.Now()) {
// We're a long way behind, let's skip some blocks...
newBlockTime = uint64(f.clock.Now().Unix())
}
res, err := f.engineAPI.ForkchoiceUpdatedV1(engine.ForkchoiceStateV1{
HeadBlockHash: head.Hash(),
SafeBlockHash: safe.Hash(),
FinalizedBlockHash: finalized.Hash(),
}, &engine.PayloadAttributes{
Timestamp: newBlockTime,
Random: common.Hash{},
SuggestedFeeRecipient: head.Coinbase,
})
if err != nil {
f.log.Error("failed to start building L1 block", "err", err)
continue
}
if res.PayloadID == nil {
f.log.Error("failed to start block building", "res", res)
continue
}
// wait with sealing, if we are not behind already
delay := time.Unix(int64(newBlockTime), 0).Sub(f.clock.Now())
tim := f.clock.NewTimer(delay)
select {
case <-tim.Ch():
// no-op
case <-quit:
tim.Stop()
return nil
}
payload, err := f.engineAPI.GetPayloadV1(*res.PayloadID)
if err != nil {
f.log.Error("failed to finish building L1 block", "err", err)
continue
}
if _, err := f.engineAPI.NewPayloadV1(*payload); err != nil {
f.log.Error("failed to insert built L1 block", "err", err)
continue
}
if _, err := f.engineAPI.ForkchoiceUpdatedV1(engine.ForkchoiceStateV1{
HeadBlockHash: payload.BlockHash,
SafeBlockHash: safe.Hash(),
FinalizedBlockHash: finalized.Hash(),
}, nil); err != nil {
f.log.Error("failed to make built L1 block canonical", "err", err)
continue
}
case <-quit:
return nil
}
}
})
return nil
}
func (f *fakePoS) Stop() error {
f.sub.Unsubscribe()
if advancing, ok := f.clock.(*clock.AdvancingClock); ok {
advancing.Stop()
}
return nil
}
package op_e2e
import (
"context"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/stretchr/testify/require"
)
func TestTimeTravel(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
delete(cfg.Nodes, "verifier")
cfg.SupportL1TimeTravel = true
sys, err := cfg.Start()
require.Nil(t, err, "Error starting up system")
defer sys.Close()
l1Client := sys.Clients["l1"]
preTravel, err := l1Client.BlockByNumber(context.Background(), nil)
require.NoError(t, err)
sys.TimeTravelClock.AdvanceTime(24 * time.Hour)
// Check that the L1 chain reaches the new time reasonably quickly (i.e. without taking a week)
// It should be able to jump straight to the new time with just a single block
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
defer cancel()
err = e2eutils.WaitFor(ctx, time.Second, func() (bool, error) {
postTravel, err := l1Client.BlockByNumber(context.Background(), nil)
if err != nil {
return false, err
}
diff := time.Duration(postTravel.Time()-preTravel.Time()) * time.Second
return diff.Hours() > 23, nil
})
require.NoError(t, err)
}
......@@ -9,9 +9,9 @@ import (
"time"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
......@@ -21,7 +21,6 @@ import (
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
......@@ -110,7 +109,7 @@ func waitForBlock(number *big.Int, client *ethclient.Client, timeout time.Durati
}
}
func initL1Geth(cfg *SystemConfig, genesis *core.Genesis, opts ...GethOption) (*node.Node, *eth.Ethereum, error) {
func initL1Geth(cfg *SystemConfig, genesis *core.Genesis, c clock.Clock, opts ...GethOption) (*node.Node, *eth.Ethereum, error) {
ethConfig := &ethconfig.Config{
NetworkId: cfg.DeployConfig.L1ChainID,
Genesis: genesis,
......@@ -134,6 +133,7 @@ func initL1Geth(cfg *SystemConfig, genesis *core.Genesis, opts ...GethOption) (*
// Instead of running a whole beacon node, we run this fake-proof-of-stake sidecar that sequences L1 blocks using the Engine API.
l1Node.RegisterLifecycle(&fakePoS{
clock: c,
eth: l1Eth,
log: log.Root(), // geth logger is global anyway. Would be nice to replace with a local logger though.
blockTime: cfg.DeployConfig.L1BlockTime,
......@@ -146,104 +146,6 @@ func initL1Geth(cfg *SystemConfig, genesis *core.Genesis, opts ...GethOption) (*
return l1Node, l1Eth, nil
}
// fakePoS is a testing-only utility to attach to Geth,
// to build a fake proof-of-stake L1 chain with fixed block time and basic lagging safe/finalized blocks.
type fakePoS struct {
eth *eth.Ethereum
log log.Logger
blockTime uint64
finalizedDistance uint64
safeDistance uint64
engineAPI *catalyst.ConsensusAPI
sub ethereum.Subscription
}
func (f *fakePoS) Start() error {
f.sub = event.NewSubscription(func(quit <-chan struct{}) error {
// poll every half a second: enough to catch up with any block time when ticks are missed
t := time.NewTicker(time.Second / 2)
for {
select {
case now := <-t.C:
chain := f.eth.BlockChain()
head := chain.CurrentBlock()
finalized := chain.CurrentFinalBlock()
if finalized == nil { // fallback to genesis if nothing is finalized
finalized = chain.Genesis().Header()
}
safe := chain.CurrentSafeBlock()
if safe == nil { // fallback to finalized if nothing is safe
safe = finalized
}
if head.Number.Uint64() > f.finalizedDistance { // progress finalized block, if we can
finalized = f.eth.BlockChain().GetHeaderByNumber(head.Number.Uint64() - f.finalizedDistance)
}
if head.Number.Uint64() > f.safeDistance { // progress safe block, if we can
safe = f.eth.BlockChain().GetHeaderByNumber(head.Number.Uint64() - f.safeDistance)
}
// start building the block as soon as we are past the current head time
if head.Time >= uint64(now.Unix()) {
continue
}
res, err := f.engineAPI.ForkchoiceUpdatedV1(engine.ForkchoiceStateV1{
HeadBlockHash: head.Hash(),
SafeBlockHash: safe.Hash(),
FinalizedBlockHash: finalized.Hash(),
}, &engine.PayloadAttributes{
Timestamp: head.Time + f.blockTime,
Random: common.Hash{},
SuggestedFeeRecipient: head.Coinbase,
})
if err != nil {
f.log.Error("failed to start building L1 block", "err", err)
continue
}
if res.PayloadID == nil {
f.log.Error("failed to start block building", "res", res)
continue
}
// wait with sealing, if we are not behind already
delay := time.Until(time.Unix(int64(head.Time+f.blockTime), 0))
tim := time.NewTimer(delay)
select {
case <-tim.C:
// no-op
case <-quit:
tim.Stop()
return nil
}
payload, err := f.engineAPI.GetPayloadV1(*res.PayloadID)
if err != nil {
f.log.Error("failed to finish building L1 block", "err", err)
continue
}
if _, err := f.engineAPI.NewPayloadV1(*payload); err != nil {
f.log.Error("failed to insert built L1 block", "err", err)
continue
}
if _, err := f.engineAPI.ForkchoiceUpdatedV1(engine.ForkchoiceStateV1{
HeadBlockHash: payload.BlockHash,
SafeBlockHash: safe.Hash(),
FinalizedBlockHash: finalized.Hash(),
}, nil); err != nil {
f.log.Error("failed to make built L1 block canonical", "err", err)
continue
}
case <-quit:
return nil
}
}
})
return nil
}
func (f *fakePoS) Stop() error {
f.sub.Unsubscribe()
return nil
}
func defaultNodeConfig(name string, jwtPath string) *node.Config {
return &node.Config{
Name: name,
......
......@@ -241,6 +241,9 @@ type SystemConfig struct {
// Target L1 tx size for the batcher transactions
BatcherTargetL1TxSizeBytes uint64
// SupportL1TimeTravel determines if the L1 node supports quickly skipping forward in time
SupportL1TimeTravel bool
}
type System struct {
......@@ -258,6 +261,13 @@ type System struct {
L2OutputSubmitter *l2os.L2OutputSubmitter
BatchSubmitter *bss.BatchSubmitter
Mocknet mocknet.Mocknet
// TimeTravelClock is nil unless SystemConfig.SupportL1TimeTravel is set to true.
// It provides access to the clock instance used by the L1 node. Calling TimeTravelClock.AdvanceTime
// allows tests to quickly time travel L1 into the future.
// Note that this time travel may occur in a single block, creating a very large difference in the Time
// of sequential blocks.
TimeTravelClock *clock.AdvancingClock
}
func (sys *System) NodeEndpoint(name string) string {
......@@ -339,6 +349,12 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
}
}()
c := clock.SystemClock
if cfg.SupportL1TimeTravel {
sys.TimeTravelClock = clock.NewAdvancingClock(100 * time.Millisecond)
c = sys.TimeTravelClock
}
l1Genesis, err := genesis.BuildL1DeveloperGenesis(cfg.DeployConfig)
if err != nil {
return nil, err
......@@ -412,7 +428,7 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
sys.RollupConfig = &defaultConfig
// Initialize nodes
l1Node, l1Backend, err := initL1Geth(&cfg, l1Genesis, cfg.GethOptions["l1"]...)
l1Node, l1Backend, err := initL1Geth(&cfg, l1Genesis, c, cfg.GethOptions["l1"]...)
if err != nil {
return nil, err
}
......
package clock
import (
"sync/atomic"
"time"
)
type AdvancingClock struct {
*DeterministicClock
systemTime Clock
ticker Ticker
advanceEvery time.Duration
quit chan interface{}
running atomic.Bool
lastTick time.Time
}
// NewAdvancingClock creates a clock that, when started, advances at the same rate as the system clock but
// can also be advanced by arbitrary amounts using the AdvanceTime method.
// Unlike the system clock, time does not progress smoothly but only increments when AdvanceTime is called or
// approximately after advanceEvery duration has elapsed. When advancing based on the system clock, the total time
// the system clock has advanced is added to the current time, preventing time differences from building up over time.
func NewAdvancingClock(advanceEvery time.Duration) *AdvancingClock {
now := SystemClock.Now()
return &AdvancingClock{
DeterministicClock: NewDeterministicClock(now),
systemTime: SystemClock,
advanceEvery: advanceEvery,
quit: make(chan interface{}),
lastTick: now,
}
}
func (c *AdvancingClock) Start() {
if !c.running.CompareAndSwap(false, true) {
// Already running
return
}
c.ticker = c.systemTime.NewTicker(c.advanceEvery)
go func() {
for {
select {
case now := <-c.ticker.Ch():
c.onTick(now)
case <-c.quit:
return
}
}
}()
}
func (c *AdvancingClock) Stop() {
if !c.running.CompareAndSwap(true, false) {
// Already stopped
return
}
c.quit <- nil
}
func (c *AdvancingClock) onTick(now time.Time) {
if !now.After(c.lastTick) {
// Time hasn't progressed for some reason, so do nothing
return
}
// Advance time by however long it has been since the last update.
// Ensures we don't drift from system time by more and more over time
advanceBy := now.Sub(c.lastTick)
c.AdvanceTime(advanceBy)
c.lastTick = now
}
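A minimal sketch (not part of this diff) of how a test might drive the new `AdvancingClock`, using only the constructor and the embedded `DeterministicClock` methods shown above; the 100ms advance interval and the 24-hour jump are illustrative values:

```go
package clock_test

import (
	"fmt"
	"time"

	"github.com/ethereum-optimism/optimism/op-service/clock"
)

func ExampleAdvancingClock() {
	// While started, the clock advances in line with the system clock,
	// adding the elapsed real time roughly every 100ms.
	c := clock.NewAdvancingClock(100 * time.Millisecond)
	c.Start()
	defer c.Stop()

	before := c.Now()
	// Jump a full day forward in a single call; subsequent ticks simply
	// continue adding elapsed system time from the new point.
	c.AdvanceTime(24 * time.Hour)
	fmt.Println(c.Now().Sub(before) >= 24*time.Hour)
	// Output: true
}
```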
package clock
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestAdvancingClock_AdvancesByTimeBetweenTicks(t *testing.T) {
clock, realTime := newTestAdvancingClock(1 * time.Second)
clock.Start()
defer clock.Stop()
eventTicker := clock.NewTicker(1 * time.Second)
start := clock.Now()
realTime.AdvanceTime(1 * time.Second)
require.Equal(t, start.Add(1*time.Second), <-eventTicker.Ch(), "should trigger events when advancing")
require.Equal(t, start.Add(1*time.Second), clock.Now(), "Should advance on single tick")
start = clock.Now()
realTime.AdvanceTime(15 * time.Second)
require.Equal(t, start.Add(15*time.Second), <-eventTicker.Ch(), "should trigger events when advancing")
require.Equal(t, start.Add(15*time.Second), clock.Now(), "Should advance by time between ticks")
}
func TestAdvancingClock_Stop(t *testing.T) {
clock, realTime := newTestAdvancingClock(1 * time.Second)
clock.Start()
defer clock.Stop()
eventTicker := clock.NewTicker(1 * time.Second)
// Stop the clock so time no longer advances automatically
clock.Stop()
start := clock.Now()
realTime.AdvanceTime(15 * time.Second)
clock.Start()
// Trigger the next tick
realTime.AdvanceTime(1 * time.Second)
// Time advances by the whole time the clock was stopped
// Note: if events were triggered while the clock was stopped, this event would be for the wrong time
require.Equal(t, start.Add(16*time.Second), <-eventTicker.Ch(), "should trigger events again after restarting")
require.Equal(t, start.Add(16*time.Second), clock.Now(), "Should advance by time between ticks after restarting")
}
func newTestAdvancingClock(advanceEvery time.Duration) (*AdvancingClock, *DeterministicClock) {
systemTime := NewDeterministicClock(time.UnixMilli(1000))
clock := &AdvancingClock{
DeterministicClock: NewDeterministicClock(time.UnixMilli(5000)),
systemTime: systemTime,
advanceEvery: advanceEvery,
quit: make(chan interface{}),
lastTick: systemTime.Now(),
}
return clock, systemTime
}
......@@ -13,6 +13,8 @@ type Clock interface {
// It is equivalent to time.After
After(d time.Duration) <-chan time.Time
AfterFunc(d time.Duration, f func()) Timer
// NewTicker returns a new Ticker containing a channel that will send
// the current time on the channel after each tick. The period of the
// ticks is specified by the duration argument. The ticker will adjust
......@@ -20,6 +22,10 @@ type Clock interface {
// The duration d must be greater than zero; if not, NewTicker will
// panic. Stop the ticker to release associated resources.
NewTicker(d time.Duration) Ticker
// NewTimer creates a new Timer that will send
// the current time on its channel after at least duration d.
NewTimer(d time.Duration) Timer
}
// A Ticker holds a channel that delivers "ticks" of a clock at intervals
......@@ -38,6 +44,25 @@ type Ticker interface {
Reset(d time.Duration)
}
// Timer represents a single event.
type Timer interface {
// Ch returns the channel for the timer. Equivalent to time.Timer.C
Ch() <-chan time.Time
// Stop prevents the Timer from firing.
// It returns true if the call stops the timer, false if the timer has already
// expired or been stopped.
// Stop does not close the channel, to prevent a read from the channel succeeding
// incorrectly.
//
// For a timer created with AfterFunc(d, f), if t.Stop returns false, then the timer
// has already expired and the function f has been started in its own goroutine;
// Stop does not wait for f to complete before returning.
// If the caller needs to know whether f is completed, it must coordinate
// with f explicitly.
Stop() bool
}
// SystemClock provides an instance of Clock that uses the system clock via methods in the time package.
var SystemClock Clock = systemClock{}
......@@ -63,3 +88,19 @@ func (t *SystemTicker) Ch() <-chan time.Time {
func (s systemClock) NewTicker(d time.Duration) Ticker {
return &SystemTicker{time.NewTicker(d)}
}
func (s systemClock) NewTimer(d time.Duration) Timer {
return &SystemTimer{time.NewTimer(d)}
}
type SystemTimer struct {
*time.Timer
}
func (t *SystemTimer) Ch() <-chan time.Time {
return t.C
}
func (s systemClock) AfterFunc(d time.Duration, f func()) Timer {
return &SystemTimer{time.AfterFunc(d, f)}
}
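Below is a hypothetical sketch (not part of this diff) of the pattern the new `Timer` enables and that `fakePoS` uses above when delaying block sealing: waiting on a clock-provided timer so the wait can be cancelled and, in tests, driven by a `DeterministicClock`. The `waitOrQuit` helper is illustrative only.

```go
package clock_test

import (
	"fmt"
	"time"

	"github.com/ethereum-optimism/optimism/op-service/clock"
)

// waitOrQuit blocks until the timer fires or quit is closed. Accepting the
// Clock interface lets production code pass clock.SystemClock while tests
// substitute a DeterministicClock and advance time manually.
func waitOrQuit(c clock.Clock, d time.Duration, quit <-chan struct{}) bool {
	tim := c.NewTimer(d)
	select {
	case <-tim.Ch():
		return true
	case <-quit:
		tim.Stop()
		return false
	}
}

func ExampleClock_NewTimer() {
	quit := make(chan struct{})
	fmt.Println(waitOrQuit(clock.SystemClock, time.Millisecond, quit))
	// Output: true
}
```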
......@@ -29,6 +29,43 @@ func (t task) fire(now time.Time) bool {
return false
}
type timer struct {
f func()
ch chan time.Time
due time.Time
stopped bool
run bool
sync.Mutex
}
func (t *timer) isDue(now time.Time) bool {
t.Lock()
defer t.Unlock()
return !t.due.After(now)
}
func (t *timer) fire(now time.Time) bool {
t.Lock()
defer t.Unlock()
if !t.stopped {
t.f()
t.run = true
}
return false
}
func (t *timer) Ch() <-chan time.Time {
return t.ch
}
func (t *timer) Stop() bool {
t.Lock()
defer t.Unlock()
r := !t.stopped && !t.run
t.stopped = true
return r
}
type ticker struct {
c Clock
ch chan time.Time
......@@ -70,8 +107,12 @@ func (t *ticker) fire(now time.Time) bool {
if t.stopped {
return false
}
t.ch <- now
// Publish without blocking and only update due time if we publish successfully
select {
case t.ch <- now:
t.nextDue = now.Add(t.period)
default:
}
return true
}
......@@ -110,6 +151,18 @@ func (s *DeterministicClock) After(d time.Duration) <-chan time.Time {
return ch
}
func (s *DeterministicClock) AfterFunc(d time.Duration, f func()) Timer {
s.lock.Lock()
defer s.lock.Unlock()
timer := &timer{f: f, due: s.now.Add(d)}
if d.Nanoseconds() == 0 {
timer.fire(s.now)
} else {
s.addPending(timer)
}
return timer
}
func (s *DeterministicClock) NewTicker(d time.Duration) Ticker {
if d <= 0 {
panic("Continuously firing tickers are a really bad idea")
......@@ -127,6 +180,21 @@ func (s *DeterministicClock) NewTicker(d time.Duration) Ticker {
return t
}
func (s *DeterministicClock) NewTimer(d time.Duration) Timer {
s.lock.Lock()
defer s.lock.Unlock()
ch := make(chan time.Time, 1)
t := &timer{
f: func() {
ch <- s.now
},
ch: ch,
due: s.now.Add(d),
}
s.addPending(t)
return t
}
func (s *DeterministicClock) addPending(t action) {
s.pending = append(s.pending, t)
select {
......
......@@ -2,6 +2,8 @@ package clock
import (
"context"
"sync"
"sync/atomic"
"testing"
"time"
......@@ -62,6 +64,64 @@ func TestAfter(t *testing.T) {
})
}
func TestAfterFunc(t *testing.T) {
t.Run("ZeroExecutesImmediately", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ran := new(atomic.Bool)
timer := clock.AfterFunc(0, func() { ran.Store(true) })
require.True(t, ran.Load(), "duration should already have been reached")
require.False(t, timer.Stop(), "Stop should return false after executing")
})
t.Run("CompletesWhenTimeAdvances", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ran := new(atomic.Bool)
timer := clock.AfterFunc(500*time.Millisecond, func() { ran.Store(true) })
require.False(t, ran.Load(), "should not complete immediately")
clock.AdvanceTime(499 * time.Millisecond)
require.False(t, ran.Load(), "should not complete before time is due")
clock.AdvanceTime(1 * time.Millisecond)
require.True(t, ran.Load(), "should complete when time is reached")
require.False(t, timer.Stop(), "Stop should return false after executing")
})
t.Run("CompletesWhenTimeAdvancesPastDue", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ran := new(atomic.Bool)
timer := clock.AfterFunc(500*time.Millisecond, func() { ran.Store(true) })
require.False(t, ran.Load(), "should not complete immediately")
clock.AdvanceTime(9000 * time.Millisecond)
require.True(t, ran.Load(), "should complete when time is reached")
require.False(t, timer.Stop(), "Stop should return false after executing")
})
t.Run("RegisterAsPending", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ran := new(atomic.Bool)
clock.AfterFunc(500*time.Millisecond, func() { ran.Store(true) })
ctx, cancelFunc := context.WithTimeout(context.Background(), 10*time.Second)
defer cancelFunc()
require.True(t, clock.WaitForNewPendingTask(ctx), "should have added a new pending task")
})
t.Run("DoNotRunIfStopped", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ran := new(atomic.Bool)
timer := clock.AfterFunc(500*time.Millisecond, func() { ran.Store(true) })
require.False(t, ran.Load(), "should not complete immediately")
require.True(t, timer.Stop(), "Stop should return true on first call")
require.False(t, timer.Stop(), "Stop should return false on subsequent calls")
clock.AdvanceTime(9000 * time.Millisecond)
require.False(t, ran.Load(), "should not run when time is reached")
})
}
func TestNewTicker(t *testing.T) {
t.Run("FiresAfterEachDuration", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
......@@ -97,6 +157,38 @@ func TestNewTicker(t *testing.T) {
require.Len(t, ticker.Ch(), 0, "should not fire until due again")
})
t.Run("SkipsFiringWhenProcessingIsSlow", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ticker := clock.NewTicker(5 * time.Second)
// Fire once to fill the channel queue
clock.AdvanceTime(5 * time.Second)
firstEventTime := clock.Now()
var startProcessing sync.WaitGroup
startProcessing.Add(1)
processedTicks := make(chan time.Time)
go func() {
startProcessing.Wait()
// Read two events then exit
for i := 0; i < 2; i++ {
event := <-ticker.Ch()
processedTicks <- event
}
}()
// Advance time further before processing of events has started
// Can't publish any further events to the channel but shouldn't block
clock.AdvanceTime(30 * time.Second)
// Allow processing to start
startProcessing.Done()
require.Equal(t, firstEventTime, <-processedTicks, "Should process first event")
clock.AdvanceTime(5 * time.Second)
require.Equal(t, clock.Now(), <-processedTicks, "Should skip to latest time")
})
t.Run("StopFiring", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ticker := clock.NewTicker(5 * time.Second)
......@@ -158,6 +250,46 @@ func TestNewTicker(t *testing.T) {
})
}
func TestNewTimer(t *testing.T) {
t.Run("FireOnceAfterDuration", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
timer := clock.NewTimer(5 * time.Second)
require.Len(t, timer.Ch(), 0, "should not fire immediately")
clock.AdvanceTime(4 * time.Second)
require.Len(t, timer.Ch(), 0, "should not fire before due")
clock.AdvanceTime(1 * time.Second)
require.Len(t, timer.Ch(), 1, "should fire when due")
require.Equal(t, clock.Now(), <-timer.Ch(), "should post current time")
clock.AdvanceTime(6 * time.Second)
require.Len(t, timer.Ch(), 0, "should not fire when due again")
})
t.Run("StopBeforeExecuted", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
timer := clock.NewTimer(5 * time.Second)
require.True(t, timer.Stop())
clock.AdvanceTime(10 * time.Second)
require.Len(t, timer.Ch(), 0, "should not fire after stop")
})
t.Run("StopAfterExecuted", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
timer := clock.NewTimer(5 * time.Second)
clock.AdvanceTime(10 * time.Second)
require.Len(t, timer.Ch(), 1, "should fire when due")
require.Equal(t, clock.Now(), <-timer.Ch(), "should post current time")
require.False(t, timer.Stop())
})
}
func TestWaitForPending(t *testing.T) {
t.Run("DoNotBlockWhenAlreadyPending", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
......
......@@ -19,3 +19,9 @@ test:
lint:
go vet ./...
.PHONY: test
tls:
kubectl get secrets op-ufm-client-tls -o yaml | yq '.data."tls.key"' | base64 --decode > tls/tls.key
kubectl get secrets op-ufm-client-tls -o yaml | yq '.data."tls.crt"' | base64 --decode > tls/tls.crt
kubectl get secrets op-ufm-client-tls -o yaml | yq '.data."ca.crt"' | base64 --decode > tls/ca.crt
.PHONY: tls
......@@ -3,6 +3,7 @@ module github.com/ethereum-optimism/optimism/op-ufm
go 1.20
require (
cloud.google.com/go/kms v1.12.1
github.com/BurntSushi/toml v1.3.2
github.com/ethereum-optimism/optimism/op-service v0.10.14
github.com/ethereum-optimism/optimism/op-signer v0.1.1
......@@ -11,9 +12,13 @@ require (
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.16.0
github.com/rs/cors v1.9.0
google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130
)
require (
cloud.google.com/go/compute v1.20.1 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.0 // indirect
github.com/DataDog/zstd v1.5.2 // indirect
github.com/VictoriaMetrics/fastcache v1.10.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
......@@ -32,8 +37,13 @@ require (
github.com/go-stack/stack v1.8.1 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/s2a-go v0.1.4 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect
......@@ -55,10 +65,18 @@ require (
github.com/tklauser/numcpus v0.5.0 // indirect
github.com/urfave/cli v1.22.9 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.1.0 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/text v0.8.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/oauth2 v0.10.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
google.golang.org/api v0.132.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
google.golang.org/grpc v1.56.2 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
)
......@@ -5,8 +5,6 @@ import (
"time"
"github.com/ethereum-optimism/optimism/op-ufm/pkg/metrics"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
......@@ -33,9 +31,7 @@ func Dial(providerName string, url string) (*InstrumentedEthClient, error) {
func (i *InstrumentedEthClient) TransactionByHash(ctx context.Context, hash common.Hash) (*types.Transaction, bool, error) {
start := time.Now()
log.Debug(">> TransactionByHash", "hash", hash, "provider", i.providerName)
tx, isPending, err := i.c.TransactionByHash(ctx, hash)
log.Debug("<< TransactionByHash", "tx", tx, "isPending", isPending, "err", err, "hash", hash, "provider", i.providerName)
if err != nil {
if !i.ignorableErrors(err) {
metrics.RecordError(i.providerName, "ethclient.TransactionByHash")
......
package main
import (
"context"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"fmt"
"os"
kms "cloud.google.com/go/kms/apiv1"
"cloud.google.com/go/kms/apiv1/kmspb"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
func main() {
println("kmstool - usage: kmstool <key>")
if len(os.Args) < 2 {
panic("missing <key>")
}
keyName := os.Args[1]
ctx := context.Background()
client, err := kms.NewKeyManagementClient(ctx)
if err != nil {
panic(fmt.Sprintf("failed to create kms client: %w", err))
}
defer client.Close()
addr, err := resolveAddr(ctx, client, keyName)
if err != nil {
panic(fmt.Sprintf("failed to retrieve the key: %w", err))
}
fmt.Printf("ethereum addr: %s", addr)
println()
println()
}
func resolveAddr(ctx context.Context, client *kms.KeyManagementClient, keyName string) (common.Address, error) {
resp, err := client.GetPublicKey(ctx, &kmspb.GetPublicKeyRequest{Name: keyName})
if err != nil {
return common.Address{}, fmt.Errorf("google kms public key %q lookup: %w", keyName, err)
}
block, _ := pem.Decode([]byte(resp.Pem))
if block == nil {
return common.Address{}, fmt.Errorf("google kms public key %q pem empty: %.130q", keyName, resp.Pem)
}
var info struct {
AlgID pkix.AlgorithmIdentifier
Key asn1.BitString
}
_, err = asn1.Unmarshal(block.Bytes, &info)
if err != nil {
return common.Address{}, fmt.Errorf("google kms public key %q pem block %q: %v", keyName, block.Type, err)
}
wantAlg := asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
if gotAlg := info.AlgID.Algorithm; !gotAlg.Equal(wantAlg) {
return common.Address{}, fmt.Errorf("google kms public key %q asn.1 algorithm %s intead of %s", keyName, gotAlg, wantAlg)
}
return pubKeyAddr(info.Key.Bytes), nil
}
// pubKeyAddr returns the Ethereum address for the (uncompressed) key bytes.
func pubKeyAddr(bytes []byte) common.Address {
digest := crypto.Keccak256(bytes[1:])
var addr common.Address
copy(addr[:], digest[12:])
return addr
}
FROM ethereum/client-go:v1.11.2
FROM ethereum/client-go:v1.12.0
RUN apk add --no-cache jq
......
......@@ -30,7 +30,7 @@ RUN mkdir manifests && \
# .nvmrc
cp .nvmrc ../manifests/
FROM ethereumoptimism/foundry:latest as foundry
FROM us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest as foundry
# bullseye-slim is debian based
# we use it rather than alpine because it's not much
# bigger and alpine is often missing packages for node applications
......
......@@ -10,6 +10,7 @@
},
"scripts": {
"clean": "npx nx run-many --target=clean",
"bindings": "nx bindings @eth-optimism/contracts-bedrock",
"build": "npx nx run-many --target=build",
"test": "npx nx run-many --target=test",
"lint": "npx nx run-many --target=lint",
......@@ -26,7 +27,7 @@
"prepare": "husky install",
"release": "npx nx run-many --target=build --skip-nx-cache && pnpm changeset publish",
"release:check": "changeset status --verbose --since=origin/main",
"release:publish": "npx nx run-many --target=build && pnpm install && pnpm build && changeset publish",
"release:publish": "pnpm install --frozen-lockfile && npx nx run-many --target=build && pnpm build && changeset publish",
"release:version": "changeset version && pnpm install --lockfile-only",
"install:foundry": "curl -L https://foundry.paradigm.xyz | bash && pnpm update:foundry",
"update:foundry": "foundryup -C $(cat .foundryrc)"
......
......@@ -83,37 +83,36 @@ FaucetTest:test_nonAdmin_drip_fails() (gas: 262520)
FaucetTest:test_receive_succeeds() (gas: 17401)
FaucetTest:test_withdraw_nonAdmin_reverts() (gas: 13145)
FaucetTest:test_withdraw_succeeds() (gas: 78359)
FaultDisputeGame_ResolvesCorrectly_Chaos:test_resolvesCorrectly_succeeds() (gas: 111450391)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot1:test_resolvesCorrectly_succeeds() (gas: 498867)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot2:test_resolvesCorrectly_succeeds() (gas: 505669)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot3:test_resolvesCorrectly_succeeds() (gas: 502410)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot4:test_resolvesCorrectly_succeeds() (gas: 505545)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot5:test_resolvesCorrectly_succeeds() (gas: 504906)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot1:test_resolvesCorrectly_succeeds() (gas: 497632)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot2:test_resolvesCorrectly_succeeds() (gas: 504434)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot3:test_resolvesCorrectly_succeeds() (gas: 501175)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot4:test_resolvesCorrectly_succeeds() (gas: 502310)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot5:test_resolvesCorrectly_succeeds() (gas: 501671)
FaultDisputeGame_Test:test_extraData_succeeds() (gas: 17426)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot1:test_resolvesCorrectly_succeeds() (gas: 498839)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot2:test_resolvesCorrectly_succeeds() (gas: 505685)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot3:test_resolvesCorrectly_succeeds() (gas: 502382)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot4:test_resolvesCorrectly_succeeds() (gas: 505561)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot5:test_resolvesCorrectly_succeeds() (gas: 504878)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot1:test_resolvesCorrectly_succeeds() (gas: 497604)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot2:test_resolvesCorrectly_succeeds() (gas: 504450)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot3:test_resolvesCorrectly_succeeds() (gas: 501147)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot4:test_resolvesCorrectly_succeeds() (gas: 502326)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot5:test_resolvesCorrectly_succeeds() (gas: 501643)
FaultDisputeGame_Test:test_extraData_succeeds() (gas: 17404)
FaultDisputeGame_Test:test_gameData_succeeds() (gas: 17917)
FaultDisputeGame_Test:test_gameStart_succeeds() (gas: 10315)
FaultDisputeGame_Test:test_gameType_succeeds() (gas: 8282)
FaultDisputeGame_Test:test_gameType_succeeds() (gas: 8260)
FaultDisputeGame_Test:test_initialRootClaimData_succeeds() (gas: 17669)
FaultDisputeGame_Test:test_move_clockCorrectness_succeeds() (gas: 415981)
FaultDisputeGame_Test:test_move_clockTimeExceeded_reverts() (gas: 26387)
FaultDisputeGame_Test:test_move_clockCorrectness_succeeds() (gas: 416029)
FaultDisputeGame_Test:test_move_clockTimeExceeded_reverts() (gas: 26399)
FaultDisputeGame_Test:test_move_defendRoot_reverts() (gas: 13360)
FaultDisputeGame_Test:test_move_duplicateClaim_reverts() (gas: 103230)
FaultDisputeGame_Test:test_move_gameDepthExceeded_reverts() (gas: 408100)
FaultDisputeGame_Test:test_move_duplicateClaim_reverts() (gas: 103254)
FaultDisputeGame_Test:test_move_gameDepthExceeded_reverts() (gas: 408148)
FaultDisputeGame_Test:test_move_gameNotInProgress_reverts() (gas: 10968)
FaultDisputeGame_Test:test_move_nonExistentParent_reverts() (gas: 24655)
FaultDisputeGame_Test:test_move_simpleAttack_succeeds() (gas: 107344)
FaultDisputeGame_Test:test_resolve_challengeContested_succeeds() (gas: 224784)
FaultDisputeGame_Test:test_move_simpleAttack_succeeds() (gas: 107356)
FaultDisputeGame_Test:test_resolve_challengeContested_succeeds() (gas: 224820)
FaultDisputeGame_Test:test_resolve_notInProgress_reverts() (gas: 9657)
FaultDisputeGame_Test:test_resolve_rootContested_succeeds() (gas: 109749)
FaultDisputeGame_Test:test_resolve_rootUncontestedClockNotExpired_succeeds() (gas: 21422)
FaultDisputeGame_Test:test_resolve_rootUncontested_succeeds() (gas: 27251)
FaultDisputeGame_Test:test_resolve_teamDeathmatch_succeeds() (gas: 395442)
FaultDisputeGame_Test:test_rootClaim_succeeds() (gas: 8181)
FaultDisputeGame_Test:test_resolve_rootContested_succeeds() (gas: 109773)
FaultDisputeGame_Test:test_resolve_rootUncontestedClockNotExpired_succeeds() (gas: 21434)
FaultDisputeGame_Test:test_resolve_rootUncontested_succeeds() (gas: 27263)
FaultDisputeGame_Test:test_resolve_teamDeathmatch_succeeds() (gas: 395502)
FaultDisputeGame_Test:test_rootClaim_succeeds() (gas: 8225)
FeeVault_Test:test_constructor_succeeds() (gas: 18185)
GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 352113)
GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_1() (gas: 2950320)
......
......@@ -56,11 +56,11 @@ contract MIPS {
IPreimageOracle public oracle;
/// @notice Extends the value leftwards with its most significant bit (sign extension).
function SE(uint32 dat, uint32 idx) internal pure returns (uint32) {
bool isSigned = (dat >> (idx - 1)) != 0;
uint256 signed = ((1 << (32 - idx)) - 1) << idx;
uint256 mask = (1 << idx) - 1;
return uint32(dat & mask | (isSigned ? signed : 0));
function SE(uint32 _dat, uint32 _idx) internal pure returns (uint32) {
bool isSigned = (_dat >> (_idx - 1)) != 0;
uint256 signed = ((1 << (32 - _idx)) - 1) << _idx;
uint256 mask = (1 << _idx) - 1;
return uint32(_dat & mask | (isSigned ? signed : 0));
}
/// @notice Computes the hash of the MIPS state.
......@@ -113,23 +113,25 @@ contract MIPS {
}
/// @notice Handles a syscall.
function handleSyscall() internal returns (bytes32) {
function handleSyscall() internal returns (bytes32 out_) {
// Load state from memory
State memory state;
assembly {
state := 0x80
}
// Load the syscall number from the registers
uint32 syscall_no = state.registers[2];
uint32 v0 = 0;
uint32 v1 = 0;
// Load the syscall arguments from the registers
uint32 a0 = state.registers[4];
uint32 a1 = state.registers[5];
uint32 a2 = state.registers[6];
// mmap: Allocates a page from the heap.
if (syscall_no == 4090) {
// mmap
uint32 sz = a1;
if (sz&4095 != 0) { // adjust size to align with page size
sz += 4096 - (sz&4095);
......@@ -140,27 +142,37 @@ contract MIPS {
} else {
v0 = a0;
}
} else if (syscall_no == 4045) {
// brk
}
// brk: Returns a fixed address for the program break at 0x40000000
else if (syscall_no == 4045) {
v0 = BRK_START;
} else if (syscall_no == 4120) {
// clone (not supported)
}
// clone (not supported) returns 1
else if (syscall_no == 4120) {
v0 = 1;
} else if (syscall_no == 4246) {
// exit group
}
// exit group: Sets the Exited and ExitCode states to true and argument 0.
else if (syscall_no == 4246) {
state.exited = true;
state.exitCode = uint8(a0);
return outputState();
} else if (syscall_no == 4003) { // read
}
// read: Like Linux read syscall. Splits unaligned reads into aligned reads.
else if (syscall_no == 4003) {
// args: a0 = fd, a1 = addr, a2 = count
// returns: v0 = read, v1 = err code
if (a0 == FD_STDIN) {
// leave v0 and v1 zero: read nothing, no error
} else if (a0 == FD_PREIMAGE_READ) { // pre-image oracle
// Leave v0 and v1 zero: read nothing, no error
}
// pre-image oracle read
else if (a0 == FD_PREIMAGE_READ) {
// verify proof 1 is correct, and get the existing memory.
uint32 mem = readMem(a1 & 0xFFffFFfc, 1); // mask the addr to align it to 4 bytes
(bytes32 dat, uint256 datLen) = oracle.readPreimage(state.preimageKey, state.preimageOffset);
assembly { // assembly for more precise ops, and no var count limit
// Transform data for writing to memory
// We use assembly for more precise ops, and no var count limit
assembly {
let alignment := and(a1, 3) // the read might not start at an aligned address
let space := sub(4, alignment) // remaining space in memory word
if lt(space, datLen) { datLen := space } // if less space than data, shorten data
......@@ -172,25 +184,38 @@ contract MIPS {
mask := and(mask, not(suffixMask)) // reduce mask to just cover the data we insert
mem := or(and(mem, not(mask)), dat) // clear masked part of original memory, and insert data
}
// Write memory back
writeMem(a1 & 0xFFffFFfc, 1, mem);
state.preimageOffset += uint32(datLen);
v0 = uint32(datLen);
} else if (a0 == FD_HINT_READ) { // hint response
// don't actually read into memory, just say we read it all, we ignore the result anyway
}
// hint response
else if (a0 == FD_HINT_READ) {
// Don't read into memory, just say we read it all
// The result is ignored anyway
v0 = a2;
} else {
}
else {
v0 = 0xFFffFFff;
v1 = EBADF;
}
} else if (syscall_no == 4004) { // write
}
// write: like Linux write syscall. Splits unaligned writes into aligned writes.
else if (syscall_no == 4004) {
// args: a0 = fd, a1 = addr, a2 = count
// returns: v0 = written, v1 = err code
if (a0 == FD_STDOUT || a0 == FD_STDERR || a0 == FD_HINT_WRITE) {
v0 = a2; // tell program we have written everything
} else if (a0 == FD_PREIMAGE_WRITE) { // pre-image oracle
}
// pre-image oracle
else if (a0 == FD_PREIMAGE_WRITE) {
uint32 mem = readMem(a1 & 0xFFffFFfc, 1); // mask the addr to align it to 4 bytes
bytes32 key = state.preimageKey;
assembly { // assembly for more precise ops, and no var count limit
// Construct pre-image key from memory
// We use assembly for more precise ops, and no var count limit
assembly {
let alignment := and(a1, 3) // the read might not start at an aligned address
let space := sub(4, alignment) // remaining space in memory word
if lt(space, a2) { a2 := space } // if less space than data, shorten data
......@@ -199,14 +224,20 @@ contract MIPS {
mem := and(shr(mul(sub(space, a2), 8), mem), mask) // align value to right, mask it
key := or(key, mem) // insert into key
}
// Write pre-image key to oracle
state.preimageKey = key;
state.preimageOffset = 0; // reset offset, to read new pre-image data from the start
v0 = a2;
} else {
}
else {
v0 = 0xFFffFFff;
v1 = EBADF;
}
} else if (syscall_no == 4055) { // fcntl
}
// fcntl: Like linux fcntl syscall, but only supports minimal file-descriptor control commands,
// to retrieve the file-descriptor R/W flags.
else if (syscall_no == 4055) { // fcntl
// args: a0 = fd, a1 = cmd
if (a1 == 3) { // F_GETFL: get file descriptor flags
if (a0 == FD_STDIN || a0 == FD_PREIMAGE_READ || a0 == FD_HINT_READ) {
......@@ -223,13 +254,15 @@ contract MIPS {
}
}
// Write the results back to the state registers
state.registers[2] = v0;
state.registers[7] = v1;
// Update the PC and nextPC
state.pc = state.nextPC;
state.nextPC = state.nextPC + 4;
return outputState();
out_ = outputState();
}
/// @notice Handles a branch instruction, updating the MIPS state PC where needed.
......
......@@ -30,12 +30,12 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver {
/// @notice The max depth of the game.
uint256 public immutable MAX_GAME_DEPTH;
/// @notice The duration of the game.
Duration public immutable GAME_DURATION;
/// @notice A hypervisor that performs single instruction steps on a fault proof program trace.
IBigStepper public immutable VM;
/// @notice The duration of the game.
Duration internal constant GAME_DURATION = Duration.wrap(7 days);
/// @notice The root claim's position is always at gindex 1.
Position internal constant ROOT_POSITION = Position.wrap(1);
......@@ -58,10 +58,12 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver {
constructor(
Claim _absolutePrestate,
uint256 _maxGameDepth,
Duration _gameDuration,
IBigStepper _vm
) Semver(0, 0, 3) {
ABSOLUTE_PRESTATE = _absolutePrestate;
MAX_GAME_DEPTH = _maxGameDepth;
GAME_DURATION = _gameDuration;
VM = _vm;
}
......
......@@ -29,7 +29,12 @@ contract FaultDisputeGame_Init is DisputeGameFactory_Init {
function init(Claim rootClaim, Claim absolutePrestate) public {
super.setUp();
// Deploy an implementation of the fault game
gameImpl = new FaultDisputeGame(absolutePrestate, 4, new AlphabetVM(absolutePrestate));
gameImpl = new FaultDisputeGame(
absolutePrestate,
4,
Duration.wrap(7 days),
new AlphabetVM(absolutePrestate)
);
// Register the game implementation with the factory.
factory.setImplementation(GAME_TYPE, gameImpl);
// Create a new game.
......
......@@ -35,7 +35,7 @@ contract OptimismMintableERC20 is IOptimismMintableERC20, ILegacyMintableERC20,
_;
}
/// @custom:semver 1.0.1
/// @custom:semver 1.0.2
/// @param _bridge Address of the L2 standard bridge.
/// @param _remoteToken Address of the corresponding L1 token.
/// @param _name ERC20 name.
......@@ -45,7 +45,7 @@ contract OptimismMintableERC20 is IOptimismMintableERC20, ILegacyMintableERC20,
address _remoteToken,
string memory _name,
string memory _symbol
) ERC20(_name, _symbol) Semver(1, 0, 1) {
) ERC20(_name, _symbol) Semver(1, 0, 2) {
REMOTE_TOKEN = _remoteToken;
BRIDGE = _bridge;
}
......@@ -79,7 +79,7 @@ contract OptimismMintableERC20 is IOptimismMintableERC20, ILegacyMintableERC20,
/// @notice ERC165 interface check function.
/// @param _interfaceId Interface ID to check.
/// @return Whether or not the interface is supported by this contract.
function supportsInterface(bytes4 _interfaceId) external pure returns (bool) {
function supportsInterface(bytes4 _interfaceId) external pure virtual returns (bool) {
bytes4 iface1 = type(IERC165).interfaceId;
// Interface corresponding to the legacy L2StandardERC20.
bytes4 iface2 = type(ILegacyMintableERC20).interfaceId;
......
......@@ -45,5 +45,6 @@
"l1StartingBlockTag": "earliest",
"l2GenesisRegolithTimeOffset": "0x0",
"faultGameAbsolutePrestate": 140,
"faultGameMaxDepth": 4
"faultGameMaxDepth": 4,
"faultGameMaxDuration": 604800
}
......@@ -9,8 +9,10 @@
"contracts/**/*.sol"
],
"scripts": {
"bindings": "cd ../../op-bindings && make",
"build": "npx nx build:contracts",
"bindings": "pnpm bindings:ts && pnpm bindings:go",
"bindings:ts": "pnpm generate:addresses && nx generate @eth-optimism/contracts-ts",
"bindings:go": "cd ../../op-bindings && make",
"build": "nx build:contracts",
"prebuild:contracts": "./scripts/verify-foundry-install.sh",
"build:contracts": "pnpm build:forge",
"build:forge": "forge build",
......
......@@ -697,6 +697,7 @@ contract Deploy is Deployer {
factory.setImplementation(GameTypes.FAULT, new FaultDisputeGame({
_absolutePrestate: absolutePrestate,
_maxGameDepth: cfg.faultGameMaxDepth(),
_gameDuration: Duration.wrap(uint64(cfg.faultGameMaxDuration())),
_vm: faultVm
}));
console.log("DisputeGameFactory: set `FaultDisputeGame` implementation");
......
......@@ -47,6 +47,7 @@ contract DeployConfig is Script {
uint256 public l2GenesisRegolithTimeOffset;
uint256 public faultGameAbsolutePrestate;
uint256 public faultGameMaxDepth;
uint256 public faultGameMaxDuration;
constructor(string memory _path) {
console.log("DeployConfig: reading file %s", _path);
......@@ -88,6 +89,7 @@ contract DeployConfig is Script {
if (block.chainid == 900) {
faultGameAbsolutePrestate = stdJson.readUint(_json, "$.faultGameAbsolutePrestate");
faultGameMaxDepth = stdJson.readUint(_json, "$.faultGameMaxDepth");
faultGameMaxDuration = stdJson.readUint(_json, "$.faultGameMaxDuration");
}
}
......
artifacts
cache
typechain
.deps
.envrc
.env
/dist/
coverage
artifacts
cache
typechain
.deps
.envrc
.env
/dist/
module.exports = {
...require('../../.prettierrc.js'),
}
# Code gen
Summary:
- This package is generated from [contracts-bedrock](../contracts-bedrock/)
- Its version is kept in sync with contracts-bedrock via the [changeset config](../../.changeset/config.json), e.g. if contracts-bedrock is `4.2.0` this package will have the same version.
## Code gen instructions
To run the code gen, run the `generate` script from [package.json](./package.json). Make sure node modules are installed first.
```bash
pnpm i && pnpm generate
```
MIT License
Copyright (c) 2022 Optimism
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
## Contracts TS
[![codecov](https://codecov.io/gh/ethereum-optimism/optimism/branch/develop/graph/badge.svg?token=0VTG7PG7YR&flag=contracts-bedrock-tests)](https://codecov.io/gh/ethereum-optimism/optimism)
ABI and Address constants + generated code from [@eth-optimism/contracts-bedrock/](../contracts-bedrock/) for use in TypeScript.
Much of this package is generated. See [CODE_GEN.md](./CODE_GEN.md) for instructions on how to generate.
#### @eth-optimism/contracts-ts
The main entrypoint exports constants related to contracts-bedrock `as const`. Declaring them `as const` allows them to be [used in TypeScript with stronger typing than importing JSON](https://github.com/microsoft/TypeScript/issues/32063).
- Exports contract ABIs
- Exports contract addresses
```typescript
import {
l2OutputOracleProxyABI,
l2OutputOracleAddresses,
} from '@eth-optimism/contracts-ts'
console.log(l2OutputOracleAddresses[10], l2OutputOracleProxyABI)
```
Addresses are also exported as an object for convenience.
```typescript
import { addresses } from '@eth-optimism/contracts-ts'
console.log(addresses.l2OutputOracle[10])
```
#### @eth-optimism/contracts-ts/react
- All [React hooks](https://wagmi.sh/cli/plugins/react) are exported from `@eth-optimism/contracts-ts/react`
```typescript
import { useAddressManagerAddress } from '@eth-optimism/contracts-ts/react'
const component = () => {
const { data, error, isLoading } = useAddressManagerAddress()
if (isLoading) {
return <div>Loading</div>
}
if (error) {
return <div>Error</div>
}
return <div>{data}</div>
}
```
#### @eth-optimism/contracts-ts/actions
- All [wagmi actions](https://wagmi.sh/react/actions) for use in vanilla JS or non-React code
```typescript
import { readSystemConfig } from '@eth-optimism/contracts-ts/actions'
console.log(await readSystemConfig())
```
#### See Also
- [Contracts bedrock specs](../../specs/)
- [Wagmi](https://wagmi.sh)
{
"AddressManager": {
"1": "0xdE1FCfB0851916CA5101820A69b13a4E276bd81F",
"5": "0xa6f73589243a6A7a9023b1Fa0651b1d89c177111"
},
"AssetReceiver": {
"1": "0x15DdA60616Ffca20371ED1659dBB78E888f65556",
"10": "0x15DdA60616Ffca20371ED1659dBB78E888f65556"
},
"CheckBalanceHigh": {
"1": "0x7eC64a8a591bFf829ff6C8be76074D540ACb813F",
"5": "0x7eC64a8a591bFf829ff6C8be76074D540ACb813F",
"420": "0x5d7103853f12109A7d27F118e54BbC654ad847E9"
},
"CheckBalanceLow": {
"1": "0x381a4eFC2A2C914eA1889722bB4B44Fa6BD5b640",
"5": "0x381a4eFC2A2C914eA1889722bB4B44Fa6BD5b640",
"420": "0x7Ce13D154FAEE5C8B3E6b19d4Add16f21d884474"
},
"CheckGelatoLow": {
"1": "0x4f7CFc43f6D262a085F3b946cAC69E7a8E39BBAa",
"5": "0x4f7CFc43f6D262a085F3b946cAC69E7a8E39BBAa",
"420": "0xF9c8a4Cb4021f57F9f6d69799cA9BefF64524862"
},
"CheckTrue": {
"1": "0x5c741a38cb11424711231777D71689C458eE835D",
"5": "0x5c741a38cb11424711231777D71689C458eE835D",
"420": "0x47443D0C184e022F19BD1578F5bca6B8a9F58E32"
},
"Drippie": {
"1": "0x44b3A2a040057eBafC601A78647e805fd58B1f50"
},
"Drippie_goerli": {
"5": "0x44b3A2a040057eBafC601A78647e805fd58B1f50"
},
"Drippie_optimism-goerli": {
"420": "0x8D8d533C16D23847EB04EEB0925be8900Dd3af86"
},
"L1CrossDomainMessenger": {
"1": "0x25ace71c97B33Cc4729CF772ae268934F7ab5fA1",
"5": "0x5086d1eEF304eb5284A0f6720f79403b4e9bE294"
},
"L1ERC721Bridge": {
"1": "0x5a7749f83b81B301cAb5f48EB8516B986DAef23D",
"5": "0x8DD330DdE8D9898d43b4dc840Da27A07dF91b3c9"
},
"L1StandardBridge": {
"1": "0x99C9fc46f92E8a1c0deC1b1747d010903E884bE1",
"5": "0x636Af16bf2f682dD3109e60102b8E1A089FedAa8"
},
"L2OutputOracle": {
"1": "0xdfe97868233d1aa22e815a266982f2cf17685a27",
"5": "0xE6Dfba0953616Bacab0c9A8ecb3a9BBa77FC15c0"
},
"OptimismMintableERC20Factory": {
"1": "0x4200000000000000000000000000000000000012",
"5": "0x4200000000000000000000000000000000000012",
"420": "0x4200000000000000000000000000000000000012"
},
"OptimismPortal": {
"1": "0xbEb5Fc579115071764c7423A4f12eDde41f106Ed",
"5": "0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383"
},
"PortalSender": {
"1": "0x0A893d9576b9cFD9EF78595963dc973238E78210",
"5": "0xe7FACd39531ee3C313330E93B4d7a8B8A3c84Aa4"
},
"ProxyAdmin": {
"1": "0x4200000000000000000000000000000000000018",
"5": "0x4200000000000000000000000000000000000018"
},
"SystemConfig": {
"1": "0x229047fed2591dbec1eF1118d64F7aF3dB9EB290",
"5": "0xAe851f927Ee40dE99aaBb7461C00f9622ab91d60"
},
"SystemDictator": {
"1": "0xB4453CEb33d2e67FA244A24acf2E50CEF31F53cB"
},
"SystemDictator_goerli": {
"5": "0x1f0613A44c9a8ECE7B3A2e0CdBdF0F5B47A50971"
},
"TeleportrWithdrawer": {
"1": "0x78A25524D90E3D0596558fb43789bD800a5c3007"
},
"AttestationStation": {
"10": "0xEE36eaaD94d1Cc1d0eccaDb55C38bFfB6Be06C77",
"420": "0xEE36eaaD94d1Cc1d0eccaDb55C38bFfB6Be06C77"
},
"L2ERC721Bridge": {
"10": "0x4200000000000000000000000000000000000014",
"420": "0x4200000000000000000000000000000000000014"
},
"MintManager": {
"10": "0x5C4e7Ba1E219E47948e6e3F55019A647bA501005",
"420": "0x038a8825A3C3B0c08d52Cc76E5E361953Cf6Dc76"
},
"OptimismMintableERC721Factory": {
"10": "0x4200000000000000000000000000000000000017"
},
"OptimismMintableERC721Factory_optimism-goerli": {
"420": "0x4200000000000000000000000000000000000017"
},
"Optimist": {
"10": "0x2335022c740d17c2837f9C884Bfe4fFdbf0A95D5",
"420": "0x2335022c740d17c2837f9C884Bfe4fFdbf0A95D5"
},
"OptimistAllowlist": {
"10": "0x482b1945D58f2E9Db0CEbe13c7fcFc6876b41180",
"420": "0x482b1945D58f2E9Db0CEbe13c7fcFc6876b41180"
},
"OptimistInviter": {
"10": "0x073031A1E1b8F5458Ed41Ce56331F5fd7e1de929",
"420": "0x073031A1E1b8F5458Ed41Ce56331F5fd7e1de929"
},
"BaseFeeVault": {
"420": "0x4200000000000000000000000000000000000019"
},
"GasPriceOracle": {
"420": "0x420000000000000000000000000000000000000F"
},
"L1Block": {
"420": "0x4200000000000000000000000000000000000015"
},
"L1FeeVault": {
"420": "0x420000000000000000000000000000000000001a"
},
"L2CrossDomainMessenger": {
"420": "0x4200000000000000000000000000000000000007"
},
"L2StandardBridge": {
"420": "0x4200000000000000000000000000000000000010"
},
"L2ToL1MessagePasser": {
"420": "0x4200000000000000000000000000000000000016"
},
"SequencerFeeVault": {
"420": "0x4200000000000000000000000000000000000011"
}
}
VITE_RPC_URL_L2_GOERLI=
VITE_RPC_URL_L2_MAINNET=
VITE_RPC_URL_L1_GOERLI=
VITE_RPC_URL_L1_MAINNET=
{
"name": "@eth-optimism/contracts-ts",
"version": "0.15.0",
"description": "TypeScript interface for Contracts Bedrock",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/ethereum-optimism/optimism.git",
"directory": "packages/contracts-ts"
},
"homepage": "https://optimism.io",
"type": "module",
"main": "dist/constants.js",
"module": "dist/constants.mjs",
"types": "src/constants.ts",
"exports": {
".": {
"types": "./src/constants.ts",
"import": "./dist/constants.js",
"require": "./dist/constants.cjs"
},
"./actions": {
"types": "./src/actions.ts",
"import": "./dist/actions.js",
"require": "./dist/actions.cjs"
},
"./react": {
"types": "./src/react.ts",
"import": "./dist/react.js",
"require": "./dist/react.cjs"
}
},
"files": [
"dist/",
"src/"
],
"scripts": {
"build": "tsup",
"generate": "wagmi generate && pnpm build && pnpm lint:fix",
"generate:check": "pnpm generate && git diff --exit-code ./addresses.json && git diff --exit-code ./abis.json",
"lint": "prettier --check .",
"lint:fix": "prettier --write .",
"test": "vitest",
"test:coverage": "vitest run --coverage",
"typecheck": "tsc --noEmit"
},
"devDependencies": {
"@eth-optimism/contracts-bedrock": "workspace:*",
"@testing-library/jest-dom": "^5.17.0",
"@testing-library/react-hooks": "^8.0.1",
"@types/glob": "^8.1.0",
"@vitest/coverage-istanbul": "^0.33.0",
"@wagmi/cli": "^1.3.0",
"@wagmi/core": "^1.3.8",
"abitype": "^0.9.2",
"glob": "^10.3.3",
"isomorphic-fetch": "^3.0.0",
"jest-dom": "link:@types/@testing-library/jest-dom",
"jsdom": "^22.1.0",
"tsup": "^7.1.0",
"typescript": "^5.1.6",
"vite": "^4.4.4",
"vitest": "^0.33.0"
},
"peerDependencies": {
"@wagmi/core": ">1.0.0",
"wagmi": ">1.0.0"
},
"peerDependenciesMeta": {
"wagmi": {
"optional": true
},
"@wagmi/core": {
"optional": true
}
},
"dependencies": {
"@testing-library/react": "^14.0.0",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"viem": "^1.3.0"
}
}
import fetch from 'isomorphic-fetch'
// viem needs this
global.fetch = fetch
import { test, expect } from 'vitest'
import { addresses } from './constants'
import { readFileSync } from 'fs'
import { join } from 'path'
const jsonAddresses = JSON.parse(
readFileSync(join(__dirname, '../addresses.json'), 'utf8')
)
test('should have generated addresses', () => {
expect(addresses).toEqual(jsonAddresses)
})
import matchers from '@testing-library/jest-dom/matchers'
import { cleanup, waitFor } from '@testing-library/react'
import { renderHook } from '@testing-library/react-hooks'
import { afterEach, expect, test } from 'vitest'
import { useMintManagerOwner } from './react'
import { configureChains, createConfig, WagmiConfig } from 'wagmi'
import * as React from 'react'
import { optimism } from 'viem/chains'
import { jsonRpcProvider } from 'wagmi/providers/jsonRpc'
expect.extend(matchers)
afterEach(() => {
cleanup()
})
const { publicClient } = configureChains(
[optimism],
[
jsonRpcProvider({
rpc: () => ({
http:
import.meta.env.VITE_RPC_URL_L2_MAINNET ??
'https://mainnet.optimism.io',
}),
}),
]
)
const config = createConfig({
publicClient: ({ chainId }) => publicClient({ chainId }),
})
const blockNumber = BigInt(106806163)
test('react hooks should work', async () => {
const hook = renderHook(
() => useMintManagerOwner({ chainId: 10, blockNumber }),
{
wrapper: ({ children }) => (
<WagmiConfig config={config}>{children}</WagmiConfig>
),
}
)
await waitFor(() => {
hook.rerender()
if (hook.result.current.error) throw hook.result.current.error
expect(hook.result.current?.data).toBeDefined()
})
const normalizedResult = {
...hook.result.current,
internal: {
...hook.result.current.internal,
dataUpdatedAt: 'SNAPSHOT_TEST_REMOVED!!!',
},
}
expect(normalizedResult).toMatchInlineSnapshot(`
{
"data": "0x2A82Ae142b2e62Cb7D10b55E323ACB1Cab663a26",
"error": null,
"fetchStatus": "idle",
"internal": {
"dataUpdatedAt": "SNAPSHOT_TEST_REMOVED!!!",
"errorUpdatedAt": 0,
"failureCount": 0,
"isFetchedAfterMount": true,
"isLoadingError": false,
"isPaused": false,
"isPlaceholderData": false,
"isPreviousData": false,
"isRefetchError": false,
"isStale": true,
"remove": [Function],
},
"isError": false,
"isFetched": true,
"isFetchedAfterMount": true,
"isFetching": false,
"isIdle": false,
"isLoading": false,
"isRefetching": false,
"isSuccess": true,
"refetch": [Function],
"status": "success",
}
`)
})
/// <reference types="vite/client" />
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "./dist",
"baseUrl": "./src",
"strict": true,
"skipLibCheck": true,
"module": "ESNext",
"moduleResolution": "node",
"jsx": "react",
"target": "ESNext",
"noEmit": true
},
"include": ["./src"]
}
import { defineConfig } from 'tsup'
import packageJson from './package.json'
export default defineConfig({
name: packageJson.name,
entry: ['src/constants.ts', 'src/actions.ts', 'src/react.ts'],
outDir: 'dist',
format: ['esm', 'cjs'],
splitting: false,
sourcemap: true,
clean: false,
})
import { defineConfig } from 'vitest/config'
// @see https://vitest.dev/config/
export default defineConfig({
test: {
setupFiles: './setupVitest.ts',
environment: 'jsdom',
coverage: {
provider: 'istanbul',
},
},
})