Commit c3a8e43f authored by Matthew Slipper, committed by GitHub

Merge pull request #3190 from ethereum-optimism/develop

Develop -> Master
parents f96b907c c666fedc
---
'@eth-optimism/sdk': minor
---
Update wsteth bridge address
---
'@eth-optimism/ci-builder': patch
---
Upgrade to Debian 11
@@ -282,7 +282,7 @@ jobs:
           command: yarn install --production=false
       - run:
           name: specs toc
-          command: yarn lint:specs:toc && git diff --exit-code
+          command: yarn lint:specs:toc && git diff --exit-code ./specs
       - run:
           name: markdown lint
           command: yarn lint:specs:check
@@ -585,6 +585,12 @@ workflows:
       - op-bindings-build:
           requires:
             - yarn-monorepo
+      - js-lint-test:
+          name: actor-tests-tests
+          package_name: actor-tests
+          dependencies: "(core-utils|sdk)"
+          requires:
+            - yarn-monorepo
       - js-lint-test:
           name: contracts-governance-tests
           package_name: contracts-governance
...
@@ -6,6 +6,10 @@
 on:
+  push:
+    branches-ignore:
+      - 'develop'
 jobs:
   changesets-integrity-checker:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v2
...
@@ -32,5 +32,14 @@ steps:
   - --cache=true
   - --cache-ttl=48h
   waitFor: ['-']
+- name: 'gcr.io/kaniko-project/executor:latest'
+  args:
+  - --destination=us-central1-docker.pkg.dev/$PROJECT_ID/images/actor-tests-bedrock:$_TAG
+  - --destination=us-central1-docker.pkg.dev/$PROJECT_ID/images/actor-tests-bedrock:$COMMIT_SHA
+  - --dockerfile=./ops/docker/Dockerfile.packages
+  - --target=actor-tests-bedrock
+  - --cache=true
+  - --cache-ttl=48h
+  waitFor: ['-']
 options:
   machineType: N1_HIGHCPU_32
\ No newline at end of file
import { actor, run, setupActor } from './lib/convenience'
import { OptimismEnv } from '../test/shared/env'

actor('Chain reader', () => {
  let env: OptimismEnv

  setupActor(async () => {
    env = await OptimismEnv.new()
  })

  run(async (b) => {
    const blockNumber = await b.bench('get block number', () =>
      env.l2Provider.getBlockNumber()
    )
    await b.bench('get random block', () =>
      env.l2Provider.getBlock(Math.floor(blockNumber * Math.random()))
    )
  })
})
import { utils, Wallet, BigNumber } from 'ethers'
import { expect } from 'chai'

import { setupActor, setupRun, actor, run } from './lib/convenience'
import { OptimismEnv } from '../test/shared/env'

interface BenchContext {
  l1Wallet: Wallet
  l2Wallet: Wallet
}

const DEFAULT_TEST_GAS_L1 = 330_000
const DEFAULT_TEST_GAS_L2 = 1_300_000

actor('Funds depositor', () => {
  let env: OptimismEnv

  setupActor(async () => {
    env = await OptimismEnv.new()
  })

  setupRun(async () => {
    const wallet = Wallet.createRandom()
    const tx = await env.l1Wallet.sendTransaction({
      to: wallet.address,
      value: utils.parseEther('0.01'),
    })
    await tx.wait()
    return {
      l1Wallet: wallet.connect(env.l1Wallet.provider),
      l2Wallet: wallet.connect(env.l2Wallet.provider),
    }
  })

  run(async (b, ctx: BenchContext) => {
    const { l1Wallet, l2Wallet } = ctx
    const balBefore = await l2Wallet.getBalance()
    await b.bench('deposit', async () => {
      await env.waitForXDomainTransaction(
        env.messenger.contracts.l1.L1StandardBridge.connect(
          l1Wallet
        ).depositETH(DEFAULT_TEST_GAS_L2, '0xFFFF', {
          value: 0x42,
          gasLimit: DEFAULT_TEST_GAS_L1,
        })
      )
    })
    // Converting BigNumber to hex string prevents chai from incorrectly considering inherited properties
    // for strict equality - https://github.com/chaijs/chai/issues/948
    expect(
      (await l2Wallet.getBalance()).sub(balBefore).toString()
    ).to.deep.equal(BigNumber.from(0x42).toString())
  })
})
import { utils, Wallet, Contract } from 'ethers'
import { expect } from 'chai'

import { actor, run, setupActor, setupRun } from './lib/convenience'
import { OptimismEnv } from '../test/shared/env'
import ERC721 from '../artifacts/contracts/NFT.sol/NFT.json'

interface Context {
  wallet: Wallet
  contract: Contract
}

actor('NFT claimer', () => {
  let env: OptimismEnv
  let contract: Contract

  setupActor(async () => {
    env = await OptimismEnv.new()
    contract = new Contract(process.env.ERC_721_ADDRESS, ERC721.abi)
  })

  setupRun(async () => {
    const wallet = Wallet.createRandom().connect(env.l2Wallet.provider)
    const tx = await env.l2Wallet.sendTransaction({
      to: wallet.address,
      value: utils.parseEther('0.01'),
    })
    await tx.wait()
    return {
      wallet,
      contract: contract.connect(wallet),
    }
  })

  run(async (b, ctx: Context) => {
    let receipt: any
    await b.bench('mint', async () => {
      const tx = await ctx.contract.give()
      receipt = await tx.wait()
    })
    expect(receipt.events[0].event).to.equal('Transfer')
    expect(receipt.events[0].args[1]).to.equal(ctx.wallet.address)
  })
})
import { utils, Wallet } from 'ethers'
import { expect } from 'chai'

import { actor, setupRun, setupActor, run } from './lib/convenience'
import { OptimismEnv } from '../test/shared/env'

interface Context {
  wallet: Wallet
}

actor('Value sender', () => {
  let env: OptimismEnv

  setupActor(async () => {
    env = await OptimismEnv.new()
  })

  setupRun(async () => {
    const wallet = Wallet.createRandom()
    const tx = await env.l2Wallet.sendTransaction({
      to: wallet.address,
      value: utils.parseEther('0.01'),
    })
    await tx.wait()
    return {
      wallet: wallet.connect(env.l2Wallet.provider),
    }
  })

  run(async (b, ctx: Context) => {
    const randWallet = Wallet.createRandom().connect(env.l2Wallet.provider)
    await b.bench('send funds', async () => {
      const tx = await ctx.wallet.sendTransaction({
        to: randWallet.address,
        value: 0x42,
      })
      await tx.wait()
    })
    expect((await randWallet.getBalance()).toString()).to.deep.equal('66')
  })
})
import { utils, Wallet, Contract } from 'ethers'
import { ethers } from 'hardhat'
import { expect } from 'chai'

import { actor, setupActor, run, setupRun } from './lib/convenience'
import { OptimismEnv } from '../test/shared/env'

interface Context {
  wallet: Wallet
}

actor('Trie DoS accounts', () => {
  let env: OptimismEnv
  let contract: Contract

  setupActor(async () => {
    env = await OptimismEnv.new()
    const factory = await ethers.getContractFactory('StateDOS', env.l2Wallet)
    contract = await factory.deploy()
    await contract.deployed()
  })

  setupRun(async () => {
    const wallet = Wallet.createRandom()
    const tx = await env.l2Wallet.sendTransaction({
      to: wallet.address,
      value: utils.parseEther('1'),
    })
    await tx.wait()
    return {
      wallet: wallet.connect(env.l2Wallet.provider),
    }
  })

  run(async (b, ctx: Context) => {
    await b.bench('DOS transactions', async () => {
      const tx = await contract.connect(ctx.wallet).attack({
        gasLimit: 9000000 + Math.floor(1000000 * Math.random()),
      })
      const receipt = await tx.wait()
      // make sure that this was an actual transaction in a block
      expect(receipt.blockNumber).to.be.gt(1)
      expect(receipt.gasUsed.gte(8000000)).to.be.true
    })
  })
})
import { Contract, utils, Wallet } from 'ethers'
import { FeeAmount } from '@uniswap/v3-sdk'
import { abi as NFTABI } from '@uniswap/v3-periphery/artifacts/contracts/NonfungiblePositionManager.sol/NonfungiblePositionManager.json'
import { abi as RouterABI } from '@uniswap/v3-periphery/artifacts/contracts/SwapRouter.sol/SwapRouter.json'

import { actor, run, setupActor, setupRun } from './lib/convenience'
import { OptimismEnv } from '../test/shared/env'
import ERC20 from '../artifacts/contracts/ERC20.sol/ERC20.json'

interface Context {
  contracts: { [name: string]: Contract }
  wallet: Wallet
}

actor('Uniswap swapper', () => {
  let env: OptimismEnv
  let tokens: [Contract, Contract]
  let contracts: { [name: string]: Contract }

  setupActor(async () => {
    env = await OptimismEnv.new()
    contracts = {
      positionManager: new Contract(
        process.env.UNISWAP_POSITION_MANAGER_ADDRESS,
        NFTABI
      ).connect(env.l2Wallet),
      router: new Contract(
        process.env.UNISWAP_ROUTER_ADDRESS,
        RouterABI
      ).connect(env.l2Wallet),
    }
    tokens = [
      new Contract(process.env.UNISWAP_TOKEN_0_ADDRESS, ERC20.abi).connect(
        env.l2Wallet
      ),
      new Contract(process.env.UNISWAP_TOKEN_1_ADDRESS, ERC20.abi).connect(
        env.l2Wallet
      ),
    ]
    tokens =
      tokens[0].address.toLowerCase() < tokens[1].address.toLowerCase()
        ? [tokens[0], tokens[1]]
        : [tokens[1], tokens[0]]
  })

  setupRun(async () => {
    const wallet = Wallet.createRandom().connect(env.l2Provider)
    const sendTx = await env.l2Wallet.sendTransaction({
      to: wallet.address,
      value: utils.parseEther('0.1'),
    })
    await sendTx.wait()
    for (const token of tokens) {
      let tx = await token.transfer(wallet.address, 1000000)
      await tx.wait()
      const boundToken = token.connect(wallet)
      tx = await boundToken.approve(
        contracts.positionManager.address,
        1000000000
      )
      await tx.wait()
      tx = await boundToken.approve(contracts.router.address, 1000000000)
      await tx.wait()
    }
    return {
      contracts: Object.entries(contracts).reduce((acc, [name, value]) => {
        acc[name] = value.connect(wallet)
        return acc
      }, {}),
      wallet,
    }
  })

  run(async (b, ctx: Context) => {
    await b.bench('swap', async () => {
      const tx = await ctx.contracts.router.exactInputSingle(
        {
          tokenIn: tokens[0].address,
          tokenOut: tokens[1].address,
          fee: FeeAmount.MEDIUM,
          recipient: ctx.wallet.address,
          deadline: Date.now() * 2,
          amountIn: Math.max(Math.floor(1000 * Math.random()), 100),
          amountOutMinimum: 0,
          sqrtPriceLimitX96: 0,
        },
        {
          gasLimit: 10000000,
        }
      )
      await tx.wait()
    })
  })
})
@@ -48,10 +48,8 @@
     "@uniswap/v3-core": "1.0.0",
     "@uniswap/v3-periphery": "^1.0.1",
     "@uniswap/v3-sdk": "^3.6.2",
-    "async-mutex": "^0.3.2",
     "chai": "^4.3.4",
     "chai-as-promised": "^7.1.1",
-    "commander": "^8.3.0",
     "dotenv": "^10.0.0",
     "envalid": "^7.1.0",
     "eslint": "^7.27.0",
@@ -69,7 +67,6 @@
     "lint-staged": "11.0.0",
     "mocha": "^8.4.0",
     "node-fetch": "^2.6.7",
-    "prom-client": "^14.0.1",
     "rimraf": "^3.0.2",
     "typescript": "^4.3.5",
     "uniswap-v3-deploy-plugin": "^0.1.0"
...
@@ -42,6 +42,8 @@ type Metrics struct {
 	LastPipelineResetUnix prometheus.Gauge
 	UnsafePayloadsTotal   prometheus.Counter
 	DerivationErrorsTotal prometheus.Counter
+	SequencingErrorsTotal prometheus.Counter
+	PublishingErrorsTotal prometheus.Counter
 	Heads                 *prometheus.GaugeVec
 	TransactionsSequencedTotal prometheus.Counter
@@ -141,6 +143,16 @@ func NewMetrics(procName string) *Metrics {
 			Name: "derivation_errors_total",
 			Help: "Count of total derivation errors",
 		}),
+		SequencingErrorsTotal: promauto.With(registry).NewCounter(prometheus.CounterOpts{
+			Namespace: ns,
+			Name:      "sequencing_errors_total",
+			Help:      "Count of total sequencing errors",
+		}),
+		PublishingErrorsTotal: promauto.With(registry).NewCounter(prometheus.CounterOpts{
+			Namespace: ns,
+			Name:      "publishing_errors_total",
+			Help:      "Count of total p2p publishing errors",
+		}),
 		Heads: promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
 			Namespace: ns,
 			Name:      "heads",
@@ -232,7 +244,6 @@ func (m *Metrics) SetHead(kind string, num uint64) {
 func (m *Metrics) RecordPipelineReset() {
 	m.PipelineResetsTotal.Inc()
-	m.DerivationErrorsTotal.Inc()
 	m.LastPipelineResetUnix.Set(float64(time.Now().Unix()))
 }
...
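For context on how the new counters are consumed: they are plain Prometheus counters, incremented at the failure site (the driver change later in this commit calls s.metrics.PublishingErrorsTotal.Inc() directly). A hypothetical pair of helper methods, not part of this diff, would look like:

// Hypothetical convenience methods (not in this diff), assuming the Metrics
// struct shown above; callers may also increment the counters directly.
func (m *Metrics) RecordSequencingError() {
	m.SequencingErrorsTotal.Inc() // one failed attempt to sequence a new L2 block
}

func (m *Metrics) RecordPublishingError() {
	m.PublishingErrorsTotal.Inc() // one failed p2p publish of an unsafe payload
}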
@@ -22,7 +22,7 @@ type L1ReceiptsFetcher interface {
 // by setting NoTxPool=false as sequencer, or by appending batch transactions as verifier.
 // The severity of the error is returned; a crit=false error means there was a temporary issue, like a failed RPC or time-out.
 // A crit=true error means the input arguments are inconsistent or invalid.
-func PreparePayloadAttributes(ctx context.Context, cfg *rollup.Config, dl L1ReceiptsFetcher, l2Parent eth.L2BlockRef, timestamp uint64, epoch eth.BlockID) (attrs *eth.PayloadAttributes, crit bool, err error) {
+func PreparePayloadAttributes(ctx context.Context, cfg *rollup.Config, dl L1ReceiptsFetcher, l2Parent eth.L2BlockRef, timestamp uint64, epoch eth.BlockID) (attrs *eth.PayloadAttributes, err error) {
 	var l1Info eth.L1Info
 	var depositTxs []hexutil.Bytes
 	var seqNumber uint64
@@ -33,25 +33,42 @@ func PreparePayloadAttributes(
 	if l2Parent.L1Origin.Number != epoch.Number {
 		info, _, receipts, err := dl.Fetch(ctx, epoch.Hash)
 		if err != nil {
-			return nil, false, fmt.Errorf("failed to fetch L1 block info and receipts: %w", err)
+			return nil, NewTemporaryError(
+				err,
+				"failed to fetch L1 block info and receipts",
+			)
 		}
 		if l2Parent.L1Origin.Hash != info.ParentHash() {
-			return nil, true, fmt.Errorf("cannot create new block with L1 origin %s (parent %s) on top of L1 origin %s", epoch, info.ParentHash(), l2Parent.L1Origin)
+			return nil, NewResetError(
+				nil,
+				fmt.Sprintf("cannot create new block with L1 origin %s (parent %s) on top of L1 origin %s",
+					epoch, info.ParentHash(), l2Parent.L1Origin),
+			)
 		}
 		deposits, err := DeriveDeposits(receipts, cfg.DepositContractAddress)
 		if err != nil {
-			return nil, true, fmt.Errorf("failed to derive some deposits: %w", err)
+			return nil, NewResetError(
+				err,
+				"failed to derive some deposits",
+			)
 		}
 		l1Info = info
 		depositTxs = deposits
 		seqNumber = 0
 	} else {
 		if l2Parent.L1Origin.Hash != epoch.Hash {
-			return nil, true, fmt.Errorf("cannot create new block with L1 origin %s in conflict with L1 origin %s", epoch, l2Parent.L1Origin)
+			return nil, NewResetError(
+				nil,
+				fmt.Sprintf("cannot create new block with L1 origin %s in conflict with L1 origin %s",
+					epoch, l2Parent.L1Origin),
+			)
 		}
 		info, err := dl.InfoByHash(ctx, epoch.Hash)
 		if err != nil {
-			return nil, false, fmt.Errorf("failed to fetch L1 block info: %w", err)
+			return nil, NewTemporaryError(
+				err,
+				"failed to fetch L1 block info",
+			)
 		}
 		l1Info = info
 		depositTxs = nil
@@ -60,7 +77,10 @@ func PreparePayloadAttributes(
 	l1InfoTx, err := L1InfoDepositBytes(seqNumber, l1Info)
 	if err != nil {
-		return nil, true, fmt.Errorf("failed to create l1InfoTx: %w", err)
+		return nil, NewResetError(
+			err,
+			"failed to create l1InfoTx",
+		)
 	}
 	txs := make([]hexutil.Bytes, 0, 1+len(depositTxs))
@@ -73,5 +93,5 @@ func PreparePayloadAttributes(
 		SuggestedFeeRecipient: cfg.FeeRecipientAddress,
 		Transactions:          txs,
 		NoTxPool:              true,
-	}, false, nil
+	}, nil
 }
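With the crit flag gone, callers classify errors against the sentinels defined in the new errors.go later in this commit. A minimal sketch of the calling pattern, as a hypothetical caller (not from this diff), assuming the ErrTemporary/ErrReset sentinels, this file's existing imports, plus "errors" and go-ethereum's "log" package:

// stepOnce illustrates the new error classification; it is a sketch only.
func stepOnce(ctx context.Context, cfg *rollup.Config, dl L1ReceiptsFetcher,
	parent eth.L2BlockRef, ts uint64, epoch eth.BlockID, logger log.Logger) error {
	_, err := PreparePayloadAttributes(ctx, cfg, dl, parent, ts, epoch)
	switch {
	case err == nil:
		return nil
	case errors.Is(err, ErrTemporary):
		// failed RPC or timeout: leave the pipeline alone and retry later
		logger.Warn("temporary failure preparing payload attributes", "err", err)
		return nil
	case errors.Is(err, ErrReset):
		// inconsistent L1 origin: must be treated like a reorg
		return err
	default:
		return err
	}
}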
@@ -2,7 +2,6 @@ package derive
 import (
 	"context"
-	"fmt"
 	"io"
 	"time"
@@ -55,14 +54,9 @@ func (aq *AttributesQueue) Step(ctx context.Context, outer Progress) error {
 	fetchCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
 	defer cancel()
-	attrs, crit, err := PreparePayloadAttributes(fetchCtx, aq.config, aq.dl, aq.next.SafeL2Head(), batch.Timestamp, batch.Epoch())
+	attrs, err := PreparePayloadAttributes(fetchCtx, aq.config, aq.dl, aq.next.SafeL2Head(), batch.Timestamp, batch.Epoch())
 	if err != nil {
-		if crit {
-			return fmt.Errorf("failed to prepare payload attributes for batch: %v", err)
-		} else {
-			aq.log.Error("temporarily failing to prepare payload attributes for batch", "err", err)
-			return nil
-		}
+		return err
 	}
 	// we are verifying, not sequencing, we've got all transactions and do not pull from the tx-pool
...
@@ -36,9 +36,9 @@ func TestPreparePayloadAttributes(t *testing.T) {
 		l1Info.InfoNum = l2Parent.L1Origin.Number + 1
 		epoch := l1Info.ID()
 		l1Fetcher.ExpectFetch(epoch.Hash, l1Info, nil, nil, nil)
-		_, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
+		_, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
 		require.NotNil(t, err, "inconsistent L1 origin error expected")
-		require.True(t, crit, "inconsistent L1 origin transition must be handled like a critical error with reorg")
+		require.ErrorIs(t, err, ErrReset, "inconsistent L1 origin transition must be handled like a critical error with reorg")
 	})
 	t.Run("inconsistent equal height origin", func(t *testing.T) {
 		rng := rand.New(rand.NewSource(1234))
@@ -49,9 +49,9 @@ func TestPreparePayloadAttributes(t *testing.T) {
 		l1Info := testutils.RandomL1Info(rng)
 		l1Info.InfoNum = l2Parent.L1Origin.Number
 		epoch := l1Info.ID()
-		_, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
+		_, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
 		require.NotNil(t, err, "inconsistent L1 origin error expected")
-		require.True(t, crit, "inconsistent L1 origin transition must be handled like a critical error with reorg")
+		require.ErrorIs(t, err, ErrReset, "inconsistent L1 origin transition must be handled like a critical error with reorg")
 	})
 	t.Run("rpc fail Fetch", func(t *testing.T) {
 		rng := rand.New(rand.NewSource(1234))
@@ -63,9 +63,9 @@ func TestPreparePayloadAttributes(t *testing.T) {
 		epoch.Number += 1
 		mockRPCErr := errors.New("mock rpc error")
 		l1Fetcher.ExpectFetch(epoch.Hash, nil, nil, nil, mockRPCErr)
-		_, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
+		_, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
 		require.ErrorIs(t, err, mockRPCErr, "mock rpc error expected")
-		require.False(t, crit, "rpc errors should not be critical, it is not necessary to reorg")
+		require.ErrorIs(t, err, ErrTemporary, "rpc errors should not be critical, it is not necessary to reorg")
 	})
 	t.Run("rpc fail InfoByHash", func(t *testing.T) {
 		rng := rand.New(rand.NewSource(1234))
@@ -76,9 +76,9 @@ func TestPreparePayloadAttributes(t *testing.T) {
 		epoch := l2Parent.L1Origin
 		mockRPCErr := errors.New("mock rpc error")
 		l1Fetcher.ExpectInfoByHash(epoch.Hash, nil, mockRPCErr)
-		_, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
+		_, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
 		require.ErrorIs(t, err, mockRPCErr, "mock rpc error expected")
-		require.False(t, crit, "rpc errors should not be critical, it is not necessary to reorg")
+		require.ErrorIs(t, err, ErrTemporary, "rpc errors should not be critical, it is not necessary to reorg")
 	})
 	t.Run("next origin without deposits", func(t *testing.T) {
 		rng := rand.New(rand.NewSource(1234))
@@ -93,9 +93,8 @@ func TestPreparePayloadAttributes(t *testing.T) {
 		l1InfoTx, err := L1InfoDepositBytes(0, l1Info)
 		require.NoError(t, err)
 		l1Fetcher.ExpectFetch(epoch.Hash, l1Info, nil, nil, nil)
-		attrs, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
+		attrs, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
 		require.NoError(t, err)
-		require.False(t, crit)
 		require.NotNil(t, attrs)
 		require.Equal(t, l2Parent.Time+cfg.BlockTime, uint64(attrs.Timestamp))
 		require.Equal(t, eth.Bytes32(l1Info.InfoMixDigest), attrs.PrevRandao)
@@ -132,9 +131,8 @@ func TestPreparePayloadAttributes(t *testing.T) {
 		// txs are ignored, API is a bit bloated to previous approach. Only l1Info and receipts matter.
 		l1Txs := make(types.Transactions, len(receipts))
 		l1Fetcher.ExpectFetch(epoch.Hash, l1Info, l1Txs, receipts, nil)
-		attrs, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
+		attrs, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
 		require.NoError(t, err)
-		require.False(t, crit)
 		require.NotNil(t, attrs)
 		require.Equal(t, l2Parent.Time+cfg.BlockTime, uint64(attrs.Timestamp))
 		require.Equal(t, eth.Bytes32(l1Info.InfoMixDigest), attrs.PrevRandao)
@@ -158,9 +156,8 @@ func TestPreparePayloadAttributes(t *testing.T) {
 		require.NoError(t, err)
 		l1Fetcher.ExpectInfoByHash(epoch.Hash, l1Info, nil)
-		attrs, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
+		attrs, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
 		require.NoError(t, err)
-		require.False(t, crit)
 		require.NotNil(t, attrs)
 		require.Equal(t, l2Parent.Time+cfg.BlockTime, uint64(attrs.Timestamp))
 		require.Equal(t, eth.Bytes32(l1Info.InfoMixDigest), attrs.PrevRandao)
...
@@ -2,9 +2,11 @@ package derive
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"sort"
+	"time"
 	"github.com/ethereum-optimism/optimism/op-node/eth"
 	"github.com/ethereum-optimism/optimism/op-node/rollup"
@@ -79,6 +81,7 @@ func (bq *BatchQueue) Step(ctx context.Context, outer Progress) error {
 	for _, batch := range batches {
 		if uint64(batch.Timestamp) <= bq.next.SafeL2Head().Time {
+			bq.log.Debug("Dropping batch", "SafeL2Head", bq.next.SafeL2Head(), "SafeL2Head_Time", bq.next.SafeL2Head().Time, "batch_timestamp", batch.Timestamp)
 			// drop attributes if we are still progressing towards the next stage
 			// (after a reset rolled us back a full sequence window)
 			continue
@@ -143,7 +146,23 @@ func (bq *BatchQueue) validExtension(batch *BatchWithL1InclusionBlock, prevTime,
 		return false
 	}
-	// TODO: Also check EpochHash (hard b/c maybe extension)
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	l1BlockRef, err := bq.dl.L1BlockRefByNumber(ctx, batch.Batch.Epoch().Number)
+	cancel()
+	if err != nil {
+		bq.log.Warn("err fetching l1 block", "err", err)
+		if errors.Is(err, ErrTemporary) {
+			// Skipping validation in case of temporary RPC error
+			bq.log.Warn("temporary err - skipping epoch hash validation", "err", err)
+			return true
+		} else {
+			return false
+		}
+	}
+	if l1BlockRef.Hash != batch.Batch.EpochHash {
+		return false
+	}
 	// Note: `Batch.EpochNum` is an external input, but it is constrained to be a reasonable size by the
 	// above equality checks.
@@ -201,7 +220,7 @@ func (bq *BatchQueue) deriveBatches(ctx context.Context, l2SafeHead eth.L2BlockRef
 	bq.log.Trace("found batches", "len", len(batches))
 	// Filter + Fill batches
 	batches = FilterBatches(bq.log, bq.config, epoch.ID(), minL2Time, maxL2Time, batches)
-	bq.log.Trace("filtered batches", "len", len(batches), "l1Origin", bq.l1Blocks[0], "nextL1Block", bq.l1Blocks[1])
+	bq.log.Trace("filtered batches", "len", len(batches), "l1Origin", bq.l1Blocks[0], "nextL1Block", bq.l1Blocks[1], "minL2Time", minL2Time, "maxL2Time", maxL2Time)
 	batches = FillMissingBatches(batches, epoch.ID(), bq.config.BlockTime, minL2Time, nextL1BlockTime)
 	bq.log.Trace("added missing batches", "len", len(batches), "l1OriginTime", l1OriginTime, "nextL1BlockTime", nextL1BlockTime)
 	// Advance an epoch after filling all batches.
...
@@ -74,8 +74,10 @@ func (ib *ChannelBank) IngestData(data []byte) error {
 	}
 	ib.log.Debug("channel bank got new data", "origin", ib.progress.Origin, "data_len", len(data))
 	if len(data) < 1 {
-		ib.log.Error("data must be at least have a version byte, but got empty string")
-		return nil
+		return NewTemporaryError(
+			nil,
+			"data must be at least have a version byte, but got empty string",
+		)
 	}
 	if data[0] != DerivationVersion0 {
@@ -136,7 +138,7 @@ func (ib *ChannelBank) IngestData(data []byte) error {
 	}
 	ib.log.Debug("ingesting frame", "channel", f.ID, "frame_number", f.FrameNumber, "length", len(f.Data))
-	if err := currentCh.IngestData(f.FrameNumber, f.IsLast, f.Data); err != nil {
+	if err := currentCh.IngestData(uint64(f.FrameNumber), f.IsLast, f.Data); err != nil {
 		ib.log.Debug("failed to ingest frame into channel", "channel", f.ID, "frame_number", f.FrameNumber, "err", err)
 		if done {
 			return nil
@@ -219,8 +221,10 @@ func (ib *ChannelBank) ResetStep(ctx context.Context, l1Fetcher L1Fetcher) error
 	// go back in history if we are not distant enough from the next stage
 	parent, err := l1Fetcher.L1BlockRefByHash(ctx, ib.progress.Origin.ParentHash)
 	if err != nil {
-		ib.log.Error("failed to find channel bank block, failed to retrieve L1 reference", "err", err)
-		return nil
+		return NewTemporaryError(
+			err,
+			fmt.Sprintf("failed to find channel bank block, failed to retrieve L1 reference: %v", err),
+		)
 	}
 	ib.progress.Origin = parent
 	return nil
...
 package derive
 import (
+	"bytes"
+	"fmt"
 	"math/rand"
 	"strconv"
 	"strings"
@@ -106,32 +108,28 @@ func (tf testFrame) Content() []byte {
 	return []byte(strings.TrimSuffix(parts[3], "!"))
 }
-func (tf testFrame) Encode() []byte {
-	chID := tf.ChannelID()
-	var out []byte
-	out = append(out, chID.Data[:]...)
-	out = append(out, makeUVarint(chID.Time)...)
-	out = append(out, makeUVarint(tf.FrameNumber())...)
-	content := tf.Content()
-	out = append(out, makeUVarint(uint64(len(content)))...)
-	out = append(out, content...)
-	if tf.IsLast() {
-		out = append(out, 1)
-	} else {
-		out = append(out, 0)
-	}
-	return out
+func (tf testFrame) ToFrame() Frame {
+	return Frame{
+		ID:          tf.ChannelID(),
+		FrameNumber: uint16(tf.FrameNumber()),
+		Data:        tf.Content(),
+		IsLast:      tf.IsLast(),
+	}
 }
 func (bt *bankTestSetup) ingestData(data []byte) {
 	require.NoError(bt.t, bt.cb.IngestData(data))
 }
 func (bt *bankTestSetup) ingestFrames(frames ...testFrame) {
-	data := []byte{DerivationVersion0}
+	data := new(bytes.Buffer)
+	data.WriteByte(DerivationVersion0)
 	for _, fr := range frames {
-		data = append(data, fr.Encode()...)
+		f := fr.ToFrame()
+		if err := f.MarshalBinary(data); err != nil {
+			panic(fmt.Errorf("error in making frame during test: %w", err))
+		}
 	}
-	bt.ingestData(data)
+	bt.ingestData(data.Bytes())
 }
 func (bt *bankTestSetup) repeatStep(max int, outer int, outerClosed bool, err error) {
 	require.Equal(bt.t, err, RepeatStep(bt.t, bt.cb.Step, Progress{Origin: bt.origins[outer], Closed: outerClosed}, max))
@@ -292,10 +290,14 @@ func TestL1ChannelBank(t *testing.T) {
 	bt.assertOriginTime(101)
-	badTx := []byte{DerivationVersion0}
-	badTx = append(badTx, testFrame("a:101:0:helloworld!").Encode()...)
-	badTx = append(badTx, testutils.RandomData(bt.rng, 30)...) // incomplete frame data
-	bt.ingestData(badTx)
+	badTx := new(bytes.Buffer)
+	badTx.WriteByte(DerivationVersion0)
+	goodFrame := testFrame("a:101:0:helloworld!").ToFrame()
+	if err := goodFrame.MarshalBinary(badTx); err != nil {
+		panic(fmt.Errorf("error in marshalling frame: %w", err))
+	}
+	badTx.Write(testutils.RandomData(bt.rng, 30)) // incomplete frame data
+	bt.ingestData(badTx.Bytes())
 	bt.expectChannel("helloworld") // can still read the frames before the invalid data
 	bt.repeatStep(2, 0, false, nil)
 	bt.assertExpectations()
...
@@ -12,70 +12,56 @@ import (
 // but we leave space to grow larger anyway (gas limit allows for more data).
 const MaxFrameLen = 1_000_000
-var ErrNotEnoughFrameBytes = errors.New("not enough available bytes for the frame")
 // Data Format
 //
 // frame = channel_id ++ frame_number ++ frame_data_length ++ frame_data ++ is_last
 //
 // channel_id        = random ++ timestamp
 // random            = bytes32
-// timestamp         = uvarint
-// frame_number      = uvarint
-// frame_data_length = uvarint
+// timestamp         = uint64
+// frame_number      = uint16
+// frame_data_length = uint32
 // frame_data        = bytes
 // is_last           = bool
 type Frame struct {
 	ID          ChannelID
-	FrameNumber uint64
+	FrameNumber uint16
 	Data        []byte
 	IsLast      bool
 }
 // MarshalBinary writes the frame to `w`.
-// It returns the number of bytes written as well as any
-// error encountered while writing.
-func (f *Frame) MarshalBinary(w io.Writer) (int, error) {
-	n, err := w.Write(f.ID.Data[:])
-	if err != nil {
-		return n, err
-	}
-	l, err := w.Write(makeUVarint(f.ID.Time))
-	n += l
-	if err != nil {
-		return n, err
-	}
-	l, err = w.Write(makeUVarint(f.FrameNumber))
-	n += l
-	if err != nil {
-		return n, err
-	}
-	l, err = w.Write(makeUVarint(uint64(len(f.Data))))
-	n += l
-	if err != nil {
-		return n, err
-	}
-	l, err = w.Write(f.Data)
-	n += l
-	if err != nil {
-		return n, err
-	}
-	if f.IsLast {
-		l, err = w.Write([]byte{1})
-		n += l
-		if err != nil {
-			return n, err
-		}
-	} else {
-		l, err = w.Write([]byte{0})
-		n += l
-		if err != nil {
-			return n, err
-		}
-	}
-	return n, nil
+// It returns any errors encountered while writing, but
+// generally expects the writer to very rarely fail.
+func (f *Frame) MarshalBinary(w io.Writer) error {
+	_, err := w.Write(f.ID.Data[:])
+	if err != nil {
+		return err
+	}
+	if err := binary.Write(w, binary.BigEndian, f.ID.Time); err != nil {
+		return err
+	}
+	if err := binary.Write(w, binary.BigEndian, f.FrameNumber); err != nil {
+		return err
+	}
+	if err := binary.Write(w, binary.BigEndian, uint32(len(f.Data))); err != nil {
+		return err
+	}
+	_, err = w.Write(f.Data)
+	if err != nil {
+		return err
+	}
+	if f.IsLast {
+		if _, err = w.Write([]byte{1}); err != nil {
+			return err
+		}
+	} else {
+		if _, err = w.Write([]byte{0}); err != nil {
+			return err
+		}
+	}
+	return nil
 }
 type ByteReader interface {
@@ -87,25 +73,23 @@ type ByteReader interface {
 // If `r` fails a read, it returns the error from the reader
 // The reader will be left in a partially read state.
 func (f *Frame) UnmarshalBinary(r ByteReader) error {
-	_, err := io.ReadFull(r, f.ID.Data[:])
-	if err != nil {
+	if _, err := io.ReadFull(r, f.ID.Data[:]); err != nil {
 		return fmt.Errorf("error reading ID: %w", err)
 	}
-	f.ID.Time, err = binary.ReadUvarint(r)
-	if err != nil {
-		return fmt.Errorf("error reading ID.Time: %w", err)
+	if err := binary.Read(r, binary.BigEndian, &f.ID.Time); err != nil {
+		return fmt.Errorf("error reading ID time: %w", err)
 	}
 	// stop reading and ignore remaining data if we encounter a zeroed ID
 	if f.ID == (ChannelID{}) {
 		return io.EOF
 	}
-	f.FrameNumber, err = binary.ReadUvarint(r)
-	if err != nil {
+	if err := binary.Read(r, binary.BigEndian, &f.FrameNumber); err != nil {
 		return fmt.Errorf("error reading frame number: %w", err)
 	}
-	frameLength, err := binary.ReadUvarint(r)
-	if err != nil {
+	var frameLength uint32
+	if err := binary.Read(r, binary.BigEndian, &frameLength); err != nil {
 		return fmt.Errorf("error reading frame length: %w", err)
 	}
@@ -118,16 +102,15 @@ func (f *Frame) UnmarshalBinary(r ByteReader) error {
 		return fmt.Errorf("error reading frame data: %w", err)
 	}
-	isLastByte, err := r.ReadByte()
-	if err != nil && err != io.EOF {
+	if isLastByte, err := r.ReadByte(); err != nil && err != io.EOF {
 		return fmt.Errorf("error reading final byte: %w", err)
-	}
-	if isLastByte == 0 {
+	} else if isLastByte == 0 {
 		f.IsLast = false
+		return err
 	} else if isLastByte == 1 {
 		f.IsLast = true
+		return err
 	} else {
 		return errors.New("invalid byte as is_last")
 	}
-	return err
 }
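The format comment above now fixes every header field's width, so a frame costs exactly 32 (channel ID random) + 8 (timestamp) + 2 (frame number) + 4 (data length) + 1 (is_last) = 47 bytes plus its data; the channel_out.go change below relies on that constant. A round-trip sketch, under the assumption that ChannelID is a struct with Data [32]byte and Time uint64 fields (as the field accesses above suggest):

// exampleFrameRoundTrip is a sketch only, assuming the Frame/ChannelID types above.
func exampleFrameRoundTrip() error {
	f := Frame{
		ID:          ChannelID{Time: 1234}, // real channel IDs carry 32 random bytes in Data
		FrameNumber: 7,
		Data:        []byte("hello"),
		IsLast:      true,
	}
	var buf bytes.Buffer
	if err := f.MarshalBinary(&buf); err != nil {
		return err
	}
	// fixed overhead: 32 + 8 + 2 + 4 + 1 = 47 bytes, plus the frame data
	if buf.Len() != 47+len(f.Data) {
		return fmt.Errorf("unexpected frame size: %d", buf.Len())
	}
	var g Frame
	return g.UnmarshalBinary(&buf) // bytes.Buffer satisfies ByteReader
}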
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"compress/zlib"
 	"crypto/rand"
-	"encoding/binary"
 	"errors"
 	"io"
@@ -80,12 +79,6 @@ func (co *ChannelOut) AddBlock(block *types.Block) error {
 	return blockToBatch(block, co.compress)
 }
-func makeUVarint(x uint64) []byte {
-	var tmp [binary.MaxVarintLen64]byte
-	n := binary.PutUvarint(tmp[:], x)
-	return tmp[:n]
-}
 // ReadyBytes returns the number of bytes that the channel out can immediately output into a frame.
 // Use `Flush` or `Close` to move data from the compression buffer into the ready buffer if more bytes
 // are needed. Add blocks may add to the ready buffer, but it is not guaranteed due to the compression stage.
@@ -115,18 +108,18 @@ func (co *ChannelOut) Close() error {
 func (co *ChannelOut) OutputFrame(w *bytes.Buffer, maxSize uint64) error {
 	f := Frame{
 		ID:          co.id,
-		FrameNumber: co.frame,
+		FrameNumber: uint16(co.frame),
 	}
 	// Copy data from the local buffer into the frame data buffer
-	// Don't go past the maxSize even with the max possible uvarints
-	// +1 for single byte of frame content, +1 for lastFrame bool
-	// +24 for maximum uvarints
-	// +32 for the data ID
-	maxDataSize := maxSize - 32 - 24 - 1 - 1
-	if maxDataSize >= uint64(co.buf.Len()) {
+	// Don't go past the maxSize with the fixed frame overhead.
+	// Fixed overhead: 32 + 8 + 2 + 4 + 1 = 47 bytes.
+	// Add one extra byte for the version byte (for the entire L1 tx though)
+	maxDataSize := maxSize - 47 - 1
+	if maxDataSize > uint64(co.buf.Len()) {
 		maxDataSize = uint64(co.buf.Len())
-		// If we are closed & will not spill past the current frame, end it.
+		// If we are closed & will not spill past the current frame,
+		// mark it as the final frame of the channel.
 		if co.closed {
 			f.IsLast = true
 		}
@@ -137,7 +130,7 @@ func (co *ChannelOut) OutputFrame(w *bytes.Buffer, maxSize uint64) error {
 		return err
 	}
-	if _, err := f.MarshalBinary(w); err != nil {
+	if err := f.MarshalBinary(w); err != nil {
 		return err
 	}
...
@@ -6,6 +6,9 @@ import (
 	"io"
 	"time"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum-optimism/optimism/op-node/eth"
 	"github.com/ethereum-optimism/optimism/op-node/rollup"
 	"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
@@ -160,23 +163,33 @@ func (eq *EngineQueue) tryNextUnsafePayload(ctx context.Context) error {
 	}
 	fcRes, err := eq.engine.ForkchoiceUpdate(ctx, &fc, nil)
 	if err != nil {
-		eq.log.Error("failed to update forkchoice to prepare for new unsafe payload", "err", err)
-		return nil // we can try again later
+		return NewTemporaryError(
+			err,
+			fmt.Sprintf("failed to update forkchoice to prepare for new unsafe payload: %v", err),
+		)
 	}
 	if fcRes.PayloadStatus.Status != eth.ExecutionValid {
-		eq.log.Error("cannot prepare unsafe chain for new payload", "new", first.ID(), "parent", first.ParentID(), "err", eth.ForkchoiceUpdateErr(fcRes.PayloadStatus))
 		eq.unsafePayloads = eq.unsafePayloads[1:]
-		return nil
+		return NewTemporaryError(
+			nil,
+			fmt.Sprintf("cannot prepare unsafe chain for new payload: new - %v; parent: %v; err: %v",
+				first.ID(), first.ParentID(), eth.ForkchoiceUpdateErr(fcRes.PayloadStatus)),
+		)
 	}
 	status, err := eq.engine.NewPayload(ctx, first)
 	if err != nil {
-		eq.log.Error("failed to update insert payload", "err", err)
-		return nil // we can try again later
+		return NewTemporaryError(
+			err,
+			fmt.Sprintf("failed to update insert payload: %v", err),
+		)
 	}
 	if status.Status != eth.ExecutionValid {
-		eq.log.Error("cannot process unsafe payload", "new", first.ID(), "parent", first.ParentID(), "err", eth.ForkchoiceUpdateErr(fcRes.PayloadStatus))
 		eq.unsafePayloads = eq.unsafePayloads[1:]
-		return nil
+		return NewTemporaryError(
+			nil,
+			fmt.Sprintf("cannot process unsafe payload: new - %v; parent: %v; err: %v",
+				first.ID(), first.ParentID(), eth.ForkchoiceUpdateErr(fcRes.PayloadStatus)),
+		)
 	}
 	eq.unsafeHead = ref
 	eq.unsafePayloads = eq.unsafePayloads[1:]
@@ -207,8 +220,10 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
 	payload, err := eq.engine.PayloadByNumber(ctx, eq.safeHead.Number+1)
 	if err != nil {
-		eq.log.Error("failed to get existing unsafe payload to compare against derived attributes from L1", "err", err)
-		return nil
+		return NewTemporaryError(
+			err,
+			fmt.Sprintf("failed to get existing unsafe payload to compare against derived attributes from L1: %v", err),
+		)
 	}
 	if err := AttributesMatchBlock(eq.safeAttributes[0], eq.safeHead.Hash, payload); err != nil {
 		eq.log.Warn("L2 reorg: existing unsafe block does not match derived attributes from L1", "err", err)
@@ -217,8 +232,10 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
 	}
 	ref, err := PayloadToBlockRef(payload, &eq.cfg.Genesis)
 	if err != nil {
-		eq.log.Error("failed to decode L2 block ref from payload", "err", err)
-		return nil
+		return NewTemporaryError(
+			err,
+			fmt.Sprintf("failed to decode L2 block ref from payload: %v", err),
+		)
 	}
 	eq.safeHead = ref
 	// unsafe head stays the same, we did not reorg the chain.
@@ -238,22 +255,38 @@ func (eq *EngineQueue) forceNextSafeAttributes(ctx context.Context) error {
 		SafeBlockHash:      eq.safeHead.Hash,
 		FinalizedBlockHash: eq.finalized.Hash,
 	}
-	payload, rpcErr, payloadErr := InsertHeadBlock(ctx, eq.log, eq.engine, fc, eq.safeAttributes[0], true)
+	attrs := eq.safeAttributes[0]
+	payload, rpcErr, payloadErr := InsertHeadBlock(ctx, eq.log, eq.engine, fc, attrs, true)
 	if rpcErr != nil {
 		// RPC errors are recoverable, we can retry the buffered payload attributes later.
-		eq.log.Error("failed to insert new block", "err", rpcErr)
-		return nil
+		return NewTemporaryError(
+			rpcErr,
+			fmt.Sprintf("failed to insert new block: %v", rpcErr),
+		)
 	}
 	if payloadErr != nil {
-		// invalid payloads are dropped, we move on to the next attributes
-		eq.log.Warn("could not derive valid payload from L1 data", "err", payloadErr)
-		eq.safeAttributes = eq.safeAttributes[1:]
-		return nil
+		eq.log.Warn("could not process payload derived from L1 data", "err", payloadErr)
+		// filter everything but the deposits
+		var deposits []hexutil.Bytes
+		for _, tx := range attrs.Transactions {
+			if len(tx) > 0 && tx[0] == types.DepositTxType {
+				deposits = append(deposits, tx)
+			}
+		}
+		if len(attrs.Transactions) > len(deposits) {
+			eq.log.Warn("dropping sequencer transactions from payload for re-attempt, batcher may have included invalid transactions",
+				"txs", len(attrs.Transactions), "deposits", len(deposits), "parent", eq.safeHead)
+			eq.safeAttributes[0].Transactions = deposits
+			return nil
+		}
+		return NewCriticalError(payloadErr, "failed to process block with only deposit transactions")
 	}
 	ref, err := PayloadToBlockRef(payload, &eq.cfg.Genesis)
 	if err != nil {
-		eq.log.Error("failed to decode L2 block ref from payload", "err", err)
-		return nil
+		return NewTemporaryError(
			err,
			fmt.Sprintf("failed to decode L2 block ref from payload: %v", err),
		)
 	}
 	eq.safeHead = ref
 	eq.unsafeHead = ref
@@ -269,23 +302,30 @@ func (eq *EngineQueue) ResetStep(ctx context.Context, l1Fetcher L1Fetcher) error
 	l2Head, err := eq.engine.L2BlockRefHead(ctx)
 	if err != nil {
-		eq.log.Error("failed to find the L2 Head block", "err", err)
-		return nil
+		return NewTemporaryError(
+			err,
+			fmt.Sprintf("failed to find the L2 Head block: %v", err),
+		)
 	}
 	unsafe, safe, err := sync.FindL2Heads(ctx, l2Head, eq.cfg.SeqWindowSize, l1Fetcher, eq.engine, &eq.cfg.Genesis)
 	if err != nil {
-		eq.log.Error("failed to find the L2 Heads to start from", "err", err)
-		return nil
+		return NewTemporaryError(
+			err,
+			fmt.Sprintf("failed to find the L2 Heads to start from: %v", err),
+		)
 	}
 	l1Origin, err := l1Fetcher.L1BlockRefByHash(ctx, safe.L1Origin.Hash)
 	if err != nil {
-		eq.log.Error("failed to fetch the new L1 progress", "err", err, "origin", safe.L1Origin)
-		return nil
+		return NewTemporaryError(
+			err,
+			fmt.Sprintf("failed to fetch the new L1 progress: origin: %v; err: %v", safe.L1Origin, err),
+		)
 	}
 	if safe.Time < l1Origin.Time {
 		return fmt.Errorf("cannot reset block derivation to start at L2 block %s with time %d older than its L1 origin %s with time %d, time invariant is broken",
 			safe, safe.Time, l1Origin, l1Origin.Time)
 	}
+	eq.log.Debug("Reset engine queue", "safeHead", safe, "unsafe", unsafe, "safe_timestamp", safe.Time, "unsafe_timestamp", unsafe.Time, "l1Origin", l1Origin)
 	eq.unsafeHead = unsafe
 	eq.safeHead = safe
 	eq.progress = Progress{
...
@@ -75,6 +75,9 @@ func InsertHeadBlock(ctx context.Context, log log.Logger, eng Engine, fc eth.ForkchoiceState,
 	if err != nil {
 		return nil, fmt.Errorf("failed to create new block via forkchoice: %w", err), nil
 	}
+	if fcRes.PayloadStatus.Status == eth.ExecutionInvalid || fcRes.PayloadStatus.Status == eth.ExecutionInvalidBlockHash {
+		return nil, nil, eth.ForkchoiceUpdateErr(fcRes.PayloadStatus)
+	}
 	if fcRes.PayloadStatus.Status != eth.ExecutionValid {
 		return nil, eth.ForkchoiceUpdateErr(fcRes.PayloadStatus), nil
 	}
@@ -94,6 +97,9 @@ func InsertHeadBlock(ctx context.Context, log log.Logger, eng Engine, fc eth.ForkchoiceState,
 	if err != nil {
 		return nil, fmt.Errorf("failed to insert execution payload: %w", err), nil
 	}
+	if status.Status == eth.ExecutionInvalid || status.Status == eth.ExecutionInvalidBlockHash {
+		return nil, nil, eth.NewPayloadErr(payload, status)
+	}
 	if status.Status != eth.ExecutionValid {
 		return nil, eth.NewPayloadErr(payload, status), nil
 	}
...
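InsertHeadBlock now routes definitively-invalid execution statuses through its third return value, keeping them apart from recoverable RPC failures in the second. A hypothetical wrapper (not from this diff) showing that convention, using the severity constructors from the new errors.go:

// insertOrClassify is a sketch only: rpcErr is transient and retryable,
// payloadErr marks a payload the engine definitively rejected.
func insertOrClassify(ctx context.Context, logger log.Logger, eng Engine,
	fc eth.ForkchoiceState, attrs *eth.PayloadAttributes) error {
	payload, rpcErr, payloadErr := InsertHeadBlock(ctx, logger, eng, fc, attrs, true)
	if rpcErr != nil {
		return NewTemporaryError(rpcErr, "failed to insert new block") // retry later
	}
	if payloadErr != nil {
		// the engine rejected the payload itself; retrying it as-is cannot succeed
		return NewCriticalError(payloadErr, "engine rejected derived payload")
	}
	logger.Info("inserted block", "id", payload.ID())
	return nil
}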
package derive

import (
	"fmt"
)

// Level is the severity level of the error.
type Level uint

// There are three levels currently, out of which only two are being used
// to classify errors by severity.
const (
	// LevelTemporary is a temporary error, for example due to an RPC or
	// connection issue, and can be safely ignored and retried by the caller
	LevelTemporary Level = iota
	// LevelReset is a pipeline reset error. It must be treated like a reorg.
	LevelReset
	// LevelCritical is a critical error.
	LevelCritical
)

// Error is a wrapper for an error, a description, and a severity level.
type Error struct {
	err   error
	desc  string
	level Level
}

// Error satisfies the error interface.
func (e Error) Error() string {
	if e.err != nil {
		return fmt.Errorf("%w: %s", e.err, e.desc).Error()
	}
	return e.desc
}

// Unwrap satisfies the errors.Unwrap interface.
func (e Error) Unwrap() error {
	return e.err
}

// Is satisfies the errors.Is interface: two Errors match if they share a severity level.
func (e Error) Is(target error) bool {
	if target == nil {
		return e == target
	}
	err, ok := target.(Error)
	if !ok {
		return false
	}
	return e.level == err.level
}

// NewError returns a custom Error.
func NewError(err error, desc string, level Level) error {
	return Error{
		err:   err,
		desc:  desc,
		level: level,
	}
}

// NewTemporaryError returns a temporary error.
func NewTemporaryError(err error, desc string) error {
	return NewError(
		err,
		desc,
		LevelTemporary,
	)
}

// NewResetError returns a pipeline reset error.
func NewResetError(err error, desc string) error {
	return NewError(
		err,
		desc,
		LevelReset,
	)
}

// NewCriticalError returns a critical error.
func NewCriticalError(err error, desc string) error {
	return NewError(
		err,
		desc,
		LevelCritical,
	)
}

// Sentinel errors; use these to get the severity of errors by calling
// errors.Is(err, ErrTemporary) for example.
var ErrTemporary = NewTemporaryError(nil, "temporary error")
var ErrReset = NewResetError(nil, "pipeline reset error")
var ErrCritical = NewCriticalError(nil, "critical error")
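Because Error.Is matches on severity level rather than identity, the sentinels compose with the standard errors package exactly as the comment above suggests: any wrapped temporary error satisfies errors.Is(err, ErrTemporary). A minimal dispatch sketch (assumes import "errors"):

// severity is a sketch of dispatching on the sentinel errors above.
func severity(err error) string {
	switch {
	case err == nil:
		return "none"
	case errors.Is(err, ErrTemporary):
		return "temporary" // retry, e.g. failed RPC or timeout
	case errors.Is(err, ErrReset):
		return "reset" // reset the pipeline, as after a reorg
	case errors.Is(err, ErrCritical):
		return "critical"
	default:
		return "unknown"
	}
}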
@@ -2,6 +2,7 @@ package derive
 import (
 	"context"
+	"fmt"
 	"io"
 	"github.com/ethereum-optimism/optimism/op-node/eth"
@@ -65,8 +66,10 @@ func (l1r *L1Retrieval) Step(ctx context.Context, outer Progress) error {
 	if l1r.datas == nil {
 		datas, err := l1r.dataSrc.OpenData(ctx, l1r.progress.Origin.ID())
 		if err != nil {
-			l1r.log.Error("can't fetch L1 data", "origin", l1r.progress.Origin, "err", err)
-			return nil
+			return NewTemporaryError(
+				err,
+				fmt.Sprintf("can't fetch L1 data: %v, %v", l1r.progress.Origin, err),
+			)
 		}
 		l1r.datas = datas
 		return nil
...
@@ -56,7 +56,7 @@ func (l1t *L1Traversal) Step(ctx context.Context, outer Progress) error {
 		return nil // nil, don't make the pipeline restart if the RPC fails
 	}
 	if l1t.progress.Origin.Hash != nextL1Origin.ParentHash {
-		return fmt.Errorf("detected L1 reorg from %s to %s: %w", l1t.progress.Origin, nextL1Origin, ReorgErr)
+		return NewResetError(ReorgErr, fmt.Sprintf("detected L1 reorg from %s to %s", l1t.progress.Origin, nextL1Origin))
 	}
 	l1t.progress.Origin = nextL1Origin
 	l1t.progress.Closed = false
...
@@ -24,12 +24,12 @@ func (pr *Progress) Update(outer Progress) (changed bool, err error) {
 	if pr.Closed {
 		if outer.Closed {
 			if pr.Origin.ID() != outer.Origin.ID() {
-				return true, fmt.Errorf("outer stage changed origin from %s to %s without opening it", pr.Origin, outer.Origin)
+				return true, NewResetError(ReorgErr, fmt.Sprintf("outer stage changed origin from %s to %s without opening it", pr.Origin, outer.Origin))
 			}
 			return false, nil
 		} else {
 			if pr.Origin.Hash != outer.Origin.ParentHash {
-				return true, fmt.Errorf("detected internal pipeline reorg of L1 origin data from %s to %s: %w", pr.Origin, outer.Origin, ReorgErr)
+				return true, NewResetError(ReorgErr, fmt.Sprintf("detected internal pipeline reorg of L1 origin data from %s to %s", pr.Origin, outer.Origin))
 			}
 			pr.Origin = outer.Origin
 			pr.Closed = false
@@ -37,7 +37,7 @@ func (pr *Progress) Update(outer Progress) (changed bool, err error) {
 		}
 	} else {
 		if pr.Origin.ID() != outer.Origin.ID() {
-			return true, fmt.Errorf("outer stage changed origin from %s to %s before closing it", pr.Origin, outer.Origin)
+			return true, NewResetError(ReorgErr, fmt.Sprintf("outer stage changed origin from %s to %s before closing it", pr.Origin, outer.Origin))
 		}
 		if outer.Closed {
 			pr.Closed = true
...
@@ -3,14 +3,17 @@ package driver
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	gosync "sync"
 	"time"

+	"github.com/ethereum-optimism/optimism/op-node/backoff"
 	"github.com/ethereum-optimism/optimism/op-node/eth"
 	"github.com/ethereum-optimism/optimism/op-node/metrics"
 	"github.com/ethereum-optimism/optimism/op-node/rollup"
+	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
 	"github.com/ethereum/go-ethereum/log"
 )
@@ -249,7 +252,8 @@ func (s *state) createNewL2Block(ctx context.Context) error {
 	if s.network != nil {
 		if err := s.network.PublishL2Payload(ctx, payload); err != nil {
 			s.log.Warn("failed to publish newly created block", "id", payload.ID(), "err", err)
-			return err
+			s.metrics.PublishingErrorsTotal.Inc()
+			// publishing of unsafe data via p2p is optional. Errors are not severe enough to change/halt sequencing but should be logged and metered.
 		}
 	}
@@ -290,8 +294,15 @@ func (s *state) eventLoop() {
 		}
 	}

-	// reqStep requests a derivation step to be taken. Won't deadlock if the channel is full.
-	reqStep := func() {
+	// channel, nil by default (not firing), but used to schedule re-attempts with delay
+	var delayedStepReq <-chan time.Time
+
+	// keep track of consecutive failed attempts, to adjust the backoff time accordingly
+	bOffStrategy := backoff.Exponential()
+	stepAttempts := 0
+
+	// step requests a derivation step to be taken. Won't deadlock if the channel is full.
+	step := func() {
 		select {
 		case stepReqCh <- struct{}{}:
 		// Don't deadlock if the channel is already full
@@ -299,6 +310,22 @@ func (s *state) eventLoop() {
 		}
 	}

+	// reqStep requests a derivation step nicely, with a delay if this is a reattempt, or not at all if we already scheduled a reattempt.
+	reqStep := func() {
+		if stepAttempts > 0 {
+			// if this is not the first attempt, we re-schedule with a backoff, *without blocking other events*
+			if delayedStepReq == nil {
+				delay := bOffStrategy.Duration(stepAttempts)
+				s.log.Debug("scheduling re-attempt with delay", "attempts", stepAttempts, "delay", delay)
+				delayedStepReq = time.After(delay)
+			} else {
+				s.log.Debug("ignoring step request, already scheduled re-attempt after previous failure", "attempts", stepAttempts)
+			}
+		} else {
+			step()
+		}
+	}
+
 	// We call reqStep right away to finish syncing to the tip of the chain if we're behind.
 	// reqStep will also be triggered when the L1 head moves forward or if there was a reorg on the
 	// L1 chain that we need to handle.
@@ -322,7 +349,8 @@ func (s *state) eventLoop() {
 			cancel()
 			if err != nil {
 				s.log.Error("Error creating new L2 block", "err", err)
-				s.metrics.DerivationErrorsTotal.Inc()
+				s.metrics.SequencingErrorsTotal.Inc()
+				break // if we fail, we wait for the next block creation trigger.
 			}
 			// We need to catch up to the next origin as quickly as possible. We can do this by
@@ -346,24 +374,42 @@ func (s *state) eventLoop() {
 			s.snapshot("New L1 Head")
 			s.handleNewL1Block(newL1Head)
 			reqStep() // a new L1 head may mean we have the data to not get an EOF again.
+		case <-delayedStepReq:
+			delayedStepReq = nil
+			step()
 		case <-stepReqCh:
 			s.metrics.SetDerivationIdle(false)
 			s.idleDerivation = false
-			s.log.Debug("Derivation process step", "onto_origin", s.derivation.Progress().Origin, "onto_closed", s.derivation.Progress().Closed)
+			s.log.Debug("Derivation process step", "onto_origin", s.derivation.Progress().Origin, "onto_closed", s.derivation.Progress().Closed, "attempts", stepAttempts)
 			stepCtx, cancel := context.WithTimeout(ctx, time.Second*10) // TODO pick a timeout for executing a single step
 			err := s.derivation.Step(stepCtx)
 			cancel()
+			stepAttempts += 1 // count as attempt by default. We reset to 0 if we are making healthy progress.
 			if err == io.EOF {
 				s.log.Debug("Derivation process went idle", "progress", s.derivation.Progress().Origin)
 				s.idleDerivation = true
+				stepAttempts = 0
 				s.metrics.SetDerivationIdle(true)
 				continue
-			} else if err != nil {
+			} else if err != nil && errors.Is(err, derive.ErrReset) {
 				// If the pipeline corrupts, e.g. due to a reorg, simply reset it
 				s.log.Warn("Derivation pipeline is reset", "err", err)
 				s.derivation.Reset()
 				s.metrics.RecordPipelineReset()
+				continue
+			} else if err != nil && errors.Is(err, derive.ErrTemporary) {
+				s.log.Warn("Derivation process temporary error", "attempts", stepAttempts, "err", err)
+				reqStep()
+				continue
+			} else if err != nil && errors.Is(err, derive.ErrCritical) {
+				s.log.Error("Derivation process critical error", "err", err)
+				return
+			} else if err != nil {
+				s.log.Error("Derivation process error", "attempts", stepAttempts, "err", err)
+				reqStep()
+				continue
 			} else {
+				stepAttempts = 0
 				finalized, safe, unsafe := s.derivation.Finalized(), s.derivation.SafeL2Head(), s.derivation.UnsafeL2Head()
 				// log sync progress when it changes
 				if s.l2Finalized != finalized || s.l2SafeHead != safe || s.l2Head != unsafe {
...
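The retry flow above hinges on two details: a nil `<-chan time.Time` never fires, so `delayedStepReq` stays inert until a failure arms it, and `time.After` arms it at most once per failure streak. A self-contained sketch of the same non-blocking retry pattern, using a hypothetical `expBackoff` in place of `backoff.Exponential()` (whose exact API is an assumption here):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// expBackoff mimics the assumed exponential strategy: the delay doubles with
// each consecutive failed attempt, capped, with a little jitter.
func expBackoff(attempt int) time.Duration {
	d := time.Duration(1<<uint(attempt)) * 100 * time.Millisecond
	if d > 10*time.Second {
		d = 10 * time.Second
	}
	// up to ~10% jitter so retries don't synchronize
	return d + time.Duration(rand.Int63n(int64(d/10)+1))
}

func main() {
	stepReqCh := make(chan struct{}, 1)
	var delayedStepReq <-chan time.Time // nil channel: never fires until armed
	attempts := 0

	step := func() {
		select {
		case stepReqCh <- struct{}{}:
		default: // don't deadlock if a step is already queued
		}
	}
	reqStep := func() {
		if attempts > 0 {
			if delayedStepReq == nil { // arm at most one pending retry
				delayedStepReq = time.After(expBackoff(attempts))
			}
			return
		}
		step()
	}

	reqStep()
	for i := 0; i < 5; i++ {
		select {
		case <-delayedStepReq:
			delayedStepReq = nil
			step()
		case <-stepReqCh:
			attempts++ // pretend every step fails, to exercise the backoff
			fmt.Println("step failed, attempt", attempts)
			reqStep()
		}
	}
}
```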
@@ -25,7 +25,7 @@ func (d *outputImpl) createNewBlock(ctx context.Context, l2Head eth.L2BlockRef,
 	fetchCtx, cancel := context.WithTimeout(ctx, time.Second*20)
 	defer cancel()
-	attrs, _, err := derive.PreparePayloadAttributes(fetchCtx, d.Config, d.dl, l2Head, l2Head.Time+d.Config.BlockTime, l1Origin.ID())
+	attrs, err := derive.PreparePayloadAttributes(fetchCtx, d.Config, d.dl, l2Head, l2Head.Time+d.Config.BlockTime, l1Origin.ID())
 	if err != nil {
 		return l2Head, nil, err
 	}
...
@@ -35,6 +35,7 @@ COPY --from=foundry /usr/local/bin/cast /usr/local/bin/cast
 WORKDIR /opt/optimism
 COPY *.json yarn.lock ./
 COPY packages/sdk/package.json ./packages/sdk/package.json
+COPY packages/actor-tests/package.json ./packages/actor-tests/package.json
 COPY packages/core-utils/package.json ./packages/core-utils/package.json
 COPY packages/common-ts/package.json ./packages/common-ts/package.json
 COPY packages/contracts/package.json ./packages/contracts/package.json
@@ -57,6 +58,9 @@ COPY ./integration-tests ./integration-tests
 # build it!
 RUN yarn build
+FROM base as actor-tests-bedrock
+WORKDIR /opt/optimism/packages/actor-tests
+ENTRYPOINT ["yarn", "run:bedrock"]
 FROM base as deployer
 WORKDIR /opt/optimism/packages/contracts
...
@@ -2,7 +2,7 @@ FROM ethereum/client-go:alltools-v1.10.17 as geth
 FROM ethereumoptimism/foundry:latest as foundry
-FROM python:3.8.12-slim-buster
+FROM python:3.8.13-slim-bullseye
 ENV GOPATH /go
 ENV PATH /usr/local/go/bin:$GOPATH/bin:$PATH
...
ignores: [
"typescript"
]
module.exports = {
extends: '../../.eslintrc.js',
}
module.exports = {
...require('../../.prettierrc.js'),
}
(The MIT License)
Copyright 2020-2021 Optimism
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import { Wallet, utils } from 'ethers'
import { expect } from 'chai'
import { actor, setupActor, run, setupRun } from '../lib/convenience'
import { devWalletsL2 } from './utils'
interface Context {
wallet: Wallet
}
actor('Sender', () => {
let sourceWallet: Wallet
let destWallet: Wallet
setupActor(async () => {
const devWallets = devWalletsL2()
sourceWallet = devWallets[0]
destWallet = devWallets[1]
})
setupRun(async () => {
const newWallet = Wallet.createRandom().connect(sourceWallet.provider)
const tx = await sourceWallet.sendTransaction({
to: newWallet.address,
value: utils.parseEther('0.1'),
})
await tx.wait()
return {
wallet: newWallet,
}
})
run(async (b, ctx: Context, logger) => {
const { wallet } = ctx
logger.log(`Sending funds to ${destWallet.address}.`)
const tx = await wallet.sendTransaction({
to: destWallet.address,
value: 0x42,
})
logger.log(`Awaiting receipt for send tx ${tx.hash}.`)
const receipt = await tx.wait()
expect(receipt.status).to.eq(1)
logger.log(`Send completed in block ${receipt.blockNumber}.`)
})
})
{
"_format": "hh-sol-artifact-1",
"contractName": "ERC20",
"sourceName": "contracts/ERC20.sol",
"abi": [
{
"inputs": [
{
"internalType": "uint256",
"name": "_initialAmount",
"type": "uint256"
},
{
"internalType": "string",
"name": "_tokenName",
"type": "string"
},
{
"internalType": "uint8",
"name": "_decimalUnits",
"type": "uint8"
},
{
"internalType": "string",
"name": "_tokenSymbol",
"type": "string"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "_owner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "_spender",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "_value",
"type": "uint256"
}
],
"name": "Approval",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "_from",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "_to",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "_value",
"type": "uint256"
}
],
"name": "Transfer",
"type": "event"
},
{
"inputs": [
{
"internalType": "address",
"name": "_owner",
"type": "address"
},
{
"internalType": "address",
"name": "_spender",
"type": "address"
}
],
"name": "allowance",
"outputs": [
{
"internalType": "uint256",
"name": "remaining",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "",
"type": "address"
},
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"name": "allowed",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "_value",
"type": "uint256"
}
],
"name": "approve",
"outputs": [
{
"internalType": "bool",
"name": "success",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_owner",
"type": "address"
}
],
"name": "balanceOf",
"outputs": [
{
"internalType": "uint256",
"name": "balance",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"name": "balances",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "decimals",
"outputs": [
{
"internalType": "uint8",
"name": "",
"type": "uint8"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "destroy",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "name",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "symbol",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "totalSupply",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_to",
"type": "address"
},
{
"internalType": "uint256",
"name": "_value",
"type": "uint256"
}
],
"name": "transfer",
"outputs": [
{
"internalType": "bool",
"name": "success",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_from",
"type": "address"
},
{
"internalType": "address",
"name": "_to",
"type": "address"
},
{
"internalType": "uint256",
"name": "_value",
"type": "uint256"
}
],
"name": "transferFrom",
"outputs": [
{
"internalType": "bool",
"name": "success",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
}
],
"bytecode": "0x60806040523480156200001157600080fd5b5060405162000a0c38038062000a0c833981016040819052620000349162000203565b336000908152602081815260409091208590556005859055835162000060916002919086019062000090565b506003805460ff191660ff841617905580516200008590600490602084019062000090565b5050505050620002cf565b8280546200009e9062000292565b90600052602060002090601f016020900481019282620000c257600085556200010d565b82601f10620000dd57805160ff19168380011785556200010d565b828001600101855582156200010d579182015b828111156200010d578251825591602001919060010190620000f0565b506200011b9291506200011f565b5090565b5b808211156200011b576000815560010162000120565b634e487b7160e01b600052604160045260246000fd5b600082601f8301126200015e57600080fd5b81516001600160401b03808211156200017b576200017b62000136565b604051601f8301601f19908116603f01168101908282118183101715620001a657620001a662000136565b81604052838152602092508683858801011115620001c357600080fd5b600091505b83821015620001e75785820183015181830184015290820190620001c8565b83821115620001f95760008385830101525b9695505050505050565b600080600080608085870312156200021a57600080fd5b845160208601519094506001600160401b03808211156200023a57600080fd5b62000248888389016200014c565b94506040870151915060ff821682146200026157600080fd5b6060870151919350808211156200027757600080fd5b5062000286878288016200014c565b91505092959194509250565b600181811c90821680620002a757607f821691505b60208210811415620002c957634e487b7160e01b600052602260045260246000fd5b50919050565b61072d80620002df6000396000f3fe608060405234801561001057600080fd5b50600436106100b45760003560e01c80635c658165116100715780635c6581651461016357806370a082311461018e57806383197ef0146101b757806395d89b41146101bf578063a9059cbb146101c7578063dd62ed3e146101da57600080fd5b806306fdde03146100b9578063095ea7b3146100d757806318160ddd146100fa57806323b872dd1461011157806327e235e314610124578063313ce56714610144575b600080fd5b6100c1610213565b6040516100ce9190610574565b60405180910390f35b6100ea6100e53660046105e5565b6102a1565b60405190151581526020016100ce565b61010360055481565b6040519081526020016100ce565b6100ea61011f36600461060f565b61030d565b61010361013236600461064b565b60006020819052908152604090205481565b6003546101519060ff1681565b60405160ff90911681526020016100ce565b61010361017136600461066d565b600160209081526000928352604080842090915290825290205481565b61010361019c36600461064b565b6001600160a01b031660009081526020819052604090205490565b6101bd33ff5b005b6100c1610483565b6100ea6101d53660046105e5565b610490565b6101036101e836600461066d565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b60028054610220906106a0565b80601f016020809104026020016040519081016040528092919081815260200182805461024c906106a0565b80156102995780601f1061026e57610100808354040283529160200191610299565b820191906000526020600020905b81548152906001019060200180831161027c57829003601f168201915b505050505081565b3360008181526001602090815260408083206001600160a01b038716808552925280832085905551919290917f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925906102fc9086815260200190565b60405180910390a350600192915050565b6001600160a01b038316600081815260016020908152604080832033845282528083205493835290829052812054909190831180159061034d5750828110155b61038e5760405162461bcd60e51b815260206004820152600d60248201526c62616420616c6c6f77616e636560981b60448201526064015b60405180910390fd5b6001600160a01b038416600090815260208190526040812080548592906103b69084906106f1565b90915550506001600160a01b038516600090815260208190526040812080548592906103e3908490610709565b909155505060001981101561042b576001600160a01b0385166000908152600160209081526040
80832033845290915281208054859290610425908490610709565b90915550505b836001600160a01b0316856001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8560405161047091815260200190565b60405180910390a3506001949350505050565b60048054610220906106a0565b336000908152602081905260408120548211156104e65760405162461bcd60e51b8152602060048201526014602482015273696e73756666696369656e742062616c616e636560601b6044820152606401610385565b3360009081526020819052604081208054849290610505908490610709565b90915550506001600160a01b038316600090815260208190526040812080548492906105329084906106f1565b90915550506040518281526001600160a01b0384169033907fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef906020016102fc565b600060208083528351808285015260005b818110156105a157858101830151858201604001528201610585565b818111156105b3576000604083870101525b50601f01601f1916929092016040019392505050565b80356001600160a01b03811681146105e057600080fd5b919050565b600080604083850312156105f857600080fd5b610601836105c9565b946020939093013593505050565b60008060006060848603121561062457600080fd5b61062d846105c9565b925061063b602085016105c9565b9150604084013590509250925092565b60006020828403121561065d57600080fd5b610666826105c9565b9392505050565b6000806040838503121561068057600080fd5b610689836105c9565b9150610697602084016105c9565b90509250929050565b600181811c908216806106b457607f821691505b602082108114156106d557634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b60008219821115610704576107046106db565b500190565b60008282101561071b5761071b6106db565b50039056fea164736f6c6343000809000a",
"deployedBytecode": "0x608060405234801561001057600080fd5b50600436106100b45760003560e01c80635c658165116100715780635c6581651461016357806370a082311461018e57806383197ef0146101b757806395d89b41146101bf578063a9059cbb146101c7578063dd62ed3e146101da57600080fd5b806306fdde03146100b9578063095ea7b3146100d757806318160ddd146100fa57806323b872dd1461011157806327e235e314610124578063313ce56714610144575b600080fd5b6100c1610213565b6040516100ce9190610574565b60405180910390f35b6100ea6100e53660046105e5565b6102a1565b60405190151581526020016100ce565b61010360055481565b6040519081526020016100ce565b6100ea61011f36600461060f565b61030d565b61010361013236600461064b565b60006020819052908152604090205481565b6003546101519060ff1681565b60405160ff90911681526020016100ce565b61010361017136600461066d565b600160209081526000928352604080842090915290825290205481565b61010361019c36600461064b565b6001600160a01b031660009081526020819052604090205490565b6101bd33ff5b005b6100c1610483565b6100ea6101d53660046105e5565b610490565b6101036101e836600461066d565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b60028054610220906106a0565b80601f016020809104026020016040519081016040528092919081815260200182805461024c906106a0565b80156102995780601f1061026e57610100808354040283529160200191610299565b820191906000526020600020905b81548152906001019060200180831161027c57829003601f168201915b505050505081565b3360008181526001602090815260408083206001600160a01b038716808552925280832085905551919290917f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925906102fc9086815260200190565b60405180910390a350600192915050565b6001600160a01b038316600081815260016020908152604080832033845282528083205493835290829052812054909190831180159061034d5750828110155b61038e5760405162461bcd60e51b815260206004820152600d60248201526c62616420616c6c6f77616e636560981b60448201526064015b60405180910390fd5b6001600160a01b038416600090815260208190526040812080548592906103b69084906106f1565b90915550506001600160a01b038516600090815260208190526040812080548592906103e3908490610709565b909155505060001981101561042b576001600160a01b038516600090815260016020908152604080832033845290915281208054859290610425908490610709565b90915550505b836001600160a01b0316856001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8560405161047091815260200190565b60405180910390a3506001949350505050565b60048054610220906106a0565b336000908152602081905260408120548211156104e65760405162461bcd60e51b8152602060048201526014602482015273696e73756666696369656e742062616c616e636560601b6044820152606401610385565b3360009081526020819052604081208054849290610505908490610709565b90915550506001600160a01b038316600090815260208190526040812080548492906105329084906106f1565b90915550506040518281526001600160a01b0384169033907fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef906020016102fc565b600060208083528351808285015260005b818110156105a157858101830151858201604001528201610585565b818111156105b3576000604083870101525b50601f01601f1916929092016040019392505050565b80356001600160a01b03811681146105e057600080fd5b919050565b600080604083850312156105f857600080fd5b610601836105c9565b946020939093013593505050565b60008060006060848603121561062457600080fd5b61062d846105c9565b925061063b602085016105c9565b9150604084013590509250925092565b60006020828403121561065d57600080fd5b610666826105c9565b9392505050565b6000806040838503121561068057600080fd5b610689836105c9565b9150610697602084016105c9565b90509250929050565b600181811c908216806106b457607f821691505b602082108114156106d557634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b6000
52601160045260246000fd5b60008219821115610704576107046106db565b500190565b60008282101561071b5761071b6106db565b50039056fea164736f6c6343000809000a",
"linkReferences": {},
"deployedLinkReferences": {}
}
import { Wallet, ContractFactory } from 'ethers'
import { actor, setupActor, run } from '../lib/convenience'
import { devWalletsL2 } from './utils'
import * as ERC20 from './contracts/ERC20.json'
actor('Deployer', () => {
let wallets: Wallet[]
setupActor(async () => {
wallets = devWalletsL2()
})
run(async (b, ctx, logger) => {
const sender = wallets[Math.floor(Math.random() * wallets.length)]
const contract = new ContractFactory(ERC20.abi, ERC20.bytecode).connect(
sender
)
logger.log(`Deploying contract with ${sender.address}.`)
const deployment = await contract.deploy(
Math.floor(1_000_000 * Math.random()),
'Test Token',
18,
'OP'
)
logger.log(
`Awaiting receipt for deployment tx ${deployment.deployTransaction.hash}.`
)
await deployment.deployed()
const receipt = await sender.provider.getTransactionReceipt(
deployment.deployTransaction.hash
)
logger.log(`Deployment completed in block ${receipt.blockNumber}.`)
})
})
import { utils, Wallet, providers, constants } from 'ethers'
import {
CrossChainMessenger,
ETHBridgeAdapter,
StandardBridgeAdapter,
} from '@eth-optimism/sdk'
import { predeploys } from '@eth-optimism/contracts-bedrock'
import { sleep } from '@eth-optimism/core-utils'
import { actor, setupActor, run } from '../lib/convenience'
interface Context {
wallet: Wallet
}
actor('Dev account sender', () => {
let l1Provider: providers.JsonRpcProvider
let l2Provider: providers.JsonRpcProvider
let wallet: Wallet
let messenger: CrossChainMessenger
let contracts: any
setupActor(async () => {
l1Provider = new providers.JsonRpcProvider(process.env.L1_RPC)
l2Provider = new providers.JsonRpcProvider(process.env.L2_RPC)
wallet = new Wallet(process.env.PRIVATE_KEY)
contracts = require(process.env.CONTRACTS_JSON_PATH)
messenger = new CrossChainMessenger({
l1SignerOrProvider: wallet.connect(l1Provider),
l2SignerOrProvider: wallet.connect(l2Provider),
l1ChainId: (await l1Provider.getNetwork()).chainId,
l2ChainId: (await l2Provider.getNetwork()).chainId,
bridges: {
Standard: {
Adapter: StandardBridgeAdapter,
l1Bridge: contracts.L1StandardBridgeProxy,
l2Bridge: predeploys.L2StandardBridge,
},
ETH: {
Adapter: ETHBridgeAdapter,
l1Bridge: contracts.L1StandardBridgeProxy,
l2Bridge: predeploys.L2StandardBridge,
},
},
contracts: {
l1: {
AddressManager: constants.AddressZero,
StateCommitmentChain: constants.AddressZero,
CanonicalTransactionChain: constants.AddressZero,
BondManager: constants.AddressZero,
L1StandardBridge: contracts.L1StandardBridgeProxy,
L1CrossDomainMessenger: contracts.L1CrossDomainMessengerProxy,
L2OutputOracle: contracts.L2OutputOracleProxy,
OptimismPortal: contracts.OptimismPortalProxy,
},
},
bedrock: true,
})
})
run(async (b, ctx: Context, logger) => {
const recipient = Wallet.createRandom().connect(l2Provider)
logger.log(`Depositing funds to ${recipient.address}.`)
const depositTx = await messenger.depositETH(utils.parseEther('0.0001'), {
recipient: recipient.address,
})
logger.log(`Awaiting receipt for deposit tx ${depositTx.hash}.`)
await depositTx.wait()
// Temporary until this is supported in the SDK.
for (let i = 0; i < 60; i++) {
const recipBal = await recipient.getBalance()
logger.log(`Polling L2 for deposit completion.`)
if (recipBal.eq(utils.parseEther('0.0001'))) {
logger.log('Deposit successful.')
return
}
await sleep(1000)
}
throw new Error('Timed out.')
})
})
import { providers, Wallet } from 'ethers'
const DEV_MNEMONIC =
'test test test test test test test test test test test junk'
export const devWalletsL2 = () => {
const provider = new providers.JsonRpcProvider(process.env.L2_RPC)
const wallets = []
for (let i = 0; i < 20; i++) {
wallets.push(
Wallet.fromMnemonic(DEV_MNEMONIC, `m/44'/60'/0'/0/${i}`).connect(provider)
)
}
return wallets
}
@@ -22,7 +22,11 @@ export interface Bencher {
   bench: (name: string, cb: () => Promise<any>) => Promise<any>
 }

-export type RunCB<C> = (b: Bencher, ctx: C) => Promise<void>
+export type RunCB<C> = (
+  b: Bencher,
+  ctx: C,
+  logger: WorkerLogger
+) => Promise<void>

 export interface RunOpts {
   runs: number | null
@@ -138,7 +142,7 @@ export class Runner {
       }
       try {
-        await this.actor.run(this.stepper, ctx)
+        await this.actor.run(this.stepper, ctx, this.logger)
       } catch (e) {
         console.error('Error in actor run:')
         console.error(`Benchmark name: ${actor.name}`)
@@ -147,6 +151,7 @@ export class Runner {
         console.error('Stack trace:')
         console.error(e)
         failedActorRunsTotal.inc(metricLabels)
+        await sleep(1000)
         continue
       }
@@ -154,7 +159,10 @@ export class Runner {
       i++
-      if (opts.runs && (i % 10 === 0 || i === opts.runs)) {
+      if (
+        (opts.runs && (i % 10 === 0 || i === opts.runs)) ||
+        now - lastDurPrint > 10000
+      ) {
         this.logger.log(`Completed run ${i} of ${opts.runs}.`)
       }
@@ -185,7 +193,8 @@ export class Actor {
   private _tearDownRun: <C>(ctx: C) => Promise<void> = asyncNoop as any
   // eslint-disable-next-line @typescript-eslint/no-empty-function
-  private _run: <C>(b: Bencher, ctx: C) => Promise<void> = asyncNoop
+  private _run: <C>(b: Bencher, ctx: C, logger: WorkerLogger) => Promise<void> =
+    asyncNoop

   private logger: ActorLogger
...
@@ -5,7 +5,7 @@ import { Command } from 'commander'
 import { defaultRuntime } from './convenience'
 import { RunOpts } from './actor'
 import { serveMetrics } from './metrics'
-import pkg from '../../package.json'
+import pkg from '../package.json'
 const program = new Command()
 program.version(pkg.version)
...
{
"name": "@eth-optimism/actor-tests",
"version": "0.0.1",
"description": "A library and suite of tests to stress test Optimism Bedrock.",
"license": "MIT",
"author": "",
"main": "index.js",
"directories": {
"lib": "lib"
},
"scripts": {
"lint": "yarn lint:fix && yarn lint:check",
"lint:check": "eslint . --max-warnings=0",
"lint:fix": "yarn lint:check --fix",
"pre-commit": "lint-staged",
"run:bedrock": "ts-node ./lib/runner.ts -f",
"test": "echo 'No tests specified.'",
"test:coverage": "yarn test"
},
"dependencies": {
"@eth-optimism/contracts-bedrock": "0.5.2",
"@eth-optimism/core-utils": "^0.9.2",
"@eth-optimism/sdk": "^1.3.1",
"@types/chai": "^4.2.18",
"@types/chai-as-promised": "^7.1.4",
"async-mutex": "^0.3.2",
"chai": "^4.3.4",
"chai-as-promised": "^7.1.1",
"commander": "^8.3.0",
"eslint": "^7.27.0",
"eslint-config-prettier": "^8.3.0",
"eslint-plugin-import": "^2.26.0",
"eslint-plugin-jsdoc": "^35.1.2",
"eslint-plugin-prefer-arrow": "^1.2.3",
"eslint-plugin-prettier": "^3.4.0",
"ethers": "^5.6.9",
"prom-client": "^14.0.1",
"typescript": "^4.3.5"
}
}
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"resolveJsonModule": true
},
"include": [
"./lib",
"./bedrock/**/*.(ts|json)",
"./package.json"
]
}
@@ -176,8 +176,8 @@ export const BRIDGE_ADAPTER_DATA: {
   [L2ChainID.OPTIMISM_KOVAN]: {
     wstETH: {
       Adapter: DAIBridgeAdapter,
-      l1Bridge: '0xa88751C0a08623E11ff38c6B70F2BbEe7865C17c' as const,
-      l2Bridge: '0xF9C842dE4381a70eB265d10CF8D43DceFF5bA935' as const,
+      l1Bridge: '0x65321bf24210b81500230dCEce14Faa70a9f50a7' as const,
+      l2Bridge: '0x2E34e7d705AfaC3C4665b6feF31Aa394A1c81c92' as const,
     },
     BitBTC: {
       Adapter: StandardBridgeAdapter,
...
@@ -265,19 +265,21 @@ frame = channel_id ++ frame_number ++ frame_data_length ++ frame_data ++ is_last
 channel_id = random ++ timestamp
 random = bytes32
-timestamp = uvarint
-frame_number = uvarint
-frame_data_length = uvarint
+timestamp = uint64
+frame_number = uint16
+frame_data_length = uint32
 frame_data = bytes
 is_last = bool
+
+Where `uint64`, `uint32` and `uint16` are all big-endian unsigned integers.
 ```
-> **TODO** replace `uvarint` by fixed size integers
+All data in a frame is fixed-size, except the `frame_data`. The fixed overhead is `32 + 8 + 2 + 4 + 1 = 47 bytes`.
+Fixed-size frame metadata avoids a circular dependency with the target total data length,
+to simplify packing of frames with varying content length.

 where:

-- `uvarint` is a variable-length encoding of a 64-bit unsigned integer into between 1 and 9 bytes, [as specified in
-  SQLite 4][sqlite-uvarint].
 - `channel_id` uniquely identifies a channel as the concatenation of a random value and a timestamp
 - `random` is a random value such that two channels with different batches should have a different random value
 - `timestamp` is the time at which the channel was created (UNIX time in seconds)
@@ -290,7 +292,7 @@ where:
   margin. (A soft constraint is not a consensus rule — nodes will accept such blocks in the canonical chain but will
   not attempt to build directly on them.)
 - `frame_number` identifies the index of the frame within the channel
-- `frame_data_length` is the length of `frame_data` in bytes
+- `frame_data_length` is the length of `frame_data` in bytes. It is capped to 1,000,000 bytes.
 - `frame_data` is a sequence of bytes belonging to the channel, logically after the bytes from the previous frames
 - `is_last` is a single byte with a value of 1 if the frame is the last in the channel, 0 if there are frames in the
   channel. Any other value makes the frame invalid (it must be ignored by the rollup node).
@@ -302,8 +304,7 @@ where:
 > - Do we drop the channel or just the first frame? End result is the same but this changes the channel bank size, which
 >   can influence things down the line!!

-[sqlite-uvarint]: https://www.sqlite.org/src4/doc/trunk/www/varint.wiki
-[batcher-spec]: batcher.md
+[batcher-spec]: batching.md

 ### Channel Format
...
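To make the `32 + 8 + 2 + 4 + 1 = 47` byte overhead concrete, here is a minimal Go sketch of frame encoding per the format above; the `Frame` type and `Marshal` helper are illustrative, not the rollup node's actual implementation:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Frame mirrors the fixed-size layout in the spec:
// channel_id = random (bytes32) ++ timestamp (uint64), then
// frame_number (uint16), frame_data_length (uint32), frame_data, is_last (1 byte).
type Frame struct {
	Random      [32]byte
	Timestamp   uint64
	FrameNumber uint16
	Data        []byte
	IsLast      bool
}

// Marshal encodes all integers big-endian, per the spec.
// Fixed overhead: 32 + 8 + 2 + 4 + 1 = 47 bytes.
func (f Frame) Marshal() []byte {
	buf := make([]byte, 47+len(f.Data))
	copy(buf[0:32], f.Random[:])
	binary.BigEndian.PutUint64(buf[32:40], f.Timestamp)
	binary.BigEndian.PutUint16(buf[40:42], f.FrameNumber)
	binary.BigEndian.PutUint32(buf[42:46], uint32(len(f.Data)))
	copy(buf[46:], f.Data)
	if f.IsLast {
		buf[46+len(f.Data)] = 1
	}
	return buf
}

func main() {
	f := Frame{Timestamp: 1660000000, FrameNumber: 0, Data: []byte("hello"), IsLast: true}
	fmt.Println(len(f.Marshal())) // 47 + 5 = 52 bytes
}
```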
@@ -652,6 +652,27 @@
     minimatch "^3.1.2"
     strip-json-comments "^3.1.1"

+"@eth-optimism/contracts-bedrock@0.5.2":
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/@eth-optimism/contracts-bedrock/-/contracts-bedrock-0.5.2.tgz#9e7c364afe0791e54f311d3a2135623448881936"
+  integrity sha512-5QFEmudbU9Q6rOxZibPRTqW6q/gKQ7H6toC9v645pYJ4Aw2mB+FIQDbeJ4hDLwV/GRjaHXOuU9uGEqLMk+y8Cw==
+  dependencies:
+    "@eth-optimism/core-utils" "^0.9.2"
+    "@ethereumjs/trie" "^5.0.0-beta.1"
+    "@ethereumjs/util" "^8.0.0-beta.1"
+    "@openzeppelin/contracts" "^4.5.0"
+    "@openzeppelin/contracts-upgradeable" "^4.5.2"
+    "@rari-capital/solmate" "https://github.com/rari-capital/solmate.git#8f9b23f8838670afda0fd8983f2c41e8037ae6bc"
+    bip39 "^3.0.4"
+    ds-test "https://github.com/dapphub/ds-test.git#9310e879db8ba3ea6d5c6489a579118fd264a3f5"
+    ethereumjs-wallet "^1.0.2"
+    ethers "^5.6.8"
+    excessively-safe-call "https://github.com/nomad-xyz/ExcessivelySafeCall.git#4fcdfd3593d21381f696c790fa6180b8ef559c1e"
+    forge-std "https://github.com/foundry-rs/forge-std.git#f18682b2874fc57d7c80a511fed0b35ec4201ffa"
+    hardhat "^2.9.6"
+    merkle-patricia-tree "^4.2.4"
+    rlp "^2.2.7"
+
 "@ethereum-waffle/chai@^3.4.0":
   version "3.4.0"
   resolved "https://registry.yarnpkg.com/@ethereum-waffle/chai/-/chai-3.4.0.tgz#2477877410a96bf370edd64df905b04fb9aba9d5"
@@ -951,6 +972,21 @@
     "@ethersproject/properties" "^5.6.0"
     "@ethersproject/strings" "^5.6.1"

+"@ethersproject/abi@5.6.4":
+  version "5.6.4"
+  resolved "https://registry.yarnpkg.com/@ethersproject/abi/-/abi-5.6.4.tgz#f6e01b6ed391a505932698ecc0d9e7a99ee60362"
+  integrity sha512-TTeZUlCeIHG6527/2goZA6gW5F8Emoc7MrZDC7hhP84aRGvW3TEdTnZR08Ls88YXM1m2SuK42Osw/jSi3uO8gg==
+  dependencies:
+    "@ethersproject/address" "^5.6.1"
+    "@ethersproject/bignumber" "^5.6.2"
+    "@ethersproject/bytes" "^5.6.1"
+    "@ethersproject/constants" "^5.6.1"
+    "@ethersproject/hash" "^5.6.1"
+    "@ethersproject/keccak256" "^5.6.1"
+    "@ethersproject/logger" "^5.6.0"
+    "@ethersproject/properties" "^5.6.0"
+    "@ethersproject/strings" "^5.6.1"
+
 "@ethersproject/abi@^5.0.12":
   version "5.4.1"
   resolved "https://registry.yarnpkg.com/@ethersproject/abi/-/abi-5.4.1.tgz#6ac28fafc9ef6f5a7a37e30356a2eb31fa05d39b"
@@ -1298,6 +1334,13 @@
   dependencies:
     "@ethersproject/logger" "^5.6.0"

+"@ethersproject/networks@5.6.4":
+  version "5.6.4"
+  resolved "https://registry.yarnpkg.com/@ethersproject/networks/-/networks-5.6.4.tgz#51296d8fec59e9627554f5a8a9c7791248c8dc07"
+  integrity sha512-KShHeHPahHI2UlWdtDMn2lJETcbtaJge4k7XSjDR9h79QTd6yQJmv6Cp2ZA4JdqWnhszAOLSuJEd9C0PRw7hSQ==
+  dependencies:
+    "@ethersproject/logger" "^5.6.0"
+
 "@ethersproject/pbkdf2@5.4.0", "@ethersproject/pbkdf2@^5.4.0":
   version "5.4.0"
   resolved "https://registry.yarnpkg.com/@ethersproject/pbkdf2/-/pbkdf2-5.4.0.tgz#ed88782a67fda1594c22d60d0ca911a9d669641c"
@@ -2821,6 +2864,11 @@
   resolved "https://registry.yarnpkg.com/@openzeppelin/contracts-upgradeable/-/contracts-upgradeable-4.7.1.tgz#f63fc384255d6ac139e0a2561aa207fd7c14183c"
   integrity sha512-5EFiZld3DYFd8aTL8eeMnhnaWh1/oXLXFNuFMrgF3b1DNPshF3LCyO7VR6lc+gac2URJ0BlVcZoCfkk/3MoEfg==

+"@openzeppelin/contracts-upgradeable@^4.5.2":
+  version "4.7.2"
+  resolved "https://registry.yarnpkg.com/@openzeppelin/contracts-upgradeable/-/contracts-upgradeable-4.7.2.tgz#414096e21f048200cbb7ad4fe4c6de2e822513bf"
+  integrity sha512-3dgc6qVmFch/uOmlmKnw5/v3JxwXcZD4T10/9CI1OUbX8AqjoZrBGKfxN1z3QxnIXRU/X31/BItJezJSDDTe7Q==
+
 "@openzeppelin/contracts@3.4.1-solc-0.7-2":
   version "3.4.1-solc-0.7-2"
   resolved "https://registry.yarnpkg.com/@openzeppelin/contracts/-/contracts-3.4.1-solc-0.7-2.tgz#371c67ebffe50f551c3146a9eec5fe6ffe862e92"
@@ -2851,6 +2899,11 @@
   resolved "https://registry.yarnpkg.com/@openzeppelin/contracts/-/contracts-4.4.0.tgz#4a1df71f736c31230bbbd634dfb006a756b51e6b"
   integrity sha512-dlKiZmDvJnGRLHojrDoFZJmsQVeltVeoiRN7RK+cf2FmkhASDEblE0RiaYdxPNsUZa6mRG8393b9bfyp+V5IAw==

+"@openzeppelin/contracts@^4.5.0":
+  version "4.7.2"
+  resolved "https://registry.yarnpkg.com/@openzeppelin/contracts/-/contracts-4.7.2.tgz#7587416fe2d35abf574193515b8971bfe9f64bc7"
+  integrity sha512-4n/JL9izql8303mPqPdubuna/DWEMbmOzWYUWyCPhjhiEr2w3nQrjE7vZz1fBF+wzzP6dZbIcsgqACk53c9FGA==
+
 "@primitivefi/hardhat-dodoc@^0.1.3":
   version "0.1.3"
   resolved "https://registry.yarnpkg.com/@primitivefi/hardhat-dodoc/-/hardhat-dodoc-0.1.3.tgz#338ecff24b93d3b43fa35a98909f6840af86c27c"
@@ -2858,6 +2911,11 @@
   dependencies:
     squirrelly "^8.0.8"

+"@rari-capital/solmate@git+https://github.com/rari-capital/solmate.git#8f9b23f8838670afda0fd8983f2c41e8037ae6bc":
+  version "7.0.0-alpha.3"
+  uid "8f9b23f8838670afda0fd8983f2c41e8037ae6bc"
+  resolved "git+https://github.com/rari-capital/solmate.git#8f9b23f8838670afda0fd8983f2c41e8037ae6bc"
+
 "@rari-capital/solmate@https://github.com/rari-capital/solmate.git#8f9b23f8838670afda0fd8983f2c41e8037ae6bc":
   version "7.0.0-alpha.3"
   resolved "https://github.com/rari-capital/solmate.git#8f9b23f8838670afda0fd8983f2c41e8037ae6bc"
@@ -7168,6 +7226,11 @@ drbg.js@^1.0.1:
     create-hash "^1.1.2"
     create-hmac "^1.1.4"

+"ds-test@git+https://github.com/dapphub/ds-test.git#9310e879db8ba3ea6d5c6489a579118fd264a3f5":
+  version "0.0.0"
+  uid "9310e879db8ba3ea6d5c6489a579118fd264a3f5"
+  resolved "git+https://github.com/dapphub/ds-test.git#9310e879db8ba3ea6d5c6489a579118fd264a3f5"
+
 "ds-test@https://github.com/dapphub/ds-test.git#9310e879db8ba3ea6d5c6489a579118fd264a3f5":
   version "0.0.0"
   resolved "https://github.com/dapphub/ds-test.git#9310e879db8ba3ea6d5c6489a579118fd264a3f5"
@@ -8496,6 +8559,42 @@ ethers@^5.5.2, ethers@^5.5.3, ethers@^5.6.8:
     "@ethersproject/web" "5.6.1"
     "@ethersproject/wordlists" "5.6.1"

+ethers@^5.6.9:
+  version "5.6.9"
+  resolved "https://registry.yarnpkg.com/ethers/-/ethers-5.6.9.tgz#4e12f8dfcb67b88ae7a78a9519b384c23c576a4d"
+  integrity sha512-lMGC2zv9HC5EC+8r429WaWu3uWJUCgUCt8xxKCFqkrFuBDZXDYIdzDUECxzjf2BMF8IVBByY1EBoGSL3RTm8RA==
+  dependencies:
+    "@ethersproject/abi" "5.6.4"
+    "@ethersproject/abstract-provider" "5.6.1"
+    "@ethersproject/abstract-signer" "5.6.2"
+    "@ethersproject/address" "5.6.1"
+    "@ethersproject/base64" "5.6.1"
+    "@ethersproject/basex" "5.6.1"
+    "@ethersproject/bignumber" "5.6.2"
+    "@ethersproject/bytes" "5.6.1"
+    "@ethersproject/constants" "5.6.1"
+    "@ethersproject/contracts" "5.6.2"
+    "@ethersproject/hash" "5.6.1"
+    "@ethersproject/hdnode" "5.6.2"
+    "@ethersproject/json-wallets" "5.6.1"
+    "@ethersproject/keccak256" "5.6.1"
+    "@ethersproject/logger" "5.6.0"
+    "@ethersproject/networks" "5.6.4"
+    "@ethersproject/pbkdf2" "5.6.1"
+    "@ethersproject/properties" "5.6.0"
+    "@ethersproject/providers" "5.6.8"
+    "@ethersproject/random" "5.6.1"
+    "@ethersproject/rlp" "5.6.1"
+    "@ethersproject/sha2" "5.6.1"
+    "@ethersproject/signing-key" "5.6.2"
+    "@ethersproject/solidity" "5.6.1"
+    "@ethersproject/strings" "5.6.1"
+    "@ethersproject/transactions" "5.6.2"
+    "@ethersproject/units" "5.6.1"
+    "@ethersproject/wallet" "5.6.2"
+    "@ethersproject/web" "5.6.1"
+    "@ethersproject/wordlists" "5.6.1"
+
 ethjs-unit@0.1.6:
   version "0.1.6"
   resolved "https://registry.yarnpkg.com/ethjs-unit/-/ethjs-unit-0.1.6.tgz#c665921e476e87bce2a9d588a6fe0405b2c41699"
@@ -8540,6 +8639,10 @@ evp_bytestokey@^1.0.0, evp_bytestokey@^1.0.3:
     md5.js "^1.3.4"
     safe-buffer "^5.1.1"

+"excessively-safe-call@git+https://github.com/nomad-xyz/ExcessivelySafeCall.git#4fcdfd3593d21381f696c790fa6180b8ef559c1e":
+  version "0.0.1-rc.1"
+  resolved "git+https://github.com/nomad-xyz/ExcessivelySafeCall.git#4fcdfd3593d21381f696c790fa6180b8ef559c1e"
+
 "excessively-safe-call@https://github.com/nomad-xyz/ExcessivelySafeCall.git#81cd99ce3e69117d665d7601c330ea03b97acce0":
   version "0.0.1-rc.1"
   resolved "https://github.com/nomad-xyz/ExcessivelySafeCall.git#81cd99ce3e69117d665d7601c330ea03b97acce0"
@@ -9056,6 +9159,11 @@ forever-agent@~0.6.1:
   resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
   integrity sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=

+"forge-std@git+https://github.com/foundry-rs/forge-std.git#f18682b2874fc57d7c80a511fed0b35ec4201ffa":
+  version "0.0.0"
+  uid f18682b2874fc57d7c80a511fed0b35ec4201ffa
+  resolved "git+https://github.com/foundry-rs/forge-std.git#f18682b2874fc57d7c80a511fed0b35ec4201ffa"
+
 "forge-std@https://github.com/foundry-rs/forge-std.git#f18682b2874fc57d7c80a511fed0b35ec4201ffa":
   version "0.0.0"
   resolved "https://github.com/foundry-rs/forge-std.git#f18682b2874fc57d7c80a511fed0b35ec4201ffa"
...