Commit 72ac2547 authored by Matthew Slipper's avatar Matthew Slipper

Merge branch 'develop' into tushar/client-pod-issue-131/initializer-ci-check

parents 9146ab86 1bc3691b
...@@ -58,6 +58,19 @@ commands: ...@@ -58,6 +58,19 @@ commands:
cd ops/check-changed cd ops/check-changed
pip3 install -r requirements.txt pip3 install -r requirements.txt
python3 main.py "<<parameters.patterns>>" python3 main.py "<<parameters.patterns>>"
notify-failures-on-develop:
description: "Notify Slack"
parameters:
channel:
type: string
default: C03N11M0BBN
steps:
- slack/notify:
channel: << parameters.channel >>
event: fail
template: basic_fail_1
branch_pattern: develop
jobs: jobs:
cannon-go-lint-and-test: cannon-go-lint-and-test:
docker: docker:
...@@ -231,6 +244,7 @@ jobs: ...@@ -231,6 +244,7 @@ jobs:
DOCKER_OUTPUT_DESTINATION="" DOCKER_OUTPUT_DESTINATION=""
if [ "<<parameters.publish>>" == "true" ]; then if [ "<<parameters.publish>>" == "true" ]; then
gcloud auth configure-docker <<parameters.registry>>
echo "Building for platforms $PLATFORMS and then publishing to registry" echo "Building for platforms $PLATFORMS and then publishing to registry"
DOCKER_OUTPUT_DESTINATION="--push" DOCKER_OUTPUT_DESTINATION="--push"
if [ "<<parameters.save_image_tag>>" != "" ]; then if [ "<<parameters.save_image_tag>>" != "" ]; then
...@@ -259,6 +273,10 @@ jobs: ...@@ -259,6 +273,10 @@ jobs:
<<parameters.docker_name>> <<parameters.docker_name>>
no_output_timeout: 45m no_output_timeout: 45m
- when:
condition: "<<parameters.publish>>"
steps:
- notify-failures-on-develop
- when: - when:
condition: "<<parameters.save_image_tag>>" condition: "<<parameters.save_image_tag>>"
steps: steps:
...@@ -271,20 +289,6 @@ jobs: ...@@ -271,20 +289,6 @@ jobs:
root: /tmp/docker_images root: /tmp/docker_images
paths: # only write the one file, to avoid concurrent workspace-file additions paths: # only write the one file, to avoid concurrent workspace-file additions
- "<<parameters.docker_name>>.tar" - "<<parameters.docker_name>>.tar"
- when:
condition: "<<parameters.publish>>"
steps:
- run:
name: Publish
command: |
gcloud auth configure-docker <<parameters.registry>>
IMAGE_BASE="<<parameters.registry>>/<<parameters.repo>>/<<parameters.docker_name>>"
# tags, without the '-t ' here, so we can loop over them
DOCKER_TAGS="$(echo -ne "<<parameters.docker_tags>>" | sed "s/,/\n/g" | sed "s/[^a-zA-Z0-9\n]/-/g" | sed -e "s|^|${IMAGE_BASE}:|")"
for docker_image_tag in $DOCKER_TAGS; do
docker image push $docker_image_tag
done
no_output_timeout: 45m
- when: - when:
condition: "<<parameters.release>>" condition: "<<parameters.release>>"
steps: steps:
...@@ -598,10 +602,7 @@ jobs: ...@@ -598,10 +602,7 @@ jobs:
- run: - run:
name: Check TODO issues name: Check TODO issues
command: ./ops/scripts/todo-checker.sh --verbose command: ./ops/scripts/todo-checker.sh --verbose
- slack/notify: - notify-failures-on-develop
channel: C03N11M0BBN
event: fail
template: basic_fail_1
bedrock-markdown: bedrock-markdown:
machine: machine:
...@@ -645,10 +646,8 @@ jobs: ...@@ -645,10 +646,8 @@ jobs:
name: link lint name: link lint
command: | command: |
make bedrock-markdown-links make bedrock-markdown-links
- slack/notify: - notify-failures-on-develop:
channel: C055R639XT9 #notify-link-check channel: C055R639XT9 #notify-link-check
event: fail
template: basic_fail_1
fuzz-golang: fuzz-golang:
parameters: parameters:
...@@ -785,6 +784,10 @@ jobs: ...@@ -785,6 +784,10 @@ jobs:
target: target:
description: The make target to execute description: The make target to execute
type: string type: string
cannon_enabled:
description: Whether to include cannon tests
default: true
type: boolean
docker: docker:
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest - image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
resource_class: xlarge resource_class: xlarge
...@@ -810,7 +813,8 @@ jobs: ...@@ -810,7 +813,8 @@ jobs:
command: go tool dist list | grep mips command: go tool dist list | grep mips
- run: - run:
name: run tests name: run tests
command: command: |
export OP_E2E_CANNON_ENABLED="<<parameters.cannon_enabled>>"
# Note: We don't use circle CI test splits because we need to split by test name, not by package. There is an additional # Note: We don't use circle CI test splits because we need to split by test name, not by package. There is an additional
# constraint that gotestsum does not currently (nor likely will) accept files from different pacakges when building. # constraint that gotestsum does not currently (nor likely will) accept files from different pacakges when building.
JUNIT_FILE=/tmp/test-results/<<parameters.module>>_<<parameters.target>>.xml make <<parameters.target>> JUNIT_FILE=/tmp/test-results/<<parameters.module>>_<<parameters.target>>.xml make <<parameters.target>>
...@@ -1127,6 +1131,7 @@ jobs: ...@@ -1127,6 +1131,7 @@ jobs:
- run: - run:
name: "Semgrep scan" name: "Semgrep scan"
command: semgrep ci command: semgrep ci
- notify-failures-on-develop
go-mod-download: go-mod-download:
docker: docker:
...@@ -1186,10 +1191,7 @@ jobs: ...@@ -1186,10 +1191,7 @@ jobs:
command: | command: |
make verify-goerli make verify-goerli
working_directory: op-program working_directory: op-program
- slack/notify: - notify-failures-on-develop
channel: C03N11M0BBN
event: fail
template: basic_fail_1
op-program-compat: op-program-compat:
docker: docker:
...@@ -1396,6 +1398,7 @@ workflows: ...@@ -1396,6 +1398,7 @@ workflows:
name: op-e2e-HTTP-tests name: op-e2e-HTTP-tests
module: op-e2e module: op-e2e
target: test-http target: test-http
cannon_enabled: false
requires: requires:
- op-stack-go-lint - op-stack-go-lint
- devnet-allocs - devnet-allocs
...@@ -1403,6 +1406,7 @@ workflows: ...@@ -1403,6 +1406,7 @@ workflows:
name: op-e2e-ext-geth-tests name: op-e2e-ext-geth-tests
module: op-e2e module: op-e2e
target: test-external-geth target: test-external-geth
cannon_enabled: false
requires: requires:
- op-stack-go-lint - op-stack-go-lint
- devnet-allocs - devnet-allocs
...@@ -1532,7 +1536,7 @@ workflows: ...@@ -1532,7 +1536,7 @@ workflows:
branches: branches:
ignore: /.*/ ignore: /.*/
docker_name: op-heartbeat docker_name: op-heartbeat
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>> docker_tags: <<pipeline.git.revision>>
requires: ['op-stack-go-docker-build-release'] requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64" platforms: "linux/amd64,linux/arm64"
publish: true publish: true
...@@ -1547,7 +1551,7 @@ workflows: ...@@ -1547,7 +1551,7 @@ workflows:
branches: branches:
ignore: /.*/ ignore: /.*/
docker_name: op-node docker_name: op-node
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>> docker_tags: <<pipeline.git.revision>>
requires: ['op-stack-go-docker-build-release'] requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64" platforms: "linux/amd64,linux/arm64"
publish: true publish: true
...@@ -1562,7 +1566,7 @@ workflows: ...@@ -1562,7 +1566,7 @@ workflows:
branches: branches:
ignore: /.*/ ignore: /.*/
docker_name: op-batcher docker_name: op-batcher
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>> docker_tags: <<pipeline.git.revision>>
requires: ['op-stack-go-docker-build-release'] requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64" platforms: "linux/amd64,linux/arm64"
publish: true publish: true
...@@ -1577,7 +1581,7 @@ workflows: ...@@ -1577,7 +1581,7 @@ workflows:
branches: branches:
ignore: /.*/ ignore: /.*/
docker_name: op-proposer docker_name: op-proposer
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>> docker_tags: <<pipeline.git.revision>>
requires: ['op-stack-go-docker-build-release'] requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64" platforms: "linux/amd64,linux/arm64"
publish: true publish: true
...@@ -1592,7 +1596,7 @@ workflows: ...@@ -1592,7 +1596,7 @@ workflows:
branches: branches:
ignore: /.*/ ignore: /.*/
docker_name: op-challenger docker_name: op-challenger
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>> docker_tags: <<pipeline.git.revision>>
requires: ['op-stack-go-docker-build-release'] requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64" platforms: "linux/amd64,linux/arm64"
publish: true publish: true
...@@ -1607,7 +1611,7 @@ workflows: ...@@ -1607,7 +1611,7 @@ workflows:
branches: branches:
ignore: /.*/ ignore: /.*/
docker_name: op-ufm docker_name: op-ufm
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>> docker_tags: <<pipeline.git.revision>>
publish: true publish: true
release: true release: true
context: context:
...@@ -1622,7 +1626,7 @@ workflows: ...@@ -1622,7 +1626,7 @@ workflows:
branches: branches:
ignore: /.*/ ignore: /.*/
docker_name: proxyd docker_name: proxyd
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>> docker_tags: <<pipeline.git.revision>>
publish: true publish: true
release: true release: true
context: context:
...@@ -1637,7 +1641,7 @@ workflows: ...@@ -1637,7 +1641,7 @@ workflows:
branches: branches:
ignore: /.*/ ignore: /.*/
docker_name: indexer docker_name: indexer
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>> docker_tags: <<pipeline.git.revision>>
publish: true publish: true
release: true release: true
context: context:
...@@ -1711,6 +1715,7 @@ workflows: ...@@ -1711,6 +1715,7 @@ workflows:
platforms: "linux/amd64,linux/arm64" platforms: "linux/amd64,linux/arm64"
context: context:
- oplabs-gcr - oplabs-gcr
- slack
- docker-build: - docker-build:
name: op-node-docker-publish name: op-node-docker-publish
docker_name: op-node docker_name: op-node
...@@ -1720,6 +1725,7 @@ workflows: ...@@ -1720,6 +1725,7 @@ workflows:
publish: true publish: true
context: context:
- oplabs-gcr - oplabs-gcr
- slack
- docker-build: - docker-build:
name: op-batcher-docker-publish name: op-batcher-docker-publish
docker_name: op-batcher docker_name: op-batcher
...@@ -1729,6 +1735,7 @@ workflows: ...@@ -1729,6 +1735,7 @@ workflows:
publish: true publish: true
context: context:
- oplabs-gcr - oplabs-gcr
- slack
- docker-build: - docker-build:
name: op-program-docker-publish name: op-program-docker-publish
docker_name: op-program docker_name: op-program
...@@ -1738,6 +1745,7 @@ workflows: ...@@ -1738,6 +1745,7 @@ workflows:
publish: true publish: true
context: context:
- oplabs-gcr - oplabs-gcr
- slack
- docker-build: - docker-build:
name: op-proposer-docker-publish name: op-proposer-docker-publish
docker_name: op-proposer docker_name: op-proposer
...@@ -1747,6 +1755,7 @@ workflows: ...@@ -1747,6 +1755,7 @@ workflows:
publish: true publish: true
context: context:
- oplabs-gcr - oplabs-gcr
- slack
- docker-build: - docker-build:
name: op-challenger-docker-publish name: op-challenger-docker-publish
docker_name: op-challenger docker_name: op-challenger
...@@ -1756,6 +1765,7 @@ workflows: ...@@ -1756,6 +1765,7 @@ workflows:
publish: true publish: true
context: context:
- oplabs-gcr - oplabs-gcr
- slack
- docker-build: - docker-build:
name: op-heartbeat-docker-publish name: op-heartbeat-docker-publish
docker_name: op-heartbeat docker_name: op-heartbeat
...@@ -1765,6 +1775,7 @@ workflows: ...@@ -1765,6 +1775,7 @@ workflows:
publish: true publish: true
context: context:
- oplabs-gcr - oplabs-gcr
- slack
- docker-build: - docker-build:
name: indexer-docker-publish name: indexer-docker-publish
docker_name: indexer docker_name: indexer
...@@ -1772,6 +1783,7 @@ workflows: ...@@ -1772,6 +1783,7 @@ workflows:
publish: true publish: true
context: context:
- oplabs-gcr - oplabs-gcr
- slack
platforms: "linux/amd64,linux/arm64" platforms: "linux/amd64,linux/arm64"
- docker-build: - docker-build:
name: chain-mon-docker-publish name: chain-mon-docker-publish
...@@ -1780,6 +1792,7 @@ workflows: ...@@ -1780,6 +1792,7 @@ workflows:
publish: true publish: true
context: context:
- oplabs-gcr - oplabs-gcr
- slack
- docker-build: - docker-build:
name: ufm-metamask-docker-publish name: ufm-metamask-docker-publish
docker_name: ufm-metamask docker_name: ufm-metamask
...@@ -1787,3 +1800,4 @@ workflows: ...@@ -1787,3 +1800,4 @@ workflows:
publish: true publish: true
context: context:
- oplabs-gcr - oplabs-gcr
- slack
...@@ -17,6 +17,3 @@ ...@@ -17,6 +17,3 @@
path = packages/contracts-bedrock/lib/safe-contracts path = packages/contracts-bedrock/lib/safe-contracts
url = https://github.com/safe-global/safe-contracts url = https://github.com/safe-global/safe-contracts
branch = v1.4.0 branch = v1.4.0
...@@ -3,6 +3,9 @@ ITESTS_L2_HOST=http://localhost:9545 ...@@ -3,6 +3,9 @@ ITESTS_L2_HOST=http://localhost:9545
BEDROCK_TAGS_REMOTE?=origin BEDROCK_TAGS_REMOTE?=origin
OP_STACK_GO_BUILDER?=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest OP_STACK_GO_BUILDER?=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
# Requires at least Python v3.9; specify a minor version below if needed
PYTHON?=python3
build: build-go build-ts build: build-go build-ts
.PHONY: build .PHONY: build
...@@ -114,14 +117,14 @@ pre-devnet: ...@@ -114,14 +117,14 @@ pre-devnet:
devnet-up: pre-devnet devnet-up: pre-devnet
./ops/scripts/newer-file.sh .devnet/allocs-l1.json ./packages/contracts-bedrock \ ./ops/scripts/newer-file.sh .devnet/allocs-l1.json ./packages/contracts-bedrock \
|| make devnet-allocs || make devnet-allocs
PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. PYTHONPATH=./bedrock-devnet $(PYTHON) ./bedrock-devnet/main.py --monorepo-dir=.
.PHONY: devnet-up .PHONY: devnet-up
# alias for devnet-up # alias for devnet-up
devnet-up-deploy: devnet-up devnet-up-deploy: devnet-up
devnet-test: pre-devnet devnet-test: pre-devnet
PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. --test PYTHONPATH=./bedrock-devnet $(PYTHON) ./bedrock-devnet/main.py --monorepo-dir=. --test
.PHONY: devnet-test .PHONY: devnet-test
devnet-down: devnet-down:
...@@ -137,7 +140,7 @@ devnet-clean: ...@@ -137,7 +140,7 @@ devnet-clean:
.PHONY: devnet-clean .PHONY: devnet-clean
devnet-allocs: pre-devnet devnet-allocs: pre-devnet
PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. --allocs PYTHONPATH=./bedrock-devnet $(PYTHON) ./bedrock-devnet/main.py --monorepo-dir=. --allocs
devnet-logs: devnet-logs:
@(cd ./ops-bedrock && docker compose logs -f) @(cd ./ops-bedrock && docker compose logs -f)
......
comment: false codecov:
require_ci_to_pass: false
comment:
layout: "diff, flags, files"
behavior: default
require_changes: false
flags:
- contracts-bedrock-tests
ignore: ignore:
- "op-e2e" - "op-e2e"
- "**/*.t.sol"
- "op-bindings/bindings/*.go" - "op-bindings/bindings/*.go"
- "**/*.t.sol"
- "packages/contracts-bedrock/test/**/*.sol"
- "packages/contracts-bedrock/contracts/vendor/WETH9.sol" - "packages/contracts-bedrock/contracts/vendor/WETH9.sol"
- 'packages/contracts-bedrock/contracts/EAS/**/*.sol' - 'packages/contracts-bedrock/contracts/EAS/**/*.sol'
coverage: coverage:
...@@ -13,6 +22,7 @@ coverage: ...@@ -13,6 +22,7 @@ coverage:
threshold: 0% # coverage is not allowed to reduce vs. the PR base threshold: 0% # coverage is not allowed to reduce vs. the PR base
base: auto base: auto
informational: true informational: true
enabled: true
project: project:
default: default:
informational: true informational: true
......
...@@ -158,7 +158,7 @@ target "ufm-metamask" { ...@@ -158,7 +158,7 @@ target "ufm-metamask" {
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ufm-metamask:${tag}"] tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ufm-metamask:${tag}"]
} }
type "chain-mon" { target "chain-mon" {
dockerfile = "./ops/docker/Dockerfile.packages" dockerfile = "./ops/docker/Dockerfile.packages"
context = "." context = "."
args = { args = {
...@@ -173,9 +173,9 @@ type "chain-mon" { ...@@ -173,9 +173,9 @@ type "chain-mon" {
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/chain-mon:${tag}"] tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/chain-mon:${tag}"]
} }
type "ci-builder" { target "ci-builder" {
dockerfile = "Dockerfile" dockerfile = "./ops/docker/ci-builder/Dockerfile"
context = "ops/docker/ci-builder" context = "."
platforms = split(",", PLATFORMS) platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ci-builder:${tag}"] tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ci-builder:${tag}"]
} }
......
...@@ -16,6 +16,7 @@ func newWithdrawalResponse(withdrawals *database.L2BridgeWithdrawalsResponse) mo ...@@ -16,6 +16,7 @@ func newWithdrawalResponse(withdrawals *database.L2BridgeWithdrawalsResponse) mo
item := models.WithdrawalItem{ item := models.WithdrawalItem{
Guid: withdrawal.L2BridgeWithdrawal.TransactionWithdrawalHash.String(), Guid: withdrawal.L2BridgeWithdrawal.TransactionWithdrawalHash.String(),
L2BlockHash: withdrawal.L2BlockHash.String(), L2BlockHash: withdrawal.L2BlockHash.String(),
Timestamp: withdrawal.L2BridgeWithdrawal.Tx.Timestamp,
From: withdrawal.L2BridgeWithdrawal.Tx.FromAddress.String(), From: withdrawal.L2BridgeWithdrawal.Tx.FromAddress.String(),
To: withdrawal.L2BridgeWithdrawal.Tx.ToAddress.String(), To: withdrawal.L2BridgeWithdrawal.Tx.ToAddress.String(),
TransactionHash: withdrawal.L2TransactionHash.String(), TransactionHash: withdrawal.L2TransactionHash.String(),
......
This diff is collapsed.
This diff is collapsed.
package actions package actions
import ( import (
"context"
"math/big" "math/big"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
...@@ -98,22 +95,12 @@ func (s *L1Miner) ActL1IncludeTx(from common.Address) Action { ...@@ -98,22 +95,12 @@ func (s *L1Miner) ActL1IncludeTx(from common.Address) Action {
t.InvalidAction("no tx inclusion when not building l1 block") t.InvalidAction("no tx inclusion when not building l1 block")
return return
} }
var i uint64 getPendingIndex := func(from common.Address) uint64 {
var txs []*types.Transaction return s.pendingIndices[from]
var q []*types.Transaction }
// Wait for the tx to be in the pending tx queue tx := firstValidTx(t, from, getPendingIndex, s.eth.TxPool().ContentFrom, s.EthClient().NonceAt)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err := wait.For(ctx, time.Second, func() (bool, error) {
i = s.pendingIndices[from]
txs, q = s.eth.TxPool().ContentFrom(from)
return uint64(len(txs)) > i, nil
})
require.NoError(t, err,
"no pending txs from %s, and have %d unprocessable queued txs from this account: %w", from, len(q), err)
tx := txs[i]
s.IncludeTx(t, tx) s.IncludeTx(t, tx)
s.pendingIndices[from] = i + 1 // won't retry the tx s.pendingIndices[from] = s.pendingIndices[from] + 1 // won't retry the tx
} }
} }
......
package actions package actions
import ( import (
"context"
"errors" "errors"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi" "github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -179,22 +176,8 @@ func (e *L2Engine) ActL2IncludeTx(from common.Address) Action { ...@@ -179,22 +176,8 @@ func (e *L2Engine) ActL2IncludeTx(from common.Address) Action {
return return
} }
var i uint64 tx := firstValidTx(t, from, e.engineApi.PendingIndices, e.eth.TxPool().ContentFrom, e.EthClient().NonceAt)
var txs []*types.Transaction err := e.engineApi.IncludeTx(tx, from)
var q []*types.Transaction
// Wait for the tx to be in the pending tx queue
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err := wait.For(ctx, time.Second, func() (bool, error) {
i = e.engineApi.PendingIndices(from)
txs, q = e.eth.TxPool().ContentFrom(from)
return uint64(len(txs)) > i, nil
})
require.NoError(t, err,
"no pending txs from %s, and have %d unprocessable queued txs from this account: %w", from, len(q), err)
tx := txs[i]
err = e.engineApi.IncludeTx(tx, from)
if errors.Is(err, engineapi.ErrNotBuildingBlock) { if errors.Is(err, engineapi.ErrNotBuildingBlock) {
t.InvalidAction(err.Error()) t.InvalidAction(err.Error())
} else if errors.Is(err, engineapi.ErrUsesTooMuchGas) { } else if errors.Is(err, engineapi.ErrUsesTooMuchGas) {
......
package actions
import (
"context"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/require"
)
// firstValidTx finds the first transaction that is valid for inclusion from the specified address.
// It uses a waiter and filtering of already included transactions to avoid race conditions with the async
// updates to the transaction pool.
func firstValidTx(
t Testing,
from common.Address,
pendingIndices func(common.Address) uint64,
contentFrom func(common.Address) ([]*types.Transaction, []*types.Transaction),
nonceAt func(context.Context, common.Address, *big.Int) (uint64, error),
) *types.Transaction {
var i uint64
var txs []*types.Transaction
var q []*types.Transaction
// Wait for the tx to be in the pending tx queue
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err := wait.For(ctx, time.Second, func() (bool, error) {
i = pendingIndices(from)
txs, q = contentFrom(from)
// Remove any transactions that have already been included in the head block
// The tx pool only prunes included transactions async so they may still be in the list
nonce, err := nonceAt(ctx, from, nil)
if err != nil {
return false, err
}
for len(txs) > 0 && txs[0].Nonce() < nonce {
t.Logf("Removing already included transaction from list of length %v", len(txs))
txs = txs[1:]
}
return uint64(len(txs)) > i, nil
})
require.NoError(t, err,
"no pending txs from %s, and have %d unprocessable queued txs from this account: %w", from, len(q), err)
return txs[i]
}
...@@ -3,6 +3,7 @@ package disputegame ...@@ -3,6 +3,7 @@ package disputegame
import ( import (
"context" "context"
"errors" "errors"
"time"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -96,7 +97,9 @@ func (d *DishonestHelper) ExhaustDishonestClaims(ctx context.Context) { ...@@ -96,7 +97,9 @@ func (d *DishonestHelper) ExhaustDishonestClaims(ctx context.Context) {
var numClaimsSeen int64 var numClaimsSeen int64
for { for {
newCount, err := d.WaitForNewClaim(ctx, numClaimsSeen) // Use a short timeout since we don't know the challenger will respond,
// and this is only designed for the alphabet game where the response should be fast.
newCount, err := d.waitForNewClaim(ctx, numClaimsSeen, 30*time.Second)
if errors.Is(err, context.DeadlineExceeded) { if errors.Is(err, context.DeadlineExceeded) {
// we assume that the honest challenger has stopped responding // we assume that the honest challenger has stopped responding
// There's nothing to respond to. // There's nothing to respond to.
......
...@@ -304,7 +304,10 @@ func (g *FaultGameHelper) ChallengeRootClaim(ctx context.Context, performMove Mo ...@@ -304,7 +304,10 @@ func (g *FaultGameHelper) ChallengeRootClaim(ctx context.Context, performMove Mo
} }
func (g *FaultGameHelper) WaitForNewClaim(ctx context.Context, checkPoint int64) (int64, error) { func (g *FaultGameHelper) WaitForNewClaim(ctx context.Context, checkPoint int64) (int64, error) {
timedCtx, cancel := context.WithTimeout(ctx, defaultTimeout) return g.waitForNewClaim(ctx, checkPoint, defaultTimeout)
}
func (g *FaultGameHelper) waitForNewClaim(ctx context.Context, checkPoint int64, timeout time.Duration) (int64, error) {
timedCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel() defer cancel()
var newClaimLen int64 var newClaimLen int64
err := wait.For(timedCtx, time.Second, func() (bool, error) { err := wait.For(timedCtx, time.Second, func() (bool, error) {
......
...@@ -2,6 +2,7 @@ package op_e2e ...@@ -2,6 +2,7 @@ package op_e2e
import ( import (
"encoding/json" "encoding/json"
"errors"
"math/big" "math/big"
"os" "os"
"os/exec" "os/exec"
...@@ -51,6 +52,11 @@ func (eec *ExternalEthClient) Close() error { ...@@ -51,6 +52,11 @@ func (eec *ExternalEthClient) Close() error {
select { select {
case <-time.After(5 * time.Second): case <-time.After(5 * time.Second):
eec.Session.Kill() eec.Session.Kill()
select {
case <-time.After(30 * time.Second):
return errors.New("external client failed to terminate")
case <-eec.Session.Exited:
}
case <-eec.Session.Exited: case <-eec.Session.Exited:
} }
return nil return nil
......
...@@ -18,7 +18,7 @@ import ( ...@@ -18,7 +18,7 @@ import (
) )
func TestMultipleCannonGames(t *testing.T) { func TestMultipleCannonGames(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
ctx := context.Background() ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t) sys, l1Client := startFaultDisputeSystem(t)
...@@ -78,7 +78,7 @@ func TestMultipleCannonGames(t *testing.T) { ...@@ -78,7 +78,7 @@ func TestMultipleCannonGames(t *testing.T) {
} }
func TestMultipleGameTypes(t *testing.T) { func TestMultipleGameTypes(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
ctx := context.Background() ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t) sys, l1Client := startFaultDisputeSystem(t)
...@@ -277,7 +277,7 @@ func TestChallengerCompleteExhaustiveDisputeGame(t *testing.T) { ...@@ -277,7 +277,7 @@ func TestChallengerCompleteExhaustiveDisputeGame(t *testing.T) {
} }
func TestCannonDisputeGame(t *testing.T) { func TestCannonDisputeGame(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
tests := []struct { tests := []struct {
name string name string
...@@ -328,7 +328,7 @@ func TestCannonDisputeGame(t *testing.T) { ...@@ -328,7 +328,7 @@ func TestCannonDisputeGame(t *testing.T) {
} }
func TestCannonDefendStep(t *testing.T) { func TestCannonDefendStep(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
ctx := context.Background() ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t) sys, l1Client := startFaultDisputeSystem(t)
...@@ -370,7 +370,7 @@ func TestCannonDefendStep(t *testing.T) { ...@@ -370,7 +370,7 @@ func TestCannonDefendStep(t *testing.T) {
} }
func TestCannonProposedOutputRootInvalid(t *testing.T) { func TestCannonProposedOutputRootInvalid(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
// honestStepsFail attempts to perform both an attack and defend step using the correct trace. // honestStepsFail attempts to perform both an attack and defend step using the correct trace.
honestStepsFail := func(ctx context.Context, game *disputegame.CannonGameHelper, correctTrace *disputegame.HonestHelper, parentClaimIdx int64) { honestStepsFail := func(ctx context.Context, game *disputegame.CannonGameHelper, correctTrace *disputegame.HonestHelper, parentClaimIdx int64) {
// Attack step should fail // Attack step should fail
...@@ -448,7 +448,7 @@ func TestCannonProposedOutputRootInvalid(t *testing.T) { ...@@ -448,7 +448,7 @@ func TestCannonProposedOutputRootInvalid(t *testing.T) {
} }
func TestCannonPoisonedPostState(t *testing.T) { func TestCannonPoisonedPostState(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
ctx := context.Background() ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t) sys, l1Client := startFaultDisputeSystem(t)
...@@ -558,7 +558,7 @@ func setupDisputeGameForInvalidOutputRoot(t *testing.T, outputRoot common.Hash) ...@@ -558,7 +558,7 @@ func setupDisputeGameForInvalidOutputRoot(t *testing.T, outputRoot common.Hash)
} }
func TestCannonChallengeWithCorrectRoot(t *testing.T) { func TestCannonChallengeWithCorrectRoot(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
ctx := context.Background() ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t) sys, l1Client := startFaultDisputeSystem(t)
......
...@@ -7,9 +7,18 @@ import ( ...@@ -7,9 +7,18 @@ import (
var enableParallelTesting bool = os.Getenv("OP_E2E_DISABLE_PARALLEL") != "true" var enableParallelTesting bool = os.Getenv("OP_E2E_DISABLE_PARALLEL") != "true"
func InitParallel(t *testing.T) { func InitParallel(t *testing.T, opts ...func(t *testing.T)) {
t.Helper() t.Helper()
if enableParallelTesting { if enableParallelTesting {
t.Parallel() t.Parallel()
} }
for _, opt := range opts {
opt(t)
}
}
func UsesCannon(t *testing.T) {
if os.Getenv("OP_E2E_CANNON_ENABLED") == "false" {
t.Skip("Skipping cannon test")
}
} }
...@@ -102,7 +102,7 @@ func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, e ...@@ -102,7 +102,7 @@ func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, e
) )
require.Nil(t, err) require.Nil(t, err)
l2Client, err := ethclient.Dial(node.HTTPEndpoint()) l2Client, err := ethclient.Dial(selectEndpoint(node))
require.Nil(t, err) require.Nil(t, err)
genesisPayload, err := eth.BlockAsPayload(l2GenesisBlock, cfg.DeployConfig.CanyonTime(l2GenesisBlock.Time())) genesisPayload, err := eth.BlockAsPayload(l2GenesisBlock, cfg.DeployConfig.CanyonTime(l2GenesisBlock.Time()))
......
...@@ -758,9 +758,12 @@ func (sys *System) newMockNetPeer() (host.Host, error) { ...@@ -758,9 +758,12 @@ func (sys *System) newMockNetPeer() (host.Host, error) {
return sys.Mocknet.AddPeerWithPeerstore(p, eps) return sys.Mocknet.AddPeerWithPeerstore(p, eps)
} }
func UseHTTP() bool {
return os.Getenv("OP_E2E_USE_HTTP") == "true"
}
func selectEndpoint(node EthInstance) string { func selectEndpoint(node EthInstance) string {
useHTTP := os.Getenv("OP_E2E_USE_HTTP") == "true" if UseHTTP() {
if useHTTP {
log.Info("using HTTP client") log.Info("using HTTP client")
return node.HTTPEndpoint() return node.HTTPEndpoint()
} }
...@@ -785,9 +788,8 @@ type WSOrHTTPEndpoint interface { ...@@ -785,9 +788,8 @@ type WSOrHTTPEndpoint interface {
} }
func configureL2(rollupNodeCfg *rollupNode.Config, l2Node WSOrHTTPEndpoint, jwtSecret [32]byte) { func configureL2(rollupNodeCfg *rollupNode.Config, l2Node WSOrHTTPEndpoint, jwtSecret [32]byte) {
useHTTP := os.Getenv("OP_E2E_USE_HTTP") == "true"
l2EndpointConfig := l2Node.WSAuthEndpoint() l2EndpointConfig := l2Node.WSAuthEndpoint()
if useHTTP { if UseHTTP() {
l2EndpointConfig = l2Node.HTTPAuthEndpoint() l2EndpointConfig = l2Node.HTTPAuthEndpoint()
} }
......
...@@ -82,6 +82,12 @@ var ( ...@@ -82,6 +82,12 @@ var (
return &out return &out
}(), }(),
} }
L1RethDBPath = &cli.StringFlag{
Name: "l1.rethdb",
Usage: "The L1 RethDB path, used to fetch receipts for L1 blocks. Only applicable when using the `reth_db` RPC kind with `l1.rpckind`.",
EnvVars: prefixEnvVars("L1_RETHDB"),
Required: false,
}
L1RPCRateLimit = &cli.Float64Flag{ L1RPCRateLimit = &cli.Float64Flag{
Name: "l1.rpc-rate-limit", Name: "l1.rpc-rate-limit",
Usage: "Optional self-imposed global rate-limit on L1 RPC requests, specified in requests / second. Disabled if set to 0.", Usage: "Optional self-imposed global rate-limit on L1 RPC requests, specified in requests / second. Disabled if set to 0.",
...@@ -256,6 +262,7 @@ var ( ...@@ -256,6 +262,7 @@ var (
CanyonOverrideFlag = &cli.Uint64Flag{ CanyonOverrideFlag = &cli.Uint64Flag{
Name: "override.canyon", Name: "override.canyon",
Usage: "Manually specify the Canyon fork timestamp, overriding the bundled setting", Usage: "Manually specify the Canyon fork timestamp, overriding the bundled setting",
EnvVars: prefixEnvVars("OVERRIDE_CANYON"),
Hidden: false, Hidden: false,
} }
) )
...@@ -303,6 +310,7 @@ var optionalFlags = []cli.Flag{ ...@@ -303,6 +310,7 @@ var optionalFlags = []cli.Flag{
RollupHalt, RollupHalt,
RollupLoadProtocolVersions, RollupLoadProtocolVersions,
CanyonOverrideFlag, CanyonOverrideFlag,
L1RethDBPath,
} }
// Flags contains the list of configuration options available to the binary. // Flags contains the list of configuration options available to the binary.
......
...@@ -60,6 +60,9 @@ type Config struct { ...@@ -60,6 +60,9 @@ type Config struct {
// Cancel to request a premature shutdown of the node itself, e.g. when halting. This may be nil. // Cancel to request a premature shutdown of the node itself, e.g. when halting. This may be nil.
Cancel context.CancelCauseFunc Cancel context.CancelCauseFunc
// [OPTIONAL] The reth DB path to read receipts from
RethDBPath string
} }
type RPCConfig struct { type RPCConfig struct {
......
...@@ -156,6 +156,9 @@ func (n *OpNode) initL1(ctx context.Context, cfg *Config) error { ...@@ -156,6 +156,9 @@ func (n *OpNode) initL1(ctx context.Context, cfg *Config) error {
return fmt.Errorf("failed to get L1 RPC client: %w", err) return fmt.Errorf("failed to get L1 RPC client: %w", err)
} }
// Set the RethDB path in the EthClientConfig, if there is one configured.
rpcCfg.EthClientConfig.RethDBPath = cfg.RethDBPath
n.l1Source, err = sources.NewL1Client( n.l1Source, err = sources.NewL1Client(
client.NewInstrumentedRPC(l1Node, n.metrics), n.log, n.metrics.L1SourceCache, rpcCfg) client.NewInstrumentedRPC(l1Node, n.metrics), n.log, n.metrics.L1SourceCache, rpcCfg)
if err != nil { if err != nil {
......
...@@ -104,6 +104,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { ...@@ -104,6 +104,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
ConfigPersistence: configPersistence, ConfigPersistence: configPersistence,
Sync: *syncConfig, Sync: *syncConfig,
RollupHalt: haltOption, RollupHalt: haltOption,
RethDBPath: ctx.String(flags.L1RethDBPath.Name),
} }
if err := cfg.LoadPersisted(log); err != nil { if err := cfg.LoadPersisted(log); err != nil {
......
# Target
target/
# Bindings
rdb.h
This diff is collapsed.
[package]
name = "rethdb-reader"
description = "A simple library for reading data through Reth's DB abstractions."
version = "0.1.0"
edition = "2021"
[lib]
name = "rethdbreader"
crate-type = ["cdylib"]
[dependencies]
reth = { git = "https://github.com/paradigmxyz/reth.git" }
serde = "1.0.190"
serde_json = "1.0.107"
anyhow = "1.0.75"
# `rethdb-reader`
A dylib to be accessed via FFI in `op-service`'s `sources` package for reading information
directly from the `reth` database.
## Developing
**Building**
To build the dylib, you must first have the [Rust Toolchain][rust-toolchain] installed.
```sh
cargo build --release
```
**Docs**
Documentation is available via rustdoc.
```sh
cargo doc --open
```
**Linting**
```sh
cargo +nightly fmt -- && cargo +nightly clippy --all --all-features -- -D warnings
```
**Generating the C header**
To generate the C header, first install `cbindgen` via `cargo install cbindgen --force`. Then, run the generation script:
```sh
./headgen.sh
```
### C Header
The C header below is generated by `cbindgen`, and it is the interface that consumers of the dylib use to call its exported
functions. Currently, the only exported functions pertain to reading fully hydrated block receipts from the database.
```c
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
/**
* A [ReceiptsResult] is a wrapper around a JSON string containing serialized [TransactionReceipt]s
* as well as an error status that is compatible with FFI.
*
* # Safety
* - When the `error` field is false, the `data` pointer is guaranteed to be valid.
* - When the `error` field is true, the `data` pointer is guaranteed to be null.
*/
typedef struct ReceiptsResult {
uint32_t *data;
uintptr_t data_len;
bool error;
} ReceiptsResult;
/**
* Read the receipts for a blockhash from the RETH database directly.
*
* # Safety
* - All possible nil pointer dereferences are checked, and the function will return a
* failing [ReceiptsResult] if any are found.
*/
struct ReceiptsResult rdb_read_receipts(const uint8_t *block_hash,
uintptr_t block_hash_len,
const char *db_path);
/**
* Free a string that was allocated in Rust and passed to C.
*
* # Safety
* - All possible nil pointer dereferences are checked.
*/
void rdb_free_string(char *string);
```
[rust-toolchain]: https://rustup.rs/
#!/bin/bash
# Regenerates the C header (rdb.h) from the crate's public FFI surface via
# cbindgen, then splices the header into the ```c code block of README.md.
set -e

# Generate rdb.h
cbindgen --crate rethdb-reader --output rdb.h -l C

# Process README.md to replace the content within the ```c code block.
# close() resets the read position of rdb.h after the block is filled, so a
# second ```c block (if one is ever added) would be refilled rather than
# silently left empty by an exhausted getline stream.
awk '
BEGIN { in_code_block=0; }
/^```c/ { in_code_block=1; print; next; }
/^```/ && in_code_block { in_code_block=0; while ((getline line < "rdb.h") > 0) print line; close("rdb.h"); }
!in_code_block { print; }
' README.md > README.tmp && mv README.tmp README.md

echo "Generated C header successfully"
#![doc = include_str!("../README.md")]
use receipts::{read_receipts_inner, ReceiptsResult};
use std::os::raw::c_char;
mod receipts;
/// Read the receipts for a blockhash from the RETH database directly.
///
/// # Safety
/// - All possible nil pointer dereferences are checked, and the function will return a
///   failing [ReceiptsResult] if any are found.
#[no_mangle]
pub unsafe extern "C" fn rdb_read_receipts(
    block_hash: *const u8,
    block_hash_len: usize,
    db_path: *const c_char,
) -> ReceiptsResult {
    // Collapse any inner error into an FFI-safe failure value; no error
    // details cross the FFI boundary, only the `error` flag.
    match read_receipts_inner(block_hash, block_hash_len, db_path) {
        Ok(receipts) => receipts,
        Err(_) => ReceiptsResult::fail(),
    }
}
/// Free a string that was allocated in Rust and passed to C.
///
/// # Safety
/// - All possible nil pointer dereferences are checked.
#[no_mangle]
pub unsafe extern "C" fn rdb_free_string(string: *mut c_char) {
    // Freeing a null pointer is a no-op, mirroring C's free(NULL).
    if string.is_null() {
        return;
    }
    // Reclaim ownership of the allocation; dropping the CString frees it.
    drop(std::ffi::CString::from_raw(string));
}
//! This module contains the logic for reading a block's fully hydrated receipts directly from the
//! [reth] database.
use anyhow::{anyhow, Result};
use reth::{
blockchain_tree::noop::NoopBlockchainTree,
primitives::{
BlockHashOrNumber, Receipt, TransactionKind, TransactionMeta, TransactionSigned, MAINNET,
U128, U256, U64,
},
providers::{providers::BlockchainProvider, BlockReader, ProviderFactory, ReceiptProvider},
rpc::types::{Log, TransactionReceipt},
utils::db::open_db_read_only,
};
use std::{ffi::c_char, path::Path};
/// A [ReceiptsResult] is a wrapper around a JSON string containing serialized [TransactionReceipt]s
/// as well as an error status that is compatible with FFI.
///
/// # Safety
/// - When the `error` field is false, the `data` pointer is guaranteed to be valid.
/// - When the `error` field is true, the `data` pointer is guaranteed to be null.
#[repr(C)]
pub struct ReceiptsResult {
    // Heap-allocated, nul-terminated JSON buffer owned by Rust; consumers must
    // release it with `rdb_free_string`. Null when `error` is true.
    // NOTE: previously `*mut char` (Rust's 4-byte char), which made cbindgen
    // emit `uint32_t *data` while the Go side declares `char* data`; `c_char`
    // makes the generated header match the consumer's declaration.
    data: *mut c_char,
    // Length of the JSON payload in bytes, excluding the trailing nul.
    data_len: usize,
    // True when fetching or serializing receipts failed; `data` is then null.
    error: bool,
}

impl ReceiptsResult {
    /// Constructs a successful [ReceiptsResult] from a nul-terminated JSON
    /// buffer and its payload length (excluding the nul).
    pub fn success(data: *mut c_char, data_len: usize) -> Self {
        Self {
            data,
            data_len,
            error: false,
        }
    }

    /// Constructs a failing [ReceiptsResult] with a null pointer to the data.
    pub fn fail() -> Self {
        Self {
            data: std::ptr::null_mut(),
            data_len: 0,
            error: true,
        }
    }
}

/// Read the receipts for a blockhash from the RETH database directly.
///
/// # Safety
/// - All possible nil pointer dereferences are checked, and the function will return a
///   failing [ReceiptsResult] if any are found.
#[inline(always)]
pub(crate) unsafe fn read_receipts_inner(
    block_hash: *const u8,
    block_hash_len: usize,
    db_path: *const c_char,
) -> Result<ReceiptsResult> {
    // Convert the raw pointer and length back to a Rust slice; the conversion
    // to `[u8; 32]` errors out (via `?`) if the length is not exactly 32.
    let block_hash: [u8; 32] = {
        if block_hash.is_null() {
            anyhow::bail!("block_hash pointer is null");
        }
        std::slice::from_raw_parts(block_hash, block_hash_len)
    }
    .try_into()?;

    // Convert the *const c_char to a Rust &str
    let db_path_str = {
        if db_path.is_null() {
            anyhow::bail!("db path pointer is null");
        }
        std::ffi::CStr::from_ptr(db_path)
    }
    .to_str()?;

    let db = open_db_read_only(Path::new(db_path_str), None).map_err(|e| anyhow!(e))?;
    // NOTE(review): hard-codes the MAINNET chain spec — confirm this is
    // intended when reading DBs for non-mainnet chains.
    let factory = ProviderFactory::new(db, MAINNET.clone());

    // Create a read-only BlockChainProvider
    let provider = BlockchainProvider::new(factory, NoopBlockchainTree::default())?;

    // Fetch the block and the receipts within it
    let block = provider
        .block_by_hash(block_hash.into())?
        .ok_or(anyhow!("Failed to fetch block"))?;
    let receipts = provider
        .receipts_by_block(BlockHashOrNumber::Hash(block_hash.into()))?
        .ok_or(anyhow!("Failed to fetch block receipts"))?;

    let block_number = block.number;
    let base_fee = block.base_fee_per_gas;
    let block_hash = block.hash_slow();
    let receipts = block
        .body
        .into_iter()
        .zip(receipts.clone())
        .enumerate()
        .map(|(idx, (tx, receipt))| {
            let meta = TransactionMeta {
                tx_hash: tx.hash,
                index: idx as u64,
                block_hash,
                block_number,
                base_fee,
                excess_blob_gas: None,
            };
            build_transaction_receipt_with_block_receipts(tx, meta, receipt, &receipts)
        })
        .collect::<Option<Vec<_>>>()
        .ok_or(anyhow!("Failed to build receipts"))?;

    // Serialize the receipts to JSON for transport across the FFI boundary.
    let receipts_json = serde_json::to_string(&receipts)?;
    let data_len = receipts_json.len();

    // Hand ownership of the buffer to the caller as a nul-terminated CString.
    // `rdb_free_string` reclaims it with `CString::from_raw`, so the
    // allocation MUST originate from `CString::into_raw`. (The previous code
    // `mem::forget`-ed a plain `String` — neither nul-terminated nor a
    // CString allocation — which is undefined behavior to free through
    // `CString::from_raw`.)
    let json_cstr = std::ffi::CString::new(receipts_json)?;
    Ok(ReceiptsResult::success(json_cstr.into_raw(), data_len))
}
/// Builds a hydrated [TransactionReceipt] from information in the passed transaction,
/// receipt, and block receipts.
///
/// Returns [None] if the transaction's sender could not be recovered from the signature.
#[inline(always)]
fn build_transaction_receipt_with_block_receipts(
    tx: TransactionSigned,
    meta: TransactionMeta,
    receipt: Receipt,
    all_receipts: &[Receipt],
) -> Option<TransactionReceipt> {
    // Recover the sender address from the signature; a failed recovery
    // short-circuits the whole receipt build with `None`.
    let transaction = tx.clone().into_ecrecovered()?;

    // get the previous transaction cumulative gas used
    // Per-transaction gas is the delta between this receipt's cumulative gas
    // and the previous receipt's; the first transaction in the block uses its
    // cumulative value directly. A missing previous receipt defaults to 0.
    let gas_used = if meta.index == 0 {
        receipt.cumulative_gas_used
    } else {
        let prev_tx_idx = (meta.index - 1) as usize;
        all_receipts
            .get(prev_tx_idx)
            .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used)
            .unwrap_or_default()
    };

    // Assemble the RPC receipt; `to`/`contract_address` are filled in below
    // once the transaction kind (call vs. create) is known.
    let mut res_receipt = TransactionReceipt {
        transaction_hash: Some(meta.tx_hash),
        transaction_index: U64::from(meta.index),
        block_hash: Some(meta.block_hash),
        block_number: Some(U256::from(meta.block_number)),
        from: transaction.signer(),
        to: None,
        cumulative_gas_used: U256::from(receipt.cumulative_gas_used),
        gas_used: Some(U256::from(gas_used)),
        contract_address: None,
        logs: Vec::with_capacity(receipt.logs.len()),
        effective_gas_price: U128::from(transaction.effective_gas_price(meta.base_fee)),
        transaction_type: tx.transaction.tx_type().into(),
        // TODO pre-byzantium receipts have a post-transaction state root
        state_root: None,
        logs_bloom: receipt.bloom_slow(),
        status_code: if receipt.success {
            Some(U64::from(1))
        } else {
            Some(U64::from(0))
        },
        // EIP-4844 fields
        blob_gas_price: None,
        blob_gas_used: None,
    };

    // For creations, derive the deployed contract address from sender + nonce;
    // for calls, record the callee address.
    match tx.transaction.kind() {
        TransactionKind::Create => {
            res_receipt.contract_address =
                Some(transaction.signer().create(tx.transaction.nonce()));
        }
        TransactionKind::Call(addr) => {
            res_receipt.to = Some(*addr);
        }
    }

    // get number of logs in the block
    // Log indices are block-scoped: offset this transaction's logs by the
    // total log count of all preceding transactions in the block.
    let mut num_logs = 0;
    for prev_receipt in all_receipts.iter().take(meta.index as usize) {
        num_logs += prev_receipt.logs.len();
    }

    // Convert each primitive log into its RPC form, stamping in the
    // block/transaction context carried by `meta`.
    for (tx_log_idx, log) in receipt.logs.into_iter().enumerate() {
        let rpclog = Log {
            address: log.address,
            topics: log.topics,
            data: log.data,
            block_hash: Some(meta.block_hash),
            block_number: Some(U256::from(meta.block_number)),
            transaction_hash: Some(meta.tx_hash),
            transaction_index: Some(U256::from(meta.index)),
            log_index: Some(U256::from(num_logs + tx_log_idx)),
            removed: false,
        };
        res_receipt.logs.push(rpclog);
    }

    Some(res_receipt)
}
...@@ -62,6 +62,9 @@ type EthClientConfig struct { ...@@ -62,6 +62,9 @@ type EthClientConfig struct {
// till we re-attempt the user-preferred methods. // till we re-attempt the user-preferred methods.
// If this is 0 then the client does not fall back to less optimal but available methods. // If this is 0 then the client does not fall back to less optimal but available methods.
MethodResetDuration time.Duration MethodResetDuration time.Duration
// [OPTIONAL] The reth DB path to fetch receipts from
RethDBPath string
} }
func (c *EthClientConfig) Check() error { func (c *EthClientConfig) Check() error {
...@@ -132,6 +135,9 @@ type EthClient struct { ...@@ -132,6 +135,9 @@ type EthClient struct {
// methodResetDuration defines how long we take till we reset lastMethodsReset // methodResetDuration defines how long we take till we reset lastMethodsReset
methodResetDuration time.Duration methodResetDuration time.Duration
// [OPTIONAL] The reth DB path to fetch receipts from
rethDbPath string
} }
func (s *EthClient) PickReceiptsMethod(txCount uint64) ReceiptsFetchingMethod { func (s *EthClient) PickReceiptsMethod(txCount uint64) ReceiptsFetchingMethod {
...@@ -179,6 +185,7 @@ func NewEthClient(client client.RPC, log log.Logger, metrics caching.Metrics, co ...@@ -179,6 +185,7 @@ func NewEthClient(client client.RPC, log log.Logger, metrics caching.Metrics, co
availableReceiptMethods: AvailableReceiptsFetchingMethods(config.RPCProviderKind), availableReceiptMethods: AvailableReceiptsFetchingMethods(config.RPCProviderKind),
lastMethodsReset: time.Now(), lastMethodsReset: time.Now(),
methodResetDuration: config.MethodResetDuration, methodResetDuration: config.MethodResetDuration,
rethDbPath: config.RethDBPath,
}, nil }, nil
} }
...@@ -357,7 +364,7 @@ func (s *EthClient) FetchReceipts(ctx context.Context, blockHash common.Hash) (e ...@@ -357,7 +364,7 @@ func (s *EthClient) FetchReceipts(ctx context.Context, blockHash common.Hash) (e
job = v job = v
} else { } else {
txHashes := eth.TransactionsToHashes(txs) txHashes := eth.TransactionsToHashes(txs)
job = NewReceiptsFetchingJob(s, s.client, s.maxBatchSize, eth.ToBlockID(info), info.ReceiptHash(), txHashes) job = NewReceiptsFetchingJob(s, s.client, s.maxBatchSize, eth.ToBlockID(info), info.ReceiptHash(), txHashes, s.rethDbPath)
s.receiptsCache.Add(blockHash, job) s.receiptsCache.Add(blockHash, job)
} }
receipts, err := job.Fetch(ctx) receipts, err := job.Fetch(ctx)
......
...@@ -124,6 +124,7 @@ const ( ...@@ -124,6 +124,7 @@ const (
RPCKindBasic RPCProviderKind = "basic" // try only the standard most basic receipt fetching RPCKindBasic RPCProviderKind = "basic" // try only the standard most basic receipt fetching
RPCKindAny RPCProviderKind = "any" // try any method available RPCKindAny RPCProviderKind = "any" // try any method available
RPCKindStandard RPCProviderKind = "standard" // try standard methods, including newer optimized standard RPC methods RPCKindStandard RPCProviderKind = "standard" // try standard methods, including newer optimized standard RPC methods
RPCKindRethDB RPCProviderKind = "reth_db" // read data directly from reth's database
) )
var RPCProviderKinds = []RPCProviderKind{ var RPCProviderKinds = []RPCProviderKind{
...@@ -137,6 +138,7 @@ var RPCProviderKinds = []RPCProviderKind{ ...@@ -137,6 +138,7 @@ var RPCProviderKinds = []RPCProviderKind{
RPCKindBasic, RPCKindBasic,
RPCKindAny, RPCKindAny,
RPCKindStandard, RPCKindStandard,
RPCKindRethDB,
} }
func (kind RPCProviderKind) String() string { func (kind RPCProviderKind) String() string {
...@@ -268,6 +270,18 @@ const ( ...@@ -268,6 +270,18 @@ const (
// See: // See:
// https://github.com/ledgerwatch/erigon/blob/287a3d1d6c90fc6a7a088b5ae320f93600d5a167/cmd/rpcdaemon/commands/erigon_receipts.go#LL391C24-L391C51 // https://github.com/ledgerwatch/erigon/blob/287a3d1d6c90fc6a7a088b5ae320f93600d5a167/cmd/rpcdaemon/commands/erigon_receipts.go#LL391C24-L391C51
ErigonGetBlockReceiptsByBlockHash ErigonGetBlockReceiptsByBlockHash
// RethGetBlockReceiptsMDBX is a Reth-specific receipt fetching method. It reads the data directly from reth's database, using their
// generic DB abstractions, rather than requesting it from the RPC provider.
// Available in:
// - Reth
// Method: n/a - does not use RPC.
// Params:
// - Reth: string, hex-encoded block hash
// Returns:
// - Reth: string, json-ified receipts
// See:
// - reth's DB crate documentation: https://github.com/paradigmxyz/reth/blob/main/docs/crates/db.md
RethGetBlockReceipts
// Other: // Other:
// - 250 credits, not supported, strictly worse than other options. In quicknode price-table. // - 250 credits, not supported, strictly worse than other options. In quicknode price-table.
...@@ -297,12 +311,14 @@ func AvailableReceiptsFetchingMethods(kind RPCProviderKind) ReceiptsFetchingMeth ...@@ -297,12 +311,14 @@ func AvailableReceiptsFetchingMethods(kind RPCProviderKind) ReceiptsFetchingMeth
case RPCKindBasic: case RPCKindBasic:
return EthGetTransactionReceiptBatch return EthGetTransactionReceiptBatch
case RPCKindAny: case RPCKindAny:
// if it's any kind of RPC provider, then try all methods // if it's any kind of RPC provider, then try all methods (except for RethGetBlockReceipts)
return AlchemyGetTransactionReceipts | EthGetBlockReceipts | return AlchemyGetTransactionReceipts | EthGetBlockReceipts |
DebugGetRawReceipts | ErigonGetBlockReceiptsByBlockHash | DebugGetRawReceipts | ErigonGetBlockReceiptsByBlockHash |
ParityGetBlockReceipts | EthGetTransactionReceiptBatch ParityGetBlockReceipts | EthGetTransactionReceiptBatch
case RPCKindStandard: case RPCKindStandard:
return EthGetBlockReceipts | EthGetTransactionReceiptBatch return EthGetBlockReceipts | EthGetTransactionReceiptBatch
case RPCKindRethDB:
return RethGetBlockReceipts
default: default:
return EthGetTransactionReceiptBatch return EthGetTransactionReceiptBatch
} }
...@@ -313,7 +329,9 @@ func AvailableReceiptsFetchingMethods(kind RPCProviderKind) ReceiptsFetchingMeth ...@@ -313,7 +329,9 @@ func AvailableReceiptsFetchingMethods(kind RPCProviderKind) ReceiptsFetchingMeth
func PickBestReceiptsFetchingMethod(kind RPCProviderKind, available ReceiptsFetchingMethod, txCount uint64) ReceiptsFetchingMethod { func PickBestReceiptsFetchingMethod(kind RPCProviderKind, available ReceiptsFetchingMethod, txCount uint64) ReceiptsFetchingMethod {
// If we have optimized methods available, it makes sense to use them, but only if the cost is // If we have optimized methods available, it makes sense to use them, but only if the cost is
// lower than fetching transactions one by one with the standard receipts RPC method. // lower than fetching transactions one by one with the standard receipts RPC method.
if kind == RPCKindAlchemy { if kind == RPCKindRethDB {
return RethGetBlockReceipts
} else if kind == RPCKindAlchemy {
if available&AlchemyGetTransactionReceipts != 0 && txCount > 250/15 { if available&AlchemyGetTransactionReceipts != 0 && txCount > 250/15 {
return AlchemyGetTransactionReceipts return AlchemyGetTransactionReceipts
} }
...@@ -371,11 +389,14 @@ type receiptsFetchingJob struct { ...@@ -371,11 +389,14 @@ type receiptsFetchingJob struct {
fetcher *IterativeBatchCall[common.Hash, *types.Receipt] fetcher *IterativeBatchCall[common.Hash, *types.Receipt]
// [OPTIONAL] RethDB path to fetch receipts from
rethDbPath string
result types.Receipts result types.Receipts
} }
func NewReceiptsFetchingJob(requester ReceiptsRequester, client rpcClient, maxBatchSize int, block eth.BlockID, func NewReceiptsFetchingJob(requester ReceiptsRequester, client rpcClient, maxBatchSize int, block eth.BlockID,
receiptHash common.Hash, txHashes []common.Hash) *receiptsFetchingJob { receiptHash common.Hash, txHashes []common.Hash, rethDb string) *receiptsFetchingJob {
return &receiptsFetchingJob{ return &receiptsFetchingJob{
requester: requester, requester: requester,
client: client, client: client,
...@@ -383,6 +404,7 @@ func NewReceiptsFetchingJob(requester ReceiptsRequester, client rpcClient, maxBa ...@@ -383,6 +404,7 @@ func NewReceiptsFetchingJob(requester ReceiptsRequester, client rpcClient, maxBa
block: block, block: block,
receiptHash: receiptHash, receiptHash: receiptHash,
txHashes: txHashes, txHashes: txHashes,
rethDbPath: rethDb,
} }
} }
...@@ -460,6 +482,15 @@ func (job *receiptsFetchingJob) runAltMethod(ctx context.Context, m ReceiptsFetc ...@@ -460,6 +482,15 @@ func (job *receiptsFetchingJob) runAltMethod(ctx context.Context, m ReceiptsFetc
err = job.client.CallContext(ctx, &result, "eth_getBlockReceipts", job.block.Hash) err = job.client.CallContext(ctx, &result, "eth_getBlockReceipts", job.block.Hash)
case ErigonGetBlockReceiptsByBlockHash: case ErigonGetBlockReceiptsByBlockHash:
err = job.client.CallContext(ctx, &result, "erigon_getBlockReceiptsByBlockHash", job.block.Hash) err = job.client.CallContext(ctx, &result, "erigon_getBlockReceiptsByBlockHash", job.block.Hash)
case RethGetBlockReceipts:
if job.rethDbPath == "" {
return fmt.Errorf("reth_db path not set")
}
res, err := FetchRethReceipts(job.rethDbPath, &job.block.Hash)
if err != nil {
return err
}
result = res
default: default:
err = fmt.Errorf("unknown receipt fetching method: %d", uint64(m)) err = fmt.Errorf("unknown receipt fetching method: %d", uint64(m))
} }
......
//go:build rethdb
package sources
import (
"encoding/json"
"fmt"
"unsafe"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
/*
#cgo LDFLAGS: -L../rethdb-reader/target/release -lrethdbreader
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
typedef struct {
char* data;
size_t data_len;
bool error;
} ReceiptsResult;
extern ReceiptsResult rdb_read_receipts(const uint8_t* block_hash, size_t block_hash_len, const char* db_path);
extern void rdb_free_string(char* string);
*/
import "C"
// FetchRethReceipts fetches the receipts for the given block hash directly from the Reth Database
// and populates the given results slice pointer with the receipts that were found.
//
// Returns an error when blockHash is nil or when the Rust reader reports a
// failure; the receipts are decoded from the JSON payload produced by the
// rethdb-reader dylib.
func FetchRethReceipts(dbPath string, blockHash *common.Hash) (types.Receipts, error) {
	if blockHash == nil {
		// Error strings follow Go convention: lowercase, no trailing punctuation.
		return nil, fmt.Errorf("must provide a block hash to fetch receipts for")
	}

	// Convert the block hash to a C byte array and defer its deallocation
	cBlockHash := C.CBytes(blockHash[:])
	defer C.free(cBlockHash)

	// Convert the db path to a C string and defer its deallocation
	cDbPath := C.CString(dbPath)
	defer C.free(unsafe.Pointer(cDbPath))

	// Call the C function to fetch the receipts from the Reth Database.
	// len(blockHash) on a *common.Hash is the fixed array length (32).
	receiptsResult := C.rdb_read_receipts((*C.uint8_t)(cBlockHash), C.size_t(len(blockHash)), cDbPath)
	if receiptsResult.error {
		return nil, fmt.Errorf("error fetching receipts from Reth Database")
	}

	// Free the memory allocated by the C code
	defer C.rdb_free_string(receiptsResult.data)

	// Convert the returned JSON string to Go string and parse it
	receiptsJSON := C.GoStringN(receiptsResult.data, C.int(receiptsResult.data_len))
	var receipts types.Receipts
	if err := json.Unmarshal([]byte(receiptsJSON), &receipts); err != nil {
		return nil, err
	}

	return receipts, nil
}
//go:build !rethdb
package sources
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// FetchRethReceipts stub; Not available without `rethdb` build tag.
// This variant is compiled when the `rethdb` build tag is absent (see the
// `//go:build !rethdb` constraint at the top of this file). It exists only so
// callers compile in non-rethdb builds; invoking it always panics, so the
// `reth_db` RPC kind must only be selected in builds with the tag enabled.
func FetchRethReceipts(dbPath string, blockHash *common.Hash) (types.Receipts, error) {
	panic("unimplemented! Did you forget to enable the `rethdb` build tag?")
}
...@@ -49,7 +49,6 @@ CrossDomainOwnable_Test:test_onlyOwner_succeeds() (gas: 34883) ...@@ -49,7 +49,6 @@ CrossDomainOwnable_Test:test_onlyOwner_succeeds() (gas: 34883)
DelayedVetoable_Getters_Test:test_getters() (gas: 24466) DelayedVetoable_Getters_Test:test_getters() (gas: 24466)
DelayedVetoable_Getters_TestFail:test_getters_notZeroAddress_reverts() (gas: 36220) DelayedVetoable_Getters_TestFail:test_getters_notZeroAddress_reverts() (gas: 36220)
DelayedVetoable_HandleCall_TestFail:test_handleCall_unauthorizedInitiation_reverts() (gas: 21867) DelayedVetoable_HandleCall_TestFail:test_handleCall_unauthorizedInitiation_reverts() (gas: 21867)
DeleteOutput:test_script_succeeds() (gas: 3100)
DeployerWhitelist_Test:test_owner_succeeds() (gas: 7582) DeployerWhitelist_Test:test_owner_succeeds() (gas: 7582)
DeployerWhitelist_Test:test_storageSlots_succeeds() (gas: 33395) DeployerWhitelist_Test:test_storageSlots_succeeds() (gas: 33395)
DisputeGameFactory_Owner_Test:test_owner_succeeds() (gas: 12581) DisputeGameFactory_Owner_Test:test_owner_succeeds() (gas: 12581)
...@@ -77,7 +76,6 @@ Drippie_Test:test_status_unauthorized_reverts() (gas: 167388) ...@@ -77,7 +76,6 @@ Drippie_Test:test_status_unauthorized_reverts() (gas: 167388)
Drippie_Test:test_trigger_oneFunction_succeeds() (gas: 338226) Drippie_Test:test_trigger_oneFunction_succeeds() (gas: 338226)
Drippie_Test:test_trigger_twoFunctions_succeeds() (gas: 491907) Drippie_Test:test_trigger_twoFunctions_succeeds() (gas: 491907)
Drippie_Test:test_twice_inOneInterval_reverts() (gas: 303933) Drippie_Test:test_twice_inOneInterval_reverts() (gas: 303933)
EASUpgrader:test_script_succeeds() (gas: 3078)
FaucetTest:test_authAdmin_drip_succeeds() (gas: 366107) FaucetTest:test_authAdmin_drip_succeeds() (gas: 366107)
FaucetTest:test_drip_afterTimeout_succeeds() (gas: 447891) FaucetTest:test_drip_afterTimeout_succeeds() (gas: 447891)
FaucetTest:test_drip_beforeTimeout_reverts() (gas: 378884) FaucetTest:test_drip_beforeTimeout_reverts() (gas: 378884)
...@@ -249,7 +247,7 @@ L2OutputOracleUpgradeable_Test:test_initValuesOnImpl_succeeds() (gas: 23902) ...@@ -249,7 +247,7 @@ L2OutputOracleUpgradeable_Test:test_initValuesOnImpl_succeeds() (gas: 23902)
L2OutputOracleUpgradeable_Test:test_initValuesOnProxy_succeeds() (gas: 46800) L2OutputOracleUpgradeable_Test:test_initValuesOnProxy_succeeds() (gas: 46800)
L2OutputOracleUpgradeable_Test:test_initializeImpl_alreadyInitialized_reverts() (gas: 15216) L2OutputOracleUpgradeable_Test:test_initializeImpl_alreadyInitialized_reverts() (gas: 15216)
L2OutputOracleUpgradeable_Test:test_initializeProxy_alreadyInitialized_reverts() (gas: 20216) L2OutputOracleUpgradeable_Test:test_initializeProxy_alreadyInitialized_reverts() (gas: 20216)
L2OutputOracleUpgradeable_Test:test_upgrading_succeeds() (gas: 191455) L2OutputOracleUpgradeable_Test:test_upgrading_succeeds() (gas: 187875)
L2OutputOracle_constructor_Test:test_constructor_l2BlockTimeZero_reverts() (gas: 39022) L2OutputOracle_constructor_Test:test_constructor_l2BlockTimeZero_reverts() (gas: 39022)
L2OutputOracle_constructor_Test:test_constructor_submissionInterval_reverts() (gas: 39032) L2OutputOracle_constructor_Test:test_constructor_submissionInterval_reverts() (gas: 39032)
L2OutputOracle_constructor_Test:test_constructor_succeeds() (gas: 51777) L2OutputOracle_constructor_Test:test_constructor_succeeds() (gas: 51777)
...@@ -307,6 +305,33 @@ LegacyERC20ETH_Test:test_transferFrom_doesNotExist_reverts() (gas: 12957) ...@@ -307,6 +305,33 @@ LegacyERC20ETH_Test:test_transferFrom_doesNotExist_reverts() (gas: 12957)
LegacyERC20ETH_Test:test_transfer_doesNotExist_reverts() (gas: 10755) LegacyERC20ETH_Test:test_transfer_doesNotExist_reverts() (gas: 10755)
LegacyMessagePasser_Test:test_passMessageToL1_succeeds() (gas: 34524) LegacyMessagePasser_Test:test_passMessageToL1_succeeds() (gas: 34524)
LibPosition_Test:test_pos_correctness_succeeds() (gas: 38689) LibPosition_Test:test_pos_correctness_succeeds() (gas: 38689)
LivenessGuard_CheckAfterExecution_TestFails:test_checkAfterExecution_callerIsNotSafe_revert() (gas: 8531)
LivenessGuard_CheckTx_Test:test_checkTransaction_succeeds() (gas: 233535)
LivenessGuard_CheckTx_TestFails:test_checkTransaction_callerIsNotSafe_revert() (gas: 10358)
LivenessGuard_Constructor_Test:test_constructor_works() (gas: 1198965)
LivenessGuard_Getters_Test:test_getters_works() (gas: 10662)
LivenessGuard_OwnerManagement_Test:test_addOwner_succeeds() (gas: 274366)
LivenessGuard_OwnerManagement_Test:test_removeOwner_succeeds() (gas: 246263)
LivenessGuard_OwnerManagement_Test:test_swapOwner_succeeds() (gas: 284880)
LivenessGuard_ShowLiveness_Test:test_showLiveness_succeeds() (gas: 28831)
LivenessGuard_ShowLiveness_TestFail:test_showLiveness_callIsNotSafeOwner_reverts() (gas: 18770)
LivenessModule_CanRemove_Test:test_canRemove_works() (gas: 33026)
LivenessModule_CanRemove_TestFail:test_canRemove_notSafeOwner_reverts() (gas: 20489)
LivenessModule_Constructor_TestFail:test_constructor_minOwnersGreaterThanOwners_reverts() (gas: 83623)
LivenessModule_Constructor_TestFail:test_constructor_wrongThreshold_reverts() (gas: 92925)
LivenessModule_Get75PercentThreshold_Test:test_get75PercentThreshold_Works() (gas: 26339)
LivenessModule_Getters_Test:test_getters_works() (gas: 14853)
LivenessModule_RemoveOwners_Test:test_removeOwners_allOwners_succeeds() (gas: 1326177)
LivenessModule_RemoveOwners_Test:test_removeOwners_oneOwner_succeeds() (gas: 133975)
LivenessModule_RemoveOwners_TestFail:test_removeOwners_belowEmptiedButNotShutDown_reverts() (gas: 1278643)
LivenessModule_RemoveOwners_TestFail:test_removeOwners_belowMinButNotEmptied_reverts() (gas: 1281685)
LivenessModule_RemoveOwners_TestFail:test_removeOwners_differentArrayLengths_reverts() (gas: 10502)
LivenessModule_RemoveOwners_TestFail:test_removeOwners_guardChanged_reverts() (gas: 2839358)
LivenessModule_RemoveOwners_TestFail:test_removeOwners_invalidThreshold_reverts() (gas: 69358)
LivenessModule_RemoveOwners_TestFail:test_removeOwners_ownerHasShownLivenessRecently_reverts() (gas: 80971)
LivenessModule_RemoveOwners_TestFail:test_removeOwners_ownerHasSignedRecently_reverts() (gas: 617629)
LivenessModule_RemoveOwners_TestFail:test_removeOwners_swapToFallbackOwner_reverts() (gas: 1288036)
LivenessModule_RemoveOwners_TestFail:test_removeOwners_wrongPreviousOwner_reverts() (gas: 73954)
MIPS_Test:test_add_succeeds() (gas: 122932) MIPS_Test:test_add_succeeds() (gas: 122932)
MIPS_Test:test_addiSign_succeeds() (gas: 122923) MIPS_Test:test_addiSign_succeeds() (gas: 122923)
MIPS_Test:test_addi_succeeds() (gas: 123120) MIPS_Test:test_addi_succeeds() (gas: 123120)
......
# `OptimismPortal` Invariants # `OptimismPortal` Invariants
## Deposits of any value should always succeed unless `_to` = `address(0)` or `_isCreation` = `true`. ## Deposits of any value should always succeed unless `_to` = `address(0)` or `_isCreation` = `true`.
**Test:** [`OptimismPortal.t.sol#L147`](../test/invariants/OptimismPortal.t.sol#L147) **Test:** [`OptimismPortal.t.sol#L148`](../test/invariants/OptimismPortal.t.sol#L148)
All deposits, barring creation transactions and transactions sent to `address(0)`, should always succeed. All deposits, barring creation transactions and transactions sent to `address(0)`, should always succeed.
## `finalizeWithdrawalTransaction` should revert if the finalization period has not elapsed. ## `finalizeWithdrawalTransaction` should revert if the finalization period has not elapsed.
**Test:** [`OptimismPortal.t.sol#L170`](../test/invariants/OptimismPortal.t.sol#L170) **Test:** [`OptimismPortal.t.sol#L171`](../test/invariants/OptimismPortal.t.sol#L171)
A withdrawal that has been proven should not be able to be finalized until after the finalization period has elapsed. A withdrawal that has been proven should not be able to be finalized until after the finalization period has elapsed.
## `finalizeWithdrawalTransaction` should revert if the withdrawal has already been finalized. ## `finalizeWithdrawalTransaction` should revert if the withdrawal has already been finalized.
**Test:** [`OptimismPortal.t.sol#L200`](../test/invariants/OptimismPortal.t.sol#L200) **Test:** [`OptimismPortal.t.sol#L201`](../test/invariants/OptimismPortal.t.sol#L201)
Ensures that there is no chain of calls that can be made that allows a withdrawal to be finalized twice. Ensures that there is no chain of calls that can be made that allows a withdrawal to be finalized twice.
## A withdrawal should **always** be able to be finalized `FINALIZATION_PERIOD_SECONDS` after it was successfully proven. ## A withdrawal should **always** be able to be finalized `FINALIZATION_PERIOD_SECONDS` after it was successfully proven.
**Test:** [`OptimismPortal.t.sol#L229`](../test/invariants/OptimismPortal.t.sol#L229) **Test:** [`OptimismPortal.t.sol#L230`](../test/invariants/OptimismPortal.t.sol#L230)
This invariant asserts that there is no chain of calls that can be made that will prevent a withdrawal from being finalized exactly `FINALIZATION_PERIOD_SECONDS` after it was successfully proven. This invariant asserts that there is no chain of calls that can be made that will prevent a withdrawal from being finalized exactly `FINALIZATION_PERIOD_SECONDS` after it was successfully proven.
\ No newline at end of file
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
"src/L2/L2StandardBridge.sol": "0x284ebf5569c75d98f2d1920a276d1116524399355708c4a60ea5892283c56719", "src/L2/L2StandardBridge.sol": "0x284ebf5569c75d98f2d1920a276d1116524399355708c4a60ea5892283c56719",
"src/L2/L2ToL1MessagePasser.sol": "0xafc710b4d320ef450586d96a61cbd58cac814cb3b0c4fdc280eace3efdcdf321", "src/L2/L2ToL1MessagePasser.sol": "0xafc710b4d320ef450586d96a61cbd58cac814cb3b0c4fdc280eace3efdcdf321",
"src/L2/SequencerFeeVault.sol": "0x883e434a69b4789997a4a9a32060dbbd2e12db6f1970927f1310820336119575", "src/L2/SequencerFeeVault.sol": "0x883e434a69b4789997a4a9a32060dbbd2e12db6f1970927f1310820336119575",
"src/Safe/LivenessGuard.sol": "0xa08460138c22a337f8f5d3a17e02beffe8136c4dba58935cc5c9c2d7ffe1222c",
"src/Safe/LivenessModule.sol": "0x45621d74ea464c75064f9194261d29d47552cf4a9c4f4b3a733f5df5803fc0dd",
"src/dispute/BlockOracle.sol": "0x7e724b1ee0116dfd744f556e6237af449c2f40c6426d6f1462ae2a47589283bb", "src/dispute/BlockOracle.sol": "0x7e724b1ee0116dfd744f556e6237af449c2f40c6426d6f1462ae2a47589283bb",
"src/dispute/DisputeGameFactory.sol": "0xfdfa141408d7f8de7e230ff4bef088e30d0e4d569ca743d60d292abdd21ff270", "src/dispute/DisputeGameFactory.sol": "0xfdfa141408d7f8de7e230ff4bef088e30d0e4d569ca743d60d292abdd21ff270",
"src/dispute/FaultDisputeGame.sol": "0x0766707ab32338a6586c2340ddfbfd4e9023eeb9dfa3ef87e4b404fb0260479f", "src/dispute/FaultDisputeGame.sol": "0x0766707ab32338a6586c2340ddfbfd4e9023eeb9dfa3ef87e4b404fb0260479f",
......
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { Safe } from "safe-contracts/Safe.sol";
import { BaseGuard, GuardManager } from "safe-contracts/base/GuardManager.sol";
import { ModuleManager } from "safe-contracts/base/ModuleManager.sol";
import { SafeSigners } from "src/Safe/SafeSigners.sol";
import { Enum } from "safe-contracts/common/Enum.sol";
import { ISemver } from "src/universal/ISemver.sol";
import { EnumerableSet } from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol";
/// @title LivenessGuard
/// @notice This Guard contract is used to track the liveness of Safe owners.
/// @dev It keeps track of the last time each owner participated in signing a transaction.
/// If an owner does not participate in a transaction for a certain period of time, they are considered inactive.
/// This Guard is intended to be used in conjunction with the LivenessModule contract, but does
/// not depend on it.
/// Note: Both `checkTransaction` and `checkAfterExecution` are called once each by the Safe contract
/// before and after the execution of a transaction. It is critical that neither function revert,
/// otherwise the Safe contract will be unable to execute a transaction.
contract LivenessGuard is ISemver, BaseGuard {
    using EnumerableSet for EnumerableSet.AddressSet;

    /// @notice Emitted when an owner is recorded as live.
    /// @param owner The owner's address.
    event OwnerRecorded(address owner);

    /// @notice Semantic version.
    /// @custom:semver 1.0.0
    string public constant version = "1.0.0";

    /// @notice The safe account for which this contract will be the guard.
    Safe internal immutable SAFE;

    /// @notice A mapping of the timestamp at which an owner last participated in signing
    ///         an executed transaction, or called showLiveness.
    mapping(address => uint256) public lastLive;

    /// @notice An enumerable set of addresses used to store the list of owners before execution,
    ///         and then to update the lastLive mapping according to changes in the set observed
    ///         after execution.
    EnumerableSet.AddressSet internal ownersBefore;

    /// @notice Constructor.
    /// @param _safe The safe account for which this contract will be the guard.
    /// @dev Seeds lastLive with the current timestamp for every existing owner, so that no
    ///      owner appears inactive at the moment the guard is installed.
    constructor(Safe _safe) {
        SAFE = _safe;
        address[] memory owners = _safe.getOwners();
        for (uint256 i = 0; i < owners.length; i++) {
            address owner = owners[i];
            lastLive[owner] = block.timestamp;
            emit OwnerRecorded(owner);
        }
    }

    /// @notice Getter function for the Safe contract instance
    /// @return safe_ The Safe contract instance
    function safe() public view returns (Safe safe_) {
        safe_ = SAFE;
    }

    /// @notice Internal function to ensure that only the Safe can call certain functions.
    function _requireOnlySafe() internal view {
        require(msg.sender == address(SAFE), "LivenessGuard: only Safe can call this function");
    }

    /// @notice Records the most recent time which any owner has signed a transaction.
    /// @dev Called by the Safe contract before execution of a transaction. The parameters are
    ///      the full set of transaction fields forwarded by the Safe's guard hook; they are used
    ///      here only to reconstruct the transaction hash that was signed.
    /// @param to             Destination address of the Safe transaction.
    /// @param value          Ether value of the Safe transaction.
    /// @param data           Call data of the Safe transaction.
    /// @param operation      Operation type (Call or DelegateCall).
    /// @param safeTxGas      Gas available for the Safe transaction.
    /// @param baseGas        Base gas cost charged by the Safe.
    /// @param gasPrice       Gas price used for refund calculation.
    /// @param gasToken       Token used for gas payment (address(0) for ETH).
    /// @param refundReceiver Address receiving the gas refund.
    /// @param signatures     Concatenated signatures that authorized this transaction.
    /// @param msgSender      Address which called execTransaction on the Safe (unused).
    function checkTransaction(
        address to,
        uint256 value,
        bytes memory data,
        Enum.Operation operation,
        uint256 safeTxGas,
        uint256 baseGas,
        uint256 gasPrice,
        address gasToken,
        address payable refundReceiver,
        bytes memory signatures,
        address msgSender
    )
        external
    {
        msgSender; // silence unused variable warning
        _requireOnlySafe();

        // Cache the set of owners prior to execution.
        // This will be used in the checkAfterExecution method.
        address[] memory owners = SAFE.getOwners();
        for (uint256 i = 0; i < owners.length; i++) {
            ownersBefore.add(owners[i]);
        }

        // This call will reenter to the Safe which is calling it. This is OK because it is only reading the
        // nonce, and using the getTransactionHash() method.
        // NOTE: the Safe increments its nonce before invoking the guard, hence nonce() - 1 recovers
        // the nonce that was included in the hash the owners actually signed.
        bytes32 txHash = SAFE.getTransactionHash({
            to: to,
            value: value,
            data: data,
            operation: operation,
            safeTxGas: safeTxGas,
            baseGas: baseGas,
            gasPrice: gasPrice,
            gasToken: gasToken,
            refundReceiver: refundReceiver,
            _nonce: SAFE.nonce() - 1
        });

        uint256 threshold = SAFE.getThreshold();
        // Recover exactly `threshold` signers; the Safe has already validated these signatures,
        // so this guard only extracts the addresses (see SafeSigners).
        address[] memory signers =
            SafeSigners.getNSigners({ dataHash: txHash, signatures: signatures, requiredSignatures: threshold });

        for (uint256 i = 0; i < signers.length; i++) {
            lastLive[signers[i]] = block.timestamp;
            emit OwnerRecorded(signers[i]);
        }
    }

    /// @notice Update the lastLive mapping according to the set of owners before and after execution.
    /// @dev Called by the Safe contract after the execution of a transaction.
    ///      We use this post execution hook to compare the set of owners before and after.
    ///      If the set of owners has changed then we:
    ///      1. Add new owners to the lastLive mapping
    ///      2. Delete removed owners from the lastLive mapping
    ///      This function must never revert, otherwise the Safe would be unable to execute
    ///      transactions (see contract-level notice).
    function checkAfterExecution(bytes32, bool) external {
        _requireOnlySafe();
        // Get the current set of owners
        address[] memory ownersAfter = SAFE.getOwners();

        // Iterate over the current owners, and remove one at a time from the ownersBefore set.
        for (uint256 i = 0; i < ownersAfter.length; i++) {
            // If the value was present, remove() returns true.
            address ownerAfter = ownersAfter[i];
            if (ownersBefore.remove(ownerAfter) == false) {
                // This address was not already an owner, add it to the lastLive mapping
                lastLive[ownerAfter] = block.timestamp;
            }
        }

        // Now iterate over the remaining ownersBefore entries. Any remaining addresses are no longer an owner, so we
        // delete them from the lastLive mapping.
        // We cache the ownersBefore set before iterating over it, because the remove() method mutates the set.
        address[] memory ownersBeforeCache = ownersBefore.values();
        for (uint256 i = 0; i < ownersBeforeCache.length; i++) {
            address ownerBefore = ownersBeforeCache[i];
            delete lastLive[ownerBefore];
            ownersBefore.remove(ownerBefore);
        }
    }

    /// @notice Enables an owner to demonstrate liveness by calling this method directly.
    ///         This is useful for owners who have not recently signed a transaction via the Safe.
    function showLiveness() external {
        require(SAFE.isOwner(msg.sender), "LivenessGuard: only Safe owners may demonstrate liveness");
        lastLive[msg.sender] = block.timestamp;

        emit OwnerRecorded(msg.sender);
    }
}
This diff is collapsed.
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
library SafeSigners {
    /// @notice Splits signature bytes into `uint8 v, bytes32 r, bytes32 s`.
    ///         Copied directly from
    ///         https://github.com/safe-global/safe-contracts/blob/e870f514ad34cd9654c72174d6d4a839e3c6639f/contracts/common/SignatureDecoder.sol
    /// @dev Make sure to perform a bounds check for @param pos, to avoid out of bounds access on @param signatures
    ///      The signature format is a compact form of {bytes32 r}{bytes32 s}{uint8 v}
    ///      Compact means uint8 is not padded to 32 bytes.
    /// @param pos Which signature to read.
    ///            A prior bounds check of this parameter should be performed, to avoid out of bounds access.
    /// @param signatures Concatenated {r, s, v} signatures.
    /// @return v Recovery ID or Safe signature type.
    /// @return r Output value r of the signature.
    /// @return s Output value s of the signature.
    function signatureSplit(
        bytes memory signatures,
        uint256 pos
    )
        internal
        pure
        returns (uint8 v, bytes32 r, bytes32 s)
    {
        // solhint-disable-next-line no-inline-assembly
        assembly {
            // Each packed signature occupies 0x41 (65) bytes: r (32) + s (32) + v (1).
            let signaturePos := mul(0x41, pos)
            // The first 0x20 bytes of a `bytes memory` hold its length, so content starts at +0x20.
            r := mload(add(signatures, add(signaturePos, 0x20)))
            s := mload(add(signatures, add(signaturePos, 0x40)))
            /**
             * Here we are loading the last 32 bytes, including 31 bytes
             * of 's'. There is no 'mload8' to do this.
             * 'byte' is not working due to the Solidity parser, so lets
             * use the second best option, 'and'
             */
            v := and(mload(add(signatures, add(signaturePos, 0x41))), 0xff)
        }
    }

    /// @notice Extract the signers from a set of signatures.
    ///         This method is based closely on the code in the Safe.checkNSignatures() method.
    ///         https://github.com/safe-global/safe-contracts/blob/e870f514ad34cd9654c72174d6d4a839e3c6639f/contracts/Safe.sol#L274
    ///         It has been modified by removing all signature _validation_ code. We trust the Safe to properly validate
    ///         the signatures.
    ///         This method therefore simply extracts the addresses from the signatures.
    /// @param dataHash Hash of the data the signatures were produced over.
    /// @param signatures Concatenated {r, s, v} signatures (plus any appended contract-signature data).
    /// @param requiredSignatures Number of signatures to extract.
    /// @return _owners The recovered signer addresses, one per required signature.
    function getNSigners(
        bytes32 dataHash,
        bytes memory signatures,
        uint256 requiredSignatures
    )
        internal
        pure
        returns (address[] memory _owners)
    {
        _owners = new address[](requiredSignatures);

        address currentOwner;
        uint8 v;
        bytes32 r;
        bytes32 s;
        uint256 i;
        for (i = 0; i < requiredSignatures; i++) {
            (v, r, s) = signatureSplit(signatures, i);
            if (v == 0) {
                // If v is 0 then it is a contract signature
                // When handling contract signatures the address of the contract is encoded into r
                currentOwner = address(uint160(uint256(r)));
            } else if (v == 1) {
                // If v is 1 then it is an approved hash
                // When handling approved hashes the address of the approver is encoded into r
                currentOwner = address(uint160(uint256(r)));
            } else if (v > 30) {
                // If v > 30 then default v (27,28) has been adjusted for eth_sign flow
                // To support eth_sign and similar we adjust v and hash the messageHash with the Ethereum message prefix
                // before applying ecrecover
                currentOwner =
                    ecrecover(keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", dataHash)), v - 4, r, s);
            } else {
                // Default is the ecrecover flow with the provided data hash
                // Use ecrecover with the messageHash for EOA signatures
                currentOwner = ecrecover(dataHash, v, r, s);
            }
            _owners[i] = currentOwner;
        }
    }
}
...@@ -3,6 +3,7 @@ pragma solidity 0.8.15; ...@@ -3,6 +3,7 @@ pragma solidity 0.8.15;
// Testing utilities // Testing utilities
import { Test, StdUtils } from "forge-std/Test.sol"; import { Test, StdUtils } from "forge-std/Test.sol";
import { Vm } from "forge-std/Vm.sol";
import { L2OutputOracle } from "src/L1/L2OutputOracle.sol"; import { L2OutputOracle } from "src/L1/L2OutputOracle.sol";
import { L2ToL1MessagePasser } from "src/L2/L2ToL1MessagePasser.sol"; import { L2ToL1MessagePasser } from "src/L2/L2ToL1MessagePasser.sol";
import { L1StandardBridge } from "src/L1/L1StandardBridge.sol"; import { L1StandardBridge } from "src/L1/L1StandardBridge.sol";
...@@ -731,6 +732,18 @@ contract FFIInterface is Test { ...@@ -731,6 +732,18 @@ contract FFIInterface is Test {
} }
} }
/// @title EIP1967Helper
/// @notice Test helper for reading the admin and implementation addresses directly out of a
///         proxy's storage via the Foundry `vm.load` cheatcode.
library EIP1967Helper {
    // Foundry's Vm cheatcode contract lives at this well-known address:
    // address(uint160(uint256(keccak256("hevm cheat code")))).
    Vm internal constant vm = Vm(0x7109709ECfa91a80626fF3989D68f67F5b1DD12D);

    /// @notice Reads the proxy admin address from the slot named by Constants.PROXY_OWNER_ADDRESS.
    /// @dev Presumably this is the proxy's admin storage slot — confirm against Constants.sol.
    function getAdmin(address _proxy) internal view returns (address) {
        return address(uint160(uint256(vm.load(address(_proxy), Constants.PROXY_OWNER_ADDRESS))));
    }

    /// @notice Reads the implementation address from the slot named by
    ///         Constants.PROXY_IMPLEMENTATION_ADDRESS.
    function getImplementation(address _proxy) internal view returns (address) {
        return address(uint160(uint256(vm.load(address(_proxy), Constants.PROXY_IMPLEMENTATION_ADDRESS))));
    }
}
// Used for testing a future upgrade beyond the current implementations. // Used for testing a future upgrade beyond the current implementations.
// We include some variables so that we can sanity check accessing storage values after an upgrade. // We include some variables so that we can sanity check accessing storage values after an upgrade.
contract NextImpl is Initializable { contract NextImpl is Initializable {
......
...@@ -3,7 +3,7 @@ pragma solidity 0.8.15; ...@@ -3,7 +3,7 @@ pragma solidity 0.8.15;
// Testing utilities // Testing utilities
import { stdError } from "forge-std/Test.sol"; import { stdError } from "forge-std/Test.sol";
import { L2OutputOracle_Initializer, NextImpl } from "test/CommonTest.t.sol"; import { L2OutputOracle_Initializer, NextImpl, EIP1967Helper } from "test/CommonTest.t.sol";
// Libraries // Libraries
import { Types } from "src/libraries/Types.sol"; import { Types } from "src/libraries/Types.sol";
...@@ -463,7 +463,7 @@ contract L2OutputOracleUpgradeable_Test is L2OutputOracle_Initializer { ...@@ -463,7 +463,7 @@ contract L2OutputOracleUpgradeable_Test is L2OutputOracle_Initializer {
assertEq(bytes32(0), slot21Before); assertEq(bytes32(0), slot21Before);
NextImpl nextImpl = new NextImpl(); NextImpl nextImpl = new NextImpl();
vm.startPrank(multisig); vm.startPrank(EIP1967Helper.getAdmin(address(proxy)));
proxy.upgradeToAndCall( proxy.upgradeToAndCall(
address(nextImpl), abi.encodeWithSelector(NextImpl.initialize.selector, Constants.INITIALIZER + 1) address(nextImpl), abi.encodeWithSelector(NextImpl.initialize.selector, Constants.INITIALIZER + 1)
); );
......
This diff is collapsed.
This diff is collapsed.
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { Test } from "forge-std/Test.sol";
import { Safe } from "safe-contracts/Safe.sol";
import { SafeSigners } from "src/Safe/SafeSigners.sol";
import "test/safe-tools/SafeTestTools.sol";
import { SignatureDecoder } from "safe-contracts/common/SignatureDecoder.sol";
/// @title SafeSigners_Test
/// @notice Differential test demonstrating that SafeSigners.getNSigners() recovers the same set
///         of signers that the Safe's own checkNSignatures() accepts.
contract SafeSigners_Test is Test, SafeTestTools {
    // Legacy EIP-1271 magic value for isValidSignature(bytes,bytes).
    bytes4 internal constant EIP1271_MAGIC_VALUE = 0x20c13b0b;

    // The four signature encodings supported by the Safe's checkNSignatures() flow.
    enum SigTypes {
        Eoa,
        EthSign,
        ApprovedHash,
        Contract
    }

    /// @dev Maps every key to one of the 4 signatures types.
    ///      This is used in the tests below as a pseudorandom mechanism for determining which
    ///      signature type to use for each key.
    /// @param _key The key to map to a signature type.
    function sigType(uint256 _key) internal pure returns (SigTypes sigType_) {
        uint256 t = _key % 4;
        sigType_ = SigTypes(t);
    }

    /// @dev Test that for a given set of signatures:
    ///      1. safe.checkNSignatures() succeeds
    ///      2. the getSigners() method returns the expected signers
    ///      3. the expected signers are all owners of the safe.
    ///      Demonstrating these three properties is sufficient to prove that the getSigners() method
    ///      returns the same signatures as those recovered by safe.checkNSignatures().
    function testDiff_getSignaturesVsCheckSignatures_succeeds(bytes memory _data, uint256 _numSigs) external {
        bytes32 digest = keccak256(_data);

        // Limit the number of signatures to 25
        uint256 numSigs = bound(_numSigs, 1, 25);
        (, uint256[] memory keys) = SafeTestLib.makeAddrsAndKeys("getSigsTest", numSigs);
        // Keys destined for the Contract signature type are re-encoded so SafeTestTools treats
        // them as smart-contract-wallet owners.
        for (uint256 i = 0; i < keys.length; i++) {
            if (sigType(keys[i]) == SigTypes.Contract) {
                keys[i] =
                    SafeTestLib.encodeSmartContractWalletAsPK(SafeTestLib.decodeSmartContractWalletAsAddress(keys[i]));
            }
        }

        // Create a new safeInstance with M=N, so that it requires a signature from each key.
        SafeInstance memory safeInstance = SafeTestTools._setupSafe(keys, numSigs, 0);

        // Next we will generate signatures by iterating over the keys, and choosing the signature type
        // based on the key.
        uint8 v;
        bytes32 r;
        bytes32 s;
        uint256 contractSigs;
        bytes memory signatures;
        uint256[] memory pks = safeInstance.ownerPKs;
        for (uint256 i; i < pks.length; i++) {
            if (sigType(pks[i]) == SigTypes.Eoa) {
                (v, r, s) = vm.sign(pks[i], digest);
                signatures = bytes.concat(signatures, abi.encodePacked(r, s, v));
            } else if (sigType(pks[i]) == SigTypes.EthSign) {
                // eth_sign flow: sign the prefixed digest and offset v by 4, matching the
                // `v > 30` branch in SafeSigners.getNSigners().
                (v, r, s) = vm.sign(pks[i], keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", digest)));
                v += 4;
                signatures = bytes.concat(signatures, abi.encodePacked(r, s, v));
            } else if (sigType(pks[i]) == SigTypes.ApprovedHash) {
                vm.prank(SafeTestLib.getAddr(pks[i]));
                safeInstance.safe.approveHash(digest);
                v = 1;
                // s is not checked on approved hash signatures, so we can leave it as zero.
                r = bytes32(uint256(uint160(SafeTestLib.getAddr(pks[i]))));
                signatures = bytes.concat(signatures, abi.encodePacked(r, s, v));
            } else if (sigType(pks[i]) == SigTypes.Contract) {
                contractSigs++;
                address addr = SafeTestLib.decodeSmartContractWalletAsAddress(pks[i]);
                r = bytes32(uint256(uint160(addr)));
                // Mock the wallet to accept any signature via the legacy EIP-1271 interface.
                vm.mockCall(
                    addr, abi.encodeWithSignature("isValidSignature(bytes,bytes)"), abi.encode(EIP1271_MAGIC_VALUE)
                );
                v = 0;
                // s needs to point to data that comes after the signatures
                s = bytes32(numSigs * 65);
                signatures = bytes.concat(signatures, abi.encodePacked(r, s, v));
            }
        }

        // For each contract sig, add 64 bytes to the signature data. This is necessary to satisfy
        // the validation checks that the Safe contract performs on the value of s on contract
        // signatures. The Safe contract checks that s correctly points to additional data appended
        // after the signatures, and that the length of the data is within bounds.
        for (uint256 i = 0; i < contractSigs; i++) {
            signatures = bytes.concat(signatures, abi.encode(32, 1));
        }

        // Signature checking on the Safe should succeed.
        safeInstance.safe.checkNSignatures(digest, _data, signatures, numSigs);

        // Recover the signatures using the _getNSigners() method.
        address[] memory gotSigners =
            SafeSigners.getNSigners({ dataHash: digest, signatures: signatures, requiredSignatures: numSigs });

        // Compare the list of recovered signers to the expected signers.
        assertEq(gotSigners.length, numSigs);
        assertEq(gotSigners.length, safeInstance.owners.length);
        for (uint256 i; i < numSigs; i++) {
            assertEq(safeInstance.owners[i], gotSigners[i]);
        }
    }
}
...@@ -12,6 +12,7 @@ import { ResourceMetering } from "src/L1/ResourceMetering.sol"; ...@@ -12,6 +12,7 @@ import { ResourceMetering } from "src/L1/ResourceMetering.sol";
import { Constants } from "src/libraries/Constants.sol"; import { Constants } from "src/libraries/Constants.sol";
import { Portal_Initializer } from "test/CommonTest.t.sol"; import { Portal_Initializer } from "test/CommonTest.t.sol";
import { EIP1967Helper } from "test/CommonTest.t.sol";
import { Types } from "src/libraries/Types.sol"; import { Types } from "src/libraries/Types.sol";
contract OptimismPortal_Depositor is StdUtils, ResourceMetering { contract OptimismPortal_Depositor is StdUtils, ResourceMetering {
...@@ -158,8 +159,8 @@ contract OptimismPortal_CannotTimeTravel is OptimismPortal_Invariant_Harness { ...@@ -158,8 +159,8 @@ contract OptimismPortal_CannotTimeTravel is OptimismPortal_Invariant_Harness {
// Set the target contract to the portal proxy // Set the target contract to the portal proxy
targetContract(address(op)); targetContract(address(op));
// Exclude the proxy multisig from the senders so that the proxy cannot be upgraded // Exclude the proxy admin from the senders so that the proxy cannot be upgraded
excludeSender(address(multisig)); excludeSender(EIP1967Helper.getAdmin(address(op)));
} }
/// @custom:invariant `finalizeWithdrawalTransaction` should revert if the finalization /// @custom:invariant `finalizeWithdrawalTransaction` should revert if the finalization
...@@ -188,8 +189,8 @@ contract OptimismPortal_CannotFinalizeTwice is OptimismPortal_Invariant_Harness ...@@ -188,8 +189,8 @@ contract OptimismPortal_CannotFinalizeTwice is OptimismPortal_Invariant_Harness
// Set the target contract to the portal proxy // Set the target contract to the portal proxy
targetContract(address(op)); targetContract(address(op));
// Exclude the proxy multisig from the senders so that the proxy cannot be upgraded // Exclude the proxy admin from the senders so that the proxy cannot be upgraded
excludeSender(address(multisig)); excludeSender(EIP1967Helper.getAdmin(address(op)));
} }
/// @custom:invariant `finalizeWithdrawalTransaction` should revert if the withdrawal /// @custom:invariant `finalizeWithdrawalTransaction` should revert if the withdrawal
...@@ -215,8 +216,8 @@ contract OptimismPortal_CanAlwaysFinalizeAfterWindow is OptimismPortal_Invariant ...@@ -215,8 +216,8 @@ contract OptimismPortal_CanAlwaysFinalizeAfterWindow is OptimismPortal_Invariant
// Set the target contract to the portal proxy // Set the target contract to the portal proxy
targetContract(address(op)); targetContract(address(op));
// Exclude the proxy multisig from the senders so that the proxy cannot be upgraded // Exclude the proxy admin from the senders so that the proxy cannot be upgraded
excludeSender(address(multisig)); excludeSender(EIP1967Helper.getAdmin(address(op)));
} }
/// @custom:invariant A withdrawal should **always** be able to be finalized /// @custom:invariant A withdrawal should **always** be able to be finalized
......
// SPDX-License-Identifier: LGPL-3.0-only
pragma solidity >=0.7.0 <0.9.0;
import "safe-contracts/interfaces/ERC1155TokenReceiver.sol";
import "safe-contracts/interfaces/ERC721TokenReceiver.sol";
import "safe-contracts/interfaces/ERC777TokensRecipient.sol";
import "safe-contracts/interfaces/IERC165.sol";
import "safe-contracts/interfaces/ISignatureValidator.sol";
import { Safe as GnosisSafe } from "safe-contracts/Safe.sol";
/// author: Colin Nielsen
/// https://github.com/colinnielsen/safe-tools/blob/ce6c654a76d91b619ab7778c77d1a76b3ced6666/src/CompatibilityFallbackHandler_1_3_0.sol
contract DefaultCallbackHandler is ERC1155TokenReceiver, ERC777TokensRecipient, ERC721TokenReceiver, IERC165 {
    /// @notice Accepts a single ERC-1155 token transfer.
    /// @return The magic value 0xf23a6e61 signalling acceptance.
    function onERC1155Received(address, address, uint256, uint256, bytes calldata) external pure override returns (bytes4) {
        return 0xf23a6e61;
    }

    /// @notice Accepts a batched ERC-1155 token transfer.
    /// @return The magic value 0xbc197c81 signalling acceptance.
    function onERC1155BatchReceived(address, address, uint256[] calldata, uint256[] calldata, bytes calldata) external pure override returns (bytes4) {
        return 0xbc197c81;
    }

    /// @notice Accepts an ERC-721 token transfer.
    /// @return The magic value 0x150b7a02 signalling acceptance.
    function onERC721Received(address, address, uint256, bytes calldata) external pure override returns (bytes4) {
        return 0x150b7a02;
    }

    /// @notice ERC-777 recipient hook; accepts the tokens without taking any action.
    function tokensReceived(address, address, address, uint256, bytes calldata, bytes calldata) external pure override {
        // We implement this for completeness, doesn't really have any value
    }

    /// @notice ERC-165 interface detection for the three receiver interfaces implemented here.
    /// @param interfaceId The interface identifier to query.
    /// @return True when the queried interface is IERC165, ERC1155TokenReceiver, or ERC721TokenReceiver.
    function supportsInterface(bytes4 interfaceId) external view virtual override returns (bool) {
        return interfaceId == type(IERC165).interfaceId
            || interfaceId == type(ERC1155TokenReceiver).interfaceId
            || interfaceId == type(ERC721TokenReceiver).interfaceId;
    }
}
// Sentinel value marking the head of the Safe's linked list of modules.
address constant SENTINEL_MODULES = address(0x1);

/// @title Compatibility Fallback Handler - fallback handler to provide compatibility between pre 1.3.0 and 1.3.0+ Safe
///        contracts
/// @author Richard Meissner - <richard@gnosis.pm>
contract CompatibilityFallbackHandler is DefaultCallbackHandler, ISignatureValidator {
    //keccak256(
    //    "SafeMessage(bytes message)"
    //);
    bytes32 private constant SAFE_MSG_TYPEHASH = 0x60b3cbf8b4a223d68d641b3b6ddf9a298e7f33710cf3d3a9d1146b5a6150fbca;

    bytes4 internal constant SIMULATE_SELECTOR = bytes4(keccak256("simulate(address,bytes)"));

    // Updated EIP-1271 magic value for isValidSignature(bytes32,bytes).
    bytes4 internal constant UPDATED_MAGIC_VALUE = 0x1626ba7e;

    /**
     * Implementation of ISignatureValidator (see `interfaces/ISignatureValidator.sol`)
     * @dev Should return whether the signature provided is valid for the provided data.
     * @param _data Arbitrary length data signed on the behalf of address(msg.sender)
     * @param _signature Signature byte array associated with _data
     * @return a bool upon valid or invalid signature with corresponding _data
     */
    function isValidSignature(bytes memory _data, bytes memory _signature) public view override returns (bytes4) {
        // Caller should be a Safe
        GnosisSafe safe = GnosisSafe(payable(msg.sender));
        bytes32 messageHash = getMessageHashForSafe(safe, _data);
        if (_signature.length == 0) {
            // An empty signature is valid only if the Safe pre-approved this message on-chain.
            require(safe.signedMessages(messageHash) != 0, "Hash not approved");
        } else {
            safe.checkSignatures(messageHash, _data, _signature);
        }
        return EIP1271_MAGIC_VALUE;
    }

    /// @dev Returns hash of a message that can be signed by owners.
    /// @param message Message that should be hashed
    /// @return Message hash.
    function getMessageHash(bytes memory message) public view returns (bytes32) {
        return getMessageHashForSafe(GnosisSafe(payable(msg.sender)), message);
    }

    /// @dev Returns hash of a message that can be signed by owners.
    /// @param safe Safe to which the message is targeted
    /// @param message Message that should be hashed
    /// @return Message hash.
    function getMessageHashForSafe(GnosisSafe safe, bytes memory message) public view returns (bytes32) {
        // EIP-712 style hash: 0x1901 || domainSeparator || hashStruct(SafeMessage).
        bytes32 safeMessageHash = keccak256(abi.encode(SAFE_MSG_TYPEHASH, keccak256(message)));
        return keccak256(abi.encodePacked(bytes1(0x19), bytes1(0x01), safe.domainSeparator(), safeMessageHash));
    }

    /**
     * Implementation of updated EIP-1271
     * @dev Should return whether the signature provided is valid for the provided data.
     *      The safe does not implement the interface since `checkSignatures` is not a view method.
     *      The method will not perform any state changes (see parameters of `checkSignatures`)
     * @param _dataHash Hash of the data signed on the behalf of address(msg.sender)
     * @param _signature Signature byte array associated with _dataHash
     * @return a bool upon valid or invalid signature with corresponding _dataHash
     * @notice See
     * https://github.com/gnosis/util-contracts/blob/bb5fe5fb5df6d8400998094fb1b32a178a47c3a1/contracts/StorageAccessible.sol
     */
    function isValidSignature(bytes32 _dataHash, bytes calldata _signature) external view returns (bytes4) {
        // Delegates to the legacy isValidSignature(bytes,bytes) on the caller, then translates
        // the legacy magic value into the updated one.
        ISignatureValidator validator = ISignatureValidator(msg.sender);
        bytes4 value = validator.isValidSignature(abi.encode(_dataHash), _signature);
        return (value == EIP1271_MAGIC_VALUE) ? UPDATED_MAGIC_VALUE : bytes4(0);
    }

    /// @dev Returns array of first 10 modules.
    /// @return Array of modules.
    function getModules() external view returns (address[] memory) {
        // Caller should be a Safe
        GnosisSafe safe = GnosisSafe(payable(msg.sender));
        (address[] memory array,) = safe.getModulesPaginated(SENTINEL_MODULES, 10);
        return array;
    }

    /**
     * @dev Performs a delegatecall on a targetContract in the context of self.
     * Internally reverts execution to avoid side effects (making it static). Catches revert and returns encoded result
     * as bytes.
     * @param targetContract Address of the contract containing the code to execute.
     * @param calldataPayload Calldata that should be sent to the target contract (encoded method name and arguments).
     */
    function simulate(
        address targetContract,
        bytes calldata calldataPayload
    )
        external
        returns (bytes memory response)
    {
        // Suppress compiler warnings about not using parameters, while allowing
        // parameters to keep names for documentation purposes. This does not
        // generate code.
        targetContract;
        calldataPayload;

        // solhint-disable-next-line no-inline-assembly
        assembly {
            let internalCalldata := mload(0x40)
            // Store `simulateAndRevert.selector`.
            // String representation is used to force right padding
            mstore(internalCalldata, "\xb4\xfa\xba\x09")
            // Abuse the fact that both this and the internal methods have the
            // same signature, and differ only in symbol name (and therefore,
            // selector) and copy calldata directly. This saves us approximately
            // 250 bytes of code and 300 gas at runtime over the
            // `abi.encodeWithSelector` builtin.
            calldatacopy(add(internalCalldata, 0x04), 0x04, sub(calldatasize(), 0x04))

            // `pop` is required here by the compiler, as top level expressions
            // can't have return values in inline assembly. `call` typically
            // returns a 0 or 1 value indicating whether or not it reverted, but
            // since we know it will always revert, we can safely ignore it.
            pop(
                call(
                    gas(),
                    // address() has been changed to caller() to use the implementation of the Safe
                    caller(),
                    0,
                    internalCalldata,
                    calldatasize(),
                    // The `simulateAndRevert` call always reverts, and
                    // instead encodes whether or not it was successful in the return
                    // data. The first 32-byte word of the return data contains the
                    // `success` value, so write it to memory address 0x00 (which is
                    // reserved Solidity scratch space and OK to use).
                    0x00,
                    0x20
                )
            )

            // Allocate and copy the response bytes, making sure to increment
            // the free memory pointer accordingly (in case this method is
            // called as an internal function). The remaining `returndata[0x20:]`
            // contains the ABI encoded response bytes, so we can just write it
            // as is to memory.
            let responseSize := sub(returndatasize(), 0x20)
            response := mload(0x40)
            mstore(0x40, add(response, responseSize))
            returndatacopy(response, 0x20, responseSize)

            // Bubble up the revert if the simulated call reported failure.
            if iszero(mload(0x00)) { revert(add(response, 0x20), mload(response)) }
        }
    }
}
This diff is collapsed.
...@@ -82,6 +82,6 @@ ...@@ -82,6 +82,6 @@
"change-case": "4.1.2", "change-case": "4.1.2",
"react": "^18.2.0", "react": "^18.2.0",
"react-dom": "^18.2.0", "react-dom": "^18.2.0",
"viem": "^1.17.1" "viem": "^1.18.0"
} }
} }
...@@ -44,7 +44,7 @@ ...@@ -44,7 +44,7 @@
"jsdom": "^22.1.0", "jsdom": "^22.1.0",
"tsup": "^7.2.0", "tsup": "^7.2.0",
"typescript": "^5.2.2", "typescript": "^5.2.2",
"viem": "^1.17.1", "viem": "^1.18.0",
"vite": "^4.5.0", "vite": "^4.5.0",
"vitest": "^0.34.2" "vitest": "^0.34.2"
}, },
......
...@@ -56,7 +56,7 @@ ...@@ -56,7 +56,7 @@
"ts-node": "^10.9.1", "ts-node": "^10.9.1",
"typedoc": "^0.25.2", "typedoc": "^0.25.2",
"typescript": "^5.2.2", "typescript": "^5.2.2",
"viem": "^1.17.1", "viem": "^1.18.0",
"vitest": "^0.34.2", "vitest": "^0.34.2",
"zod": "^3.22.4" "zod": "^3.22.4"
}, },
......
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
"@vitest/coverage-istanbul": "^0.34.6", "@vitest/coverage-istanbul": "^0.34.6",
"tsup": "^7.2.0", "tsup": "^7.2.0",
"typescript": "^5.2.2", "typescript": "^5.2.2",
"viem": "^1.17.1", "viem": "^1.18.0",
"vite": "^4.5.0", "vite": "^4.5.0",
"vitest": "^0.34.1", "vitest": "^0.34.1",
"zod": "^3.22.4" "zod": "^3.22.4"
......
...@@ -298,11 +298,11 @@ importers: ...@@ -298,11 +298,11 @@ importers:
specifier: ^18.2.0 specifier: ^18.2.0
version: 18.2.0(react@18.2.0) version: 18.2.0(react@18.2.0)
viem: viem:
specifier: ^1.17.1 specifier: ^1.18.0
version: 1.17.1(typescript@5.2.2)(zod@3.22.4) version: 1.18.0(typescript@5.2.2)(zod@3.22.4)
wagmi: wagmi:
specifier: '>1.0.0' specifier: '>1.0.0'
version: 1.0.1(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2)(viem@1.17.1) version: 1.0.1(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2)(viem@1.18.0)
devDependencies: devDependencies:
'@eth-optimism/contracts-bedrock': '@eth-optimism/contracts-bedrock':
specifier: workspace:* specifier: workspace:*
...@@ -324,7 +324,7 @@ importers: ...@@ -324,7 +324,7 @@ importers:
version: 1.5.2(@wagmi/core@1.4.5)(typescript@5.2.2)(wagmi@1.0.1) version: 1.5.2(@wagmi/core@1.4.5)(typescript@5.2.2)(wagmi@1.0.1)
'@wagmi/core': '@wagmi/core':
specifier: ^1.4.5 specifier: ^1.4.5
version: 1.4.5(react@18.2.0)(typescript@5.2.2)(viem@1.17.1) version: 1.4.5(react@18.2.0)(typescript@5.2.2)(viem@1.18.0)
abitype: abitype:
specifier: ^0.10.1 specifier: ^0.10.1
version: 0.10.1(typescript@5.2.2) version: 0.10.1(typescript@5.2.2)
...@@ -438,8 +438,8 @@ importers: ...@@ -438,8 +438,8 @@ importers:
specifier: ^5.2.2 specifier: ^5.2.2
version: 5.2.2 version: 5.2.2
viem: viem:
specifier: ^1.17.1 specifier: ^1.18.0
version: 1.17.1(typescript@5.2.2)(zod@3.22.4) version: 1.18.0(typescript@5.2.2)(zod@3.22.4)
vite: vite:
specifier: ^4.5.0 specifier: ^4.5.0
version: 4.5.0(@types/node@20.8.9) version: 4.5.0(@types/node@20.8.9)
...@@ -529,8 +529,8 @@ importers: ...@@ -529,8 +529,8 @@ importers:
specifier: ^5.2.2 specifier: ^5.2.2
version: 5.2.2 version: 5.2.2
viem: viem:
specifier: ^1.17.1 specifier: ^1.18.0
version: 1.17.1(typescript@5.2.2)(zod@3.22.4) version: 1.18.0(typescript@5.2.2)(zod@3.22.4)
vitest: vitest:
specifier: ^0.34.2 specifier: ^0.34.2
version: 0.34.2 version: 0.34.2
...@@ -569,8 +569,8 @@ importers: ...@@ -569,8 +569,8 @@ importers:
specifier: ^5.2.2 specifier: ^5.2.2
version: 5.2.2 version: 5.2.2
viem: viem:
specifier: ^1.17.1 specifier: ^1.18.0
version: 1.17.1(typescript@5.2.2)(zod@3.22.4) version: 1.18.0(typescript@5.2.2)(zod@3.22.4)
vite: vite:
specifier: ^4.5.0 specifier: ^4.5.0
version: 4.5.0(@types/node@20.8.9) version: 4.5.0(@types/node@20.8.9)
...@@ -3219,7 +3219,7 @@ packages: ...@@ -3219,7 +3219,7 @@ packages:
resolution: {integrity: sha512-gYw0ki/EAuV1oSyMxpqandHjnthZjYYy+YWpTAzf8BqfXM3ItcZLpjxfg+3+mXW8HIO+3jw6T9iiqEXsqHaMMw==} resolution: {integrity: sha512-gYw0ki/EAuV1oSyMxpqandHjnthZjYYy+YWpTAzf8BqfXM3ItcZLpjxfg+3+mXW8HIO+3jw6T9iiqEXsqHaMMw==}
dependencies: dependencies:
'@safe-global/safe-gateway-typescript-sdk': 3.7.3 '@safe-global/safe-gateway-typescript-sdk': 3.7.3
viem: 1.17.1(typescript@5.2.2)(zod@3.22.4) viem: 1.18.0(typescript@5.2.2)(zod@3.22.4)
transitivePeerDependencies: transitivePeerDependencies:
- bufferutil - bufferutil
- encoding - encoding
...@@ -4584,7 +4584,7 @@ packages: ...@@ -4584,7 +4584,7 @@ packages:
wagmi: wagmi:
optional: true optional: true
dependencies: dependencies:
'@wagmi/core': 1.4.5(react@18.2.0)(typescript@5.2.2)(viem@1.17.1) '@wagmi/core': 1.4.5(react@18.2.0)(typescript@5.2.2)(viem@1.18.0)
abitype: 0.8.7(typescript@5.2.2)(zod@3.22.3) abitype: 0.8.7(typescript@5.2.2)(zod@3.22.3)
abort-controller: 3.0.0 abort-controller: 3.0.0
bundle-require: 3.1.2(esbuild@0.16.17) bundle-require: 3.1.2(esbuild@0.16.17)
...@@ -4606,15 +4606,15 @@ packages: ...@@ -4606,15 +4606,15 @@ packages:
picocolors: 1.0.0 picocolors: 1.0.0
prettier: 2.8.8 prettier: 2.8.8
typescript: 5.2.2 typescript: 5.2.2
viem: 1.17.1(typescript@5.2.2)(zod@3.22.3) viem: 1.18.0(typescript@5.2.2)(zod@3.22.3)
wagmi: 1.0.1(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2)(viem@1.17.1) wagmi: 1.0.1(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2)(viem@1.18.0)
zod: 3.22.3 zod: 3.22.3
transitivePeerDependencies: transitivePeerDependencies:
- bufferutil - bufferutil
- utf-8-validate - utf-8-validate
dev: true dev: true
/@wagmi/connectors@1.0.1(@wagmi/chains@0.2.22)(react@18.2.0)(typescript@5.2.2)(viem@1.17.1): /@wagmi/connectors@1.0.1(@wagmi/chains@0.2.22)(react@18.2.0)(typescript@5.2.2)(viem@1.18.0):
resolution: {integrity: sha512-fl01vym19DE1uoE+MlASw5zo3Orr/YXlJRjOKLaKYtV+Q7jOLY4TwHgq7sEMs+JYOvFICFBEAlWNNxidr51AqQ==} resolution: {integrity: sha512-fl01vym19DE1uoE+MlASw5zo3Orr/YXlJRjOKLaKYtV+Q7jOLY4TwHgq7sEMs+JYOvFICFBEAlWNNxidr51AqQ==}
peerDependencies: peerDependencies:
'@wagmi/chains': '>=0.2.0' '@wagmi/chains': '>=0.2.0'
...@@ -4637,7 +4637,7 @@ packages: ...@@ -4637,7 +4637,7 @@ packages:
abitype: 0.8.1(typescript@5.2.2) abitype: 0.8.1(typescript@5.2.2)
eventemitter3: 4.0.7 eventemitter3: 4.0.7
typescript: 5.2.2 typescript: 5.2.2
viem: 1.17.1(typescript@5.2.2)(zod@3.22.4) viem: 1.18.0(typescript@5.2.2)(zod@3.22.4)
transitivePeerDependencies: transitivePeerDependencies:
- '@react-native-async-storage/async-storage' - '@react-native-async-storage/async-storage'
- bufferutil - bufferutil
...@@ -4649,7 +4649,7 @@ packages: ...@@ -4649,7 +4649,7 @@ packages:
- utf-8-validate - utf-8-validate
- zod - zod
/@wagmi/connectors@3.1.3(react@18.2.0)(typescript@5.2.2)(viem@1.17.1): /@wagmi/connectors@3.1.3(react@18.2.0)(typescript@5.2.2)(viem@1.18.0):
resolution: {integrity: sha512-UgwsQKQDFObJVJMf9pDfFoXTv710o4zrTHyhIWKBTMMkLpCMsMxN5+ZaDhBYt/BgoRinfRYQo8uwuwLhxE6Log==} resolution: {integrity: sha512-UgwsQKQDFObJVJMf9pDfFoXTv710o4zrTHyhIWKBTMMkLpCMsMxN5+ZaDhBYt/BgoRinfRYQo8uwuwLhxE6Log==}
peerDependencies: peerDependencies:
typescript: '>=5.0.4' typescript: '>=5.0.4'
...@@ -4669,7 +4669,7 @@ packages: ...@@ -4669,7 +4669,7 @@ packages:
abitype: 0.8.7(typescript@5.2.2)(zod@3.22.3) abitype: 0.8.7(typescript@5.2.2)(zod@3.22.3)
eventemitter3: 4.0.7 eventemitter3: 4.0.7
typescript: 5.2.2 typescript: 5.2.2
viem: 1.17.1(typescript@5.2.2)(zod@3.22.4) viem: 1.18.0(typescript@5.2.2)(zod@3.22.4)
transitivePeerDependencies: transitivePeerDependencies:
- '@react-native-async-storage/async-storage' - '@react-native-async-storage/async-storage'
- '@types/react' - '@types/react'
...@@ -4682,7 +4682,7 @@ packages: ...@@ -4682,7 +4682,7 @@ packages:
- zod - zod
dev: true dev: true
/@wagmi/core@1.0.1(react@18.2.0)(typescript@5.2.2)(viem@1.17.1): /@wagmi/core@1.0.1(react@18.2.0)(typescript@5.2.2)(viem@1.18.0):
resolution: {integrity: sha512-Zzg4Ob92QMF9NsC+z5/8JZjMn3NCCnwVWGJlv79qRX9mp5Ku40OzJNvqDnjcSGjshe6H0L/KtFZAqTlmu8lT7w==} resolution: {integrity: sha512-Zzg4Ob92QMF9NsC+z5/8JZjMn3NCCnwVWGJlv79qRX9mp5Ku40OzJNvqDnjcSGjshe6H0L/KtFZAqTlmu8lT7w==}
peerDependencies: peerDependencies:
typescript: '>=4.9.4' typescript: '>=4.9.4'
...@@ -4692,11 +4692,11 @@ packages: ...@@ -4692,11 +4692,11 @@ packages:
optional: true optional: true
dependencies: dependencies:
'@wagmi/chains': 0.2.22(typescript@5.2.2) '@wagmi/chains': 0.2.22(typescript@5.2.2)
'@wagmi/connectors': 1.0.1(@wagmi/chains@0.2.22)(react@18.2.0)(typescript@5.2.2)(viem@1.17.1) '@wagmi/connectors': 1.0.1(@wagmi/chains@0.2.22)(react@18.2.0)(typescript@5.2.2)(viem@1.18.0)
abitype: 0.8.1(typescript@5.2.2) abitype: 0.8.1(typescript@5.2.2)
eventemitter3: 4.0.7 eventemitter3: 4.0.7
typescript: 5.2.2 typescript: 5.2.2
viem: 1.17.1(typescript@5.2.2)(zod@3.22.4) viem: 1.18.0(typescript@5.2.2)(zod@3.22.4)
zustand: 4.3.9(react@18.2.0) zustand: 4.3.9(react@18.2.0)
transitivePeerDependencies: transitivePeerDependencies:
- '@react-native-async-storage/async-storage' - '@react-native-async-storage/async-storage'
...@@ -4710,7 +4710,7 @@ packages: ...@@ -4710,7 +4710,7 @@ packages:
- utf-8-validate - utf-8-validate
- zod - zod
/@wagmi/core@1.4.5(react@18.2.0)(typescript@5.2.2)(viem@1.17.1): /@wagmi/core@1.4.5(react@18.2.0)(typescript@5.2.2)(viem@1.18.0):
resolution: {integrity: sha512-N9luRb1Uk4tBN9kaYcQSWKE9AsRt/rvZaFt5IZech4JPzNN2sQlfhKd9GEjOXYRDqEPHdDvos7qyBKiDNTz4GA==} resolution: {integrity: sha512-N9luRb1Uk4tBN9kaYcQSWKE9AsRt/rvZaFt5IZech4JPzNN2sQlfhKd9GEjOXYRDqEPHdDvos7qyBKiDNTz4GA==}
peerDependencies: peerDependencies:
typescript: '>=5.0.4' typescript: '>=5.0.4'
...@@ -4719,11 +4719,11 @@ packages: ...@@ -4719,11 +4719,11 @@ packages:
typescript: typescript:
optional: true optional: true
dependencies: dependencies:
'@wagmi/connectors': 3.1.3(react@18.2.0)(typescript@5.2.2)(viem@1.17.1) '@wagmi/connectors': 3.1.3(react@18.2.0)(typescript@5.2.2)(viem@1.18.0)
abitype: 0.8.7(typescript@5.2.2)(zod@3.22.3) abitype: 0.8.7(typescript@5.2.2)(zod@3.22.3)
eventemitter3: 4.0.7 eventemitter3: 4.0.7
typescript: 5.2.2 typescript: 5.2.2
viem: 1.17.1(typescript@5.2.2)(zod@3.22.4) viem: 1.18.0(typescript@5.2.2)(zod@3.22.4)
zustand: 4.3.9(react@18.2.0) zustand: 4.3.9(react@18.2.0)
transitivePeerDependencies: transitivePeerDependencies:
- '@react-native-async-storage/async-storage' - '@react-native-async-storage/async-storage'
...@@ -14318,8 +14318,8 @@ packages: ...@@ -14318,8 +14318,8 @@ packages:
vfile-message: 2.0.4 vfile-message: 2.0.4
dev: true dev: true
/viem@1.17.1(typescript@5.2.2)(zod@3.22.3): /viem@1.18.0(typescript@5.2.2)(zod@3.22.3):
resolution: {integrity: sha512-MSbrfntjgIMKPUPdNJ1pnwT1pDfnOzJnKSLqpafw1q+1k6k6M/jxn09g3WbKefIKIok122DcbmviMow+4FqkAg==} resolution: {integrity: sha512-NeKi5RFj7fHdsnk5pojivHFLkTyBWyehxeSE/gSPTDJKCWnR9i+Ra0W++VwN5ghciEG55O8b4RdpYhzGmhnr7A==}
peerDependencies: peerDependencies:
typescript: '>=5.0.4' typescript: '>=5.0.4'
peerDependenciesMeta: peerDependenciesMeta:
...@@ -14341,8 +14341,8 @@ packages: ...@@ -14341,8 +14341,8 @@ packages:
- zod - zod
dev: true dev: true
/viem@1.17.1(typescript@5.2.2)(zod@3.22.4): /viem@1.18.0(typescript@5.2.2)(zod@3.22.4):
resolution: {integrity: sha512-MSbrfntjgIMKPUPdNJ1pnwT1pDfnOzJnKSLqpafw1q+1k6k6M/jxn09g3WbKefIKIok122DcbmviMow+4FqkAg==} resolution: {integrity: sha512-NeKi5RFj7fHdsnk5pojivHFLkTyBWyehxeSE/gSPTDJKCWnR9i+Ra0W++VwN5ghciEG55O8b4RdpYhzGmhnr7A==}
peerDependencies: peerDependencies:
typescript: '>=5.0.4' typescript: '>=5.0.4'
peerDependenciesMeta: peerDependenciesMeta:
...@@ -14835,7 +14835,7 @@ packages: ...@@ -14835,7 +14835,7 @@ packages:
xml-name-validator: 4.0.0 xml-name-validator: 4.0.0
dev: true dev: true
/wagmi@1.0.1(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2)(viem@1.17.1): /wagmi@1.0.1(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2)(viem@1.18.0):
resolution: {integrity: sha512-+2UkZG9eA3tKqXj1wvlvI8mL0Bcff7Tf5CKfUOyQsdKcY+J5rfwYYya25G+jja57umpHFtfxRaL7xDkNjehrRg==} resolution: {integrity: sha512-+2UkZG9eA3tKqXj1wvlvI8mL0Bcff7Tf5CKfUOyQsdKcY+J5rfwYYya25G+jja57umpHFtfxRaL7xDkNjehrRg==}
peerDependencies: peerDependencies:
react: '>=17.0.0' react: '>=17.0.0'
...@@ -14848,12 +14848,12 @@ packages: ...@@ -14848,12 +14848,12 @@ packages:
'@tanstack/query-sync-storage-persister': 4.29.25 '@tanstack/query-sync-storage-persister': 4.29.25
'@tanstack/react-query': 4.29.25(react-dom@18.2.0)(react@18.2.0) '@tanstack/react-query': 4.29.25(react-dom@18.2.0)(react@18.2.0)
'@tanstack/react-query-persist-client': 4.29.25(@tanstack/react-query@4.29.25) '@tanstack/react-query-persist-client': 4.29.25(@tanstack/react-query@4.29.25)
'@wagmi/core': 1.0.1(react@18.2.0)(typescript@5.2.2)(viem@1.17.1) '@wagmi/core': 1.0.1(react@18.2.0)(typescript@5.2.2)(viem@1.18.0)
abitype: 0.8.1(typescript@5.2.2) abitype: 0.8.1(typescript@5.2.2)
react: 18.2.0 react: 18.2.0
typescript: 5.2.2 typescript: 5.2.2
use-sync-external-store: 1.2.0(react@18.2.0) use-sync-external-store: 1.2.0(react@18.2.0)
viem: 1.17.1(typescript@5.2.2)(zod@3.22.4) viem: 1.18.0(typescript@5.2.2)(zod@3.22.4)
transitivePeerDependencies: transitivePeerDependencies:
- '@react-native-async-storage/async-storage' - '@react-native-async-storage/async-storage'
- bufferutil - bufferutil
......
...@@ -471,9 +471,8 @@ channels available. These transactions carry one or more full frames, which may ...@@ -471,9 +471,8 @@ channels available. These transactions carry one or more full frames, which may
channel's frame may be split between multiple batcher transactions. channel's frame may be split between multiple batcher transactions.
When submitted to Ethereum calldata, the batcher transaction's receiver must be the sequencer inbox address. The When submitted to Ethereum calldata, the batcher transaction's receiver must be the sequencer inbox address. The
transaction must also be signed by a recognized batch submitter account. transaction must also be signed by a recognized batch submitter account. The recognized batch submitter account
is stored in the [System Configuration][system-config].
> **TODO** specify where these recognized batch submitter accounts are stored
## Channel Timeout ## Channel Timeout
......
# Safe Liveness Checking
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**
- [Liveness checking Mechanism](#liveness-checking-mechanism)
- [Liveness checking methodology](#liveness-checking-methodology)
- [The liveness guard](#the-liveness-guard)
- [The liveness module](#the-liveness-module)
- [Owner removal call flow](#owner-removal-call-flow)
- [Shutdown](#shutdown)
- [Security Properties](#security-properties)
- [Interdependency between the guard and module](#interdependency-between-the-guard-and-module)
- [Deploying the liveness checking system](#deploying-the-liveness-checking-system)
- [Modify the liveness checking system](#modify-the-liveness-checking-system)
- [Replacing the module](#replacing-the-module)
- [Replacing the guard](#replacing-the-guard)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Liveness checking Mechanism
The Security Council uses a specially extended Safe multisig contract to ensure that
any loss of access to a signer's keys is identified and addressed within a predictable period of
time.
This mechanism is intended only to be used to remove signers who have lost access to their keys, or
are otherwise inactive. It is not intended to be used to remove signers who are acting in bad faith,
or any other subjective criteria, such cases should be addressed by governance, and the removal
handled via the standard Safe ownership management functionality.
## Liveness checking methodology
This is achieved using two types of contracts which the Safe contract has built-in support for:
1. **Guard contracts:** can execute pre- and post- transaction checks.
1. **Module contracts:** a contract which is added to the Safe by the signers, and thenceforth is
authorized to execute transactions via the Safe. This means the module must properly implement
auth conditions internally.
### The liveness guard
For implementing liveness checks a `LivenessGuard` is created which receives the signatures from
each executed transaction, and tracks the latest time at which a transaction was signed by each
signer. This time is made publicly available by calling a `lastLive(address)(Timestamp)` method.
Signers may also call the contract's `showLiveness()()` method directly in order to prove liveness.
### The liveness module
A `LivenessModule` is also created which does the following:
1. Has a function `removeOwners()` that anyone may call to specify one or more owners to be removed from the
Safe.
1. The Module would then check the `LivenessGuard.lastLive()` to determine if the signer is
eligible for removal.
1. If so, it will call the Safe's `removeOwner()` to remove the non-live signer, and if necessary
reduce the threshold.
1. When a member is removed, the signing parameters are modified such that `M/N` is the lowest ratio
which remains greater than or equal to 75%. Using integer math, this can be expressed as `M = (N * 75 + 99) / 100`.
### Owner removal call flow
The following diagram illustrates the flow for removing a single owner. The `verifyFinalState`
box indicates calls to the Safe which ensure the final state is valid.
```mermaid
sequenceDiagram
participant User
participant LivenessModule
participant LivenessGuard
participant Safe
User->>LivenessModule: removeOwners([previousOwner], [owner])
LivenessModule->>LivenessGuard: lastLive(owner)
LivenessModule->>Safe: getOwners()
LivenessModule->>Safe: removeOwner(previousOwner, owner)
alt verifyFinalState
LivenessModule->>Safe: getOwners()
LivenessModule->>Safe: getThreshold()
LivenessModule->>Safe: getGuard()
end
```
### Shutdown
In the unlikely event that the signer set (`N`) is reduced below the allowed threshold, then (and only then) is a
shutdown mechanism activated which removes the existing signers, and hands control of the
multisig over to a predetermined entity.
### Security Properties
The following security properties must be upheld:
1. Signatures are assigned to the correct signer.
1. Non-signers are unable to create a record of having signed.
1. A signer cannot be censored or griefed such that their signing is not recorded.
1. Signers may demonstrate liveness either by signing a transaction or by calling directly to the
guard.
1. The module only removes a signer if they have demonstrated liveness during the interval, or
if necessary to convert the safe to a 1 of 1.
1. The module sets the correct 75% threshold upon removing a signer.
1. During a shutdown the module correctly removes all signers, and converts the safe to a 1 of 1.
1. It must be impossible for the guard's checkTransaction or checkAfterExecution to permanently
revert given any calldata and the current state.
Note: neither the module nor guard attempt to prevent a quorum of owners from removing either the liveness
module or guard. There are legitimate reasons they might wish to do so. Moreover, if such a quorum
of owners exists, there is no benefit to removing them, as they are defacto 'sufficiently live'.
### Interdependency between the guard and module
The guard has no dependency on the module, and can be used independently to track liveness of
Safe owners.
This means that the module can be removed or replaced without any effect on the guard.
The module however does have a dependency on the guard; if the guard is removed from the Safe, then
the module will no longer be functional and calls to its `removeOwners` function will revert.
### Deploying the liveness checking system
[deploying]: #deploying-the-liveness-checking-system
The module and guard are intended to be deployed and installed on the safe in the following
sequence:
1. Deploy the guard contract
2. The guard's constructor will read the Safe's owners and set a timestamp
1. Deploy the module.
1. Set the guard on the safe.
1. Enable the module on the safe.
This order of operations is necessary to satisfy the constructor checks in the module, and is
intended to prevent owners from being immediately removable.
Note that changes to the owners set should not be made between the time the module is deployed, and
when it is enabled on the Safe, otherwise the checks made in the module's constructor may be
invalidated. If such changes are made, a new module should be deployed.
### Modify the liveness checking system
Changes to the liveness checking system should be done in the following manner:
#### Replacing the module
The module can safely be removed without affecting the operation of the guard. A new module can then
be added.
Note: none of the module's parameters are modifiable. In order to update the security properties
enforced by the module, it must be replaced.
#### Replacing the guard
The safe can only have one guard contract at a time, and if the guard is removed the module will
cease to function. This does not affect the ability of the Safe to operate normally, however the
module should be removed as a best practice.
If a new guard is added, e.g. as a means of upgrading it, then a new module will also need to be
deployed and enabled. Once both the guard and module have been removed, they can be replaced
according to the steps in the [Deployment][deploying] section above.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment