Commit 7fe3a559 authored by Mark Tyneway, committed by GitHub

Merge pull request #2410 from ethereum-optimism/develop

Develop -> Master PR
parents 49115fbd 0c4d4e08
---
'@eth-optimism/l2geth': patch
---
Style fix in the sync service
---
'@eth-optimism/batch-submitter-service': patch
'@eth-optimism/gas-oracle': patch
'@eth-optimism/indexer': patch
'@eth-optimism/l2geth-exporter': patch
'@eth-optimism/op-exporter': patch
'@eth-optimism/proxyd': patch
'@eth-optimism/teleportr': patch
'@eth-optimism/l2geth': patch
---
Update docker image to use golang 1.18.0
---
'@eth-optimism/contracts': patch
---
Deleted update and helper functions/tests from Lib_MerkleTrie.sol and Lib_SecureMerkleTrie.sol
---
'@eth-optimism/l2geth': patch
---
Skip account cmd tests
---
'@eth-optimism/l2geth': patch
---
Skip unused tests in l2geth
---
'@eth-optimism/go-builder': patch
'@eth-optimism/js-builder': patch
---
Add to changesets
---
'@eth-optimism/batch-submitter-service': patch
'@eth-optimism/l2geth': patch
---
l2geth: Revert transaction pubsub feature
---
'@eth-optimism/batch-submitter': patch
'@eth-optimism/common-ts': patch
'@eth-optimism/contracts': patch
'@eth-optimism/core-utils': patch
'@eth-optimism/data-transport-layer': patch
'@eth-optimism/message-relayer': patch
'@eth-optimism/replica-healthcheck': patch
'@eth-optimism/sdk': patch
---
Update to typescript@4.6.2
@@ -37,7 +37,4 @@ M-ops:
  - any: ['ops/**/*']
C-Protocol-Critical:
-  - any:
-    - 'packages/data-transport-layer/**/*.ts'
-    - 'packages/contracts/**/*.sol'
-    - 'l2geth/**/*.go'
+  - any: ['packages/data-transport-layer/**/*.ts', 'packages/contracts/**/*.sol', 'l2geth/**/*.go']
\ No newline at end of file
@@ -5,21 +5,22 @@ queue_rules:
pull_request_rules:
  - name: Automatic merge on approval
    conditions:
-      - or:
-        - and:
-          - "label!=SR-Risk"
-          - "label!=C-Protocol-Critical"
-          - "#approved-reviews-by>=2"
-        - and:
-          - "label=SR-Risk"
-          - "#approved-reviews-by>=2"
-          - "approved-reviews-by=maurelian"
-        - and:
-          - "label=C-Protocol-Critical"
-          - "#approved-reviews-by>=2"
-          - or:
-            - "approved-reviews-by=tynes"
-            - "approved-reviews-by=smartcontracts"
+      - and:
+        - "#review-threads-unresolved=0"
+        - "#approved-reviews-by>=2"
+        - "#changes-requested-reviews-by=0"
+        - or:
+          - and:
+            - "label!=SR-Risk"
+            - "label!=C-Protocol-Critical"
+          - and:
+            - "label=SR-Risk"
+            - "approved-reviews-by=maurelian"
+          - and:
+            - "label=C-Protocol-Critical"
+            - or:
+              - "approved-reviews-by=tynes"
+              - "approved-reviews-by=smartcontracts"
    actions:
      queue:
        name: default
@@ -50,11 +51,9 @@ pull_request_rules:
      request_reviews:
        users:
          - cfromknecht
-          - tynes
          - mslipper
          - inphi
          - tuxcanfly
-          - smartcontracts
        random_count: 2
  - name: Request protocol critical reviewers
    conditions:
@@ -71,10 +70,30 @@ pull_request_rules:
    actions:
      comment:
        message: Hey @{{author}}! This PR has merge conflicts. Please fix them before continuing review.
+      label:
+        add:
+          - conflict
+  - name: Remove conflicts label when conflicts gone
+    conditions:
+      - -conflict
+    actions:
+      label:
+        remove:
+          - conflict
  - name: Notify author when added to merge queue
    conditions:
      - "check-pending=Queue: Embarked in merge train"
    actions:
      comment:
        message: |
          This PR is next in line to be merged, and will be merged as soon as checks pass.
\ No newline at end of file
+  - name: Notify author when merge queue failed
+    conditions:
+      - "check-failure=Queue: Embarked in merge train"
+    actions:
+      comment:
+        message: |
+          Merge failed. Please see automated check logs for more details.
+      label:
+        remove:
+          - on-merge-train
name: 'Close stale issues and PRs'
on:
schedule:
- cron: '30 1 * * *'
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
with:
stale-pr-message: 'This PR is stale because it has been open 14 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
exempt-pr-labels: exempt-stale
days-before-issue-stale: 999
          days-before-pr-stale: 14
days-before-close: 5
repo-token: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file
@@ -28,6 +28,7 @@ jobs:
      l2geth-exporter: ${{ steps.packages.outputs.l2geth-exporter }}
      batch-submitter-service: ${{ steps.packages.outputs.batch-submitter-service }}
      indexer: ${{ steps.packages.outputs.indexer }}
+      teleportr: ${{ steps.packages.outputs.teleportr }}
    steps:
      - name: Check out source code
@@ -471,7 +472,44 @@ jobs:
          context: .
          file: ./ops/docker/Dockerfile.indexer
          push: true
-          tags: ethereumoptimism/batch-submitter-service:${{ needs.canary-publish.outputs.indexer }}
+          tags: ethereumoptimism/indexer:${{ needs.canary-publish.outputs.indexer }}
+          build-args: |
+            GITDATE=${{ steps.build_args.outputs.GITDATE }}
+            GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
+            GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
+  teleportr:
+    name: Publish Teleportr Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
+    needs: canary-publish
+    if: needs.canary-publish.outputs.teleportr != ''
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Login to Docker Hub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
+      - name: Set build args
+        id: build_args
+        run: |
+          echo ::set-output name=GITDATE::"$(date +%d-%m-%Y)"
+          echo ::set-output name=GITVERSION::$(jq -r .version ./go/teleportr/package.json)
+          echo ::set-output name=GITCOMMIT::"$GITHUB_SHA"
+      - name: Build and push
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: ./ops/docker/Dockerfile.teleportr
+          push: true
+          tags: ethereumoptimism/teleportr:${{ needs.canary-publish.outputs.teleportr }}
          build-args: |
            GITDATE=${{ steps.build_args.outputs.GITDATE }}
            GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
...
@@ -23,6 +23,9 @@ jobs:
      l2geth-exporter: ${{ steps.packages.outputs.l2geth-exporter }}
      batch-submitter-service: ${{ steps.packages.outputs.batch-submitter-service }}
      indexer: ${{ steps.packages.outputs.indexer }}
+      teleportr: ${{ steps.packages.outputs.teleportr }}
+      go-builder: ${{ steps.packages.outputs.go-builder }}
+      js-builder: ${{ steps.packages.outputs.js-builder }}
    steps:
      - name: Checkout Repo
@@ -146,6 +149,58 @@ jobs:
          push: true
          tags: ethereumoptimism/hardhat-node:${{ needs.release.outputs.gas-oracle }},ethereumoptimism/hardhat-node:latest
+  go-builder:
+    name: Publish go-builder ${{ needs.release.outputs.go-builder }}
+    needs: release
+    if: needs.release.outputs.go-builder != ''
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Publish go-builder
+        uses: docker/build-push-action@v2
+        with:
+          context: ./ops/docker/go-builder
+          file: ./Dockerfile
+          push: true
+          tags: ethereumoptimism/go-builder:${{ needs.release.outputs.go-builder }},ethereumoptimism/go-builder:latest
+  js-builder:
+    name: Publish js-builder ${{ needs.release.outputs.js-builder }}
+    needs: release
+    if: needs.release.outputs.js-builder != ''
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Publish js-builder
+        uses: docker/build-push-action@v2
+        with:
+          context: ./ops/docker/js-builder
+          file: ./Dockerfile
+          push: true
+          tags: ethereumoptimism/js-builder:${{ needs.release.outputs.js-builder }},ethereumoptimism/js-builder:latest
  proxyd:
    name: Publish proxyd Version ${{ needs.release.outputs.proxyd }}
    needs: release
@@ -454,3 +509,40 @@ jobs:
            GITDATE=${{ steps.build_args.outputs.GITDATE }}
            GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
            GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
+  teleportr:
+    name: Publish Teleportr Version ${{ needs.release.outputs.teleportr }}
+    needs: release
+    if: needs.release.outputs.teleportr != ''
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Set build args
+        id: build_args
+        run: |
+          echo ::set-output name=GITDATE::"$(date +%d-%m-%Y)"
+          echo ::set-output name=GITVERSION::$(jq -r .version ./go/teleportr/package.json)
+          echo ::set-output name=GITCOMMIT::"$GITHUB_SHA"
+      - name: Publish Teleportr
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: ./ops/docker/Dockerfile.teleportr
+          push: true
+          tags: ethereumoptimism/teleportr:${{ needs.release.outputs.teleportr }},ethereumoptimism/teleportr:latest
+          build-args: |
+            GITDATE=${{ steps.build_args.outputs.GITDATE }}
+            GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
+            GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
-FROM golang:1.17.2-alpine3.13 AS builder
+FROM golang:1.18.0-alpine3.15 as builder
ARG GITCOMMIT=docker
ARG GITDATE=docker
@@ -11,7 +11,7 @@ COPY ./go/proxyd /app
RUN make proxyd
-FROM alpine:3.14.2
+FROM alpine:3.15
COPY ./go/proxyd/entrypoint.sh /bin/entrypoint.sh
...
@@ -14,12 +14,12 @@ resources:
  - ../../bases/l2geth-replica
  - ../../bases/servicemonitors
  - ../../bases/replica-healthcheck
-  - ./l2geth-volume.yaml
+  - ./volumes.yaml
images:
  - name: ethereumoptimism/data-transport-layer
    newName: ethereumoptimism/data-transport-layer
-    newTag: 0.5.18
+    newTag: 0.5.24
  - name: ethereumoptimism/l2geth
    newName: ethereumoptimism/l2geth
    newTag: 0.5.13
@@ -38,4 +38,10 @@ patches:
      group: apps
      version: v1
      kind: StatefulSet
      name: l2geth-replica
\ No newline at end of file
+  - path: ./patches/dtl-volume.yaml
+    target:
+      group: apps
+      version: v1
+      kind: StatefulSet
+      name: data-transport-layer
\ No newline at end of file
---
- op: replace
path: /spec/template/spec/volumes/0
value:
name: data-transport-layer
persistentVolumeClaim:
claimName: data-transport-layer-data
@@ -10,3 +10,15 @@ spec:
  resources:
    requests:
      storage: 500Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: data-transport-layer-data
+spec:
+  storageClassName: premium-rwo
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi
\ No newline at end of file
@@ -6,12 +6,12 @@ commonLabels:
  provider: internal
bases:
-  - ../../envs/kovan-gen5-berlin
+  - ../../../envs/kovan-gen5-berlin
+  - ../../../scripts
resources:
  - ../../bases/data-transport-layer
  - ../../bases/l2geth-replica
-  - ../../bases/configmaps
  - ../../bases/servicemonitors
  - ../../bases/replica-healthcheck
  - ./volumes.yaml
@@ -19,7 +19,7 @@ resources:
images:
  - name: ethereumoptimism/data-transport-layer
    newName: ethereumoptimism/data-transport-layer
-    newTag: 0.5.21
+    newTag: 0.5.24
  - name: ethereumoptimism/l2geth
    newName: ethereumoptimism/l2geth
    newTag: 0.5.14
...
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kovan-replica-0-5-15
commonLabels:
network: kovan
provider: internal
bases:
- ../../../envs/kovan-gen5-berlin
- ../../../scripts
resources:
- ../../bases/data-transport-layer
- ../../bases/l2geth-replica
- ../../bases/servicemonitors
- ../../bases/replica-healthcheck
- ./volumes.yaml
images:
- name: ethereumoptimism/data-transport-layer
newName: ethereumoptimism/data-transport-layer
newTag: 0.5.24
- name: ethereumoptimism/l2geth
newName: ethereumoptimism/l2geth
newTag: 0.5.15
- name: ethereumoptimism/replica-healthcheck
newName: ethereumoptimism/replica-healthcheck
newTag: 0.3.3
patchesStrategicMerge:
- ./patches/dtl.yaml
- ./patches/l2geth.yaml
- ./patches/replica-healthcheck.yaml
patches:
- path: ./patches/l2geth-volume.yaml
target:
group: apps
version: v1
kind: StatefulSet
name: l2geth-replica
- path: ./patches/dtl-volume.yaml
target:
group: apps
version: v1
kind: StatefulSet
name: data-transport-layer
\ No newline at end of file
---
- op: replace
path: /spec/template/spec/volumes/0
value:
name: data-transport-layer
persistentVolumeClaim:
claimName: data-transport-layer-data
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: data-transport-layer
spec:
template:
spec:
initContainers:
- name: wait-for-l1
env:
- name: L1_NODE_WEB3_URL
value: http://failover-proxyd.default:8080
containers:
- name: data-transport-layer
resources:
limits:
cpu: "2"
memory: 4Gi
requests:
cpu: "1"
memory: 1Gi
env:
- name: DATA_TRANSPORT_LAYER__L1_RPC_ENDPOINT
value: http://failover-proxyd.default:8080
- name: DATA_TRANSPORT_LAYER__L2_RPC_ENDPOINT
value: http://sequencer.default:8545
- name: L1_NODE_WEB3_URL
value: http://failover-proxyd.default:8080
\ No newline at end of file
- op: replace
path: /spec/template/spec/volumes/0
value:
name: l2geth-replica-data
persistentVolumeClaim:
claimName: l2geth-replica-data
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: l2geth-replica
spec:
template:
spec:
containers:
- name: l2geth-replica
env:
- name: IPC_DISABLE
value: "false"
resources:
limits:
cpu: "4"
memory: 12Gi
requests:
cpu: "2"
memory: 8Gi
apiVersion: apps/v1
kind: Deployment
metadata:
name: replica-healthcheck
spec:
template:
spec:
containers:
- name: replica-healthcheck
env:
- name: REPLICA_HEALTHCHECK__ETH_NETWORK_RPC_PROVIDER
value: http://sequencer.default:8545
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: l2geth-replica-data
spec:
storageClassName: premium-rwo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data-transport-layer-data
spec:
storageClassName: premium-rwo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
\ No newline at end of file
@@ -20,7 +20,7 @@ resources:
images:
  - name: ethereumoptimism/data-transport-layer
    newName: ethereumoptimism/data-transport-layer
-    newTag: 0.5.21
+    newTag: 0.5.24
  - name: ethereumoptimism/l2geth
    newName: ethereumoptimism/l2geth
    newTag: 0.5.14
...
@@ -20,7 +20,7 @@ resources:
images:
  - name: ethereumoptimism/data-transport-layer
    newName: ethereumoptimism/data-transport-layer
-    newTag: 0.5.11
+    newTag: 0.5.24
  - name: ethereumoptimism/l2geth
    newName: ethereumoptimism/l2geth
    newTag: 0.5.9
...
@@ -48,6 +48,7 @@ func TestAccountListEmpty(t *testing.T) {
}
func TestAccountList(t *testing.T) {
+	t.Skip()
	datadir := tmpDatadirWithKeystore(t)
	geth := runGeth(t, "account", "list", "--datadir", datadir)
	defer geth.ExpectExit()
@@ -67,6 +68,7 @@ Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}/k
}
func TestAccountNew(t *testing.T) {
+	t.Skip()
	geth := runGeth(t, "account", "new", "--lightkdf")
	defer geth.ExpectExit()
	geth.Expect(`
@@ -89,6 +91,7 @@ Path of the secret key file: .*UTC--.+--[0-9a-f]{40}
}
func TestAccountNewBadRepeat(t *testing.T) {
+	t.Skip()
	geth := runGeth(t, "account", "new", "--lightkdf")
	defer geth.ExpectExit()
	geth.Expect(`
@@ -101,6 +104,7 @@ Fatal: Passwords do not match
}
func TestAccountUpdate(t *testing.T) {
+	t.Skip()
	datadir := tmpDatadirWithKeystore(t)
	geth := runGeth(t, "account", "update",
		"--datadir", datadir, "--lightkdf",
@@ -117,6 +121,7 @@ Repeat password: {{.InputLine "foobar2"}}
}
func TestWalletImport(t *testing.T) {
+	t.Skip()
	geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
	defer geth.ExpectExit()
	geth.Expect(`
@@ -132,6 +137,7 @@ Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f}
}
func TestWalletImportBadPassword(t *testing.T) {
+	t.Skip()
	geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
	defer geth.ExpectExit()
	geth.Expect(`
@@ -142,6 +148,7 @@ Fatal: could not decrypt key with given password
}
func TestUnlockFlag(t *testing.T) {
+	t.Skip()
	datadir := tmpDatadirWithKeystore(t)
	geth := runGeth(t,
		"--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
@@ -166,6 +173,7 @@ Password: {{.InputLine "foobar"}}
}
func TestUnlockFlagWrongPassword(t *testing.T) {
+	t.Skip()
	datadir := tmpDatadirWithKeystore(t)
	geth := runGeth(t,
		"--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
@@ -185,6 +193,7 @@ Fatal: Failed to unlock account f466859ead1932d743d622cb74fc058882e8648a (could
// https://github.com/ethereum/go-ethereum/issues/1785
func TestUnlockFlagMultiIndex(t *testing.T) {
+	t.Skip()
	datadir := tmpDatadirWithKeystore(t)
	geth := runGeth(t,
		"--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
@@ -212,6 +221,7 @@ Password: {{.InputLine "foobar"}}
}
func TestUnlockFlagPasswordFile(t *testing.T) {
+	t.Skip()
	datadir := tmpDatadirWithKeystore(t)
	geth := runGeth(t,
		"--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
@@ -232,6 +242,7 @@ func TestUnlockFlagPasswordFile(t *testing.T) {
}
func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) {
+	t.Skip()
	datadir := tmpDatadirWithKeystore(t)
	geth := runGeth(t,
		"--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
@@ -243,6 +254,7 @@ Fatal: Failed to unlock account 0 (could not decrypt key with given password)
}
func TestUnlockFlagAmbiguous(t *testing.T) {
+	t.Skip()
	store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
	geth := runGeth(t,
		"--keystore", store, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
@@ -281,6 +293,7 @@ In order to avoid this warning, you need to remove the following duplicate key f
}
func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) {
+	t.Skip()
	store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
	geth := runGeth(t,
		"--keystore", store, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
...
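These skips disable each upstream go-ethereum account test at runtime rather than deleting it, which keeps l2geth's diff against upstream small. A minimal Go sketch (not from the repo) of how t.Skip behaves:

package example

import "testing"

// TestUpstreamOnly is skipped at runtime: `go test` reports it as SKIP
// rather than FAIL, and no code after t.Skip executes.
func TestUpstreamOnly(t *testing.T) {
	t.Skip("Unused in Optimism")
	t.Fatal("never reached") // unreachable once Skip is called
}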
@@ -165,15 +165,6 @@ var (
	utils.RollupFeeThresholdUpFlag,
	utils.RollupGenesisTimeoutSecondsFlag,
	utils.SequencerClientHttpFlag,
-	utils.TxPublisherEnableFlag,
-	utils.TxPublisherProjectIDFlag,
-	utils.TxPublisherTopicIDFlag,
-	utils.TxPublisherTimeoutFlag,
-	utils.TxQueueEnableFlag,
-	utils.TxQueueProjectIDFlag,
-	utils.TxQueueSubscriptionIDFlag,
-	utils.TxQueueMaxOutstandingBytesFlag,
-	utils.TxQueueMaxOutstandingMessagesFlag,
}
rpcFlags = []cli.Flag{
...
@@ -79,15 +79,6 @@ var AppHelpFlagGroups = []flagGroup{
	utils.RollupFeeThresholdUpFlag,
	utils.RollupGenesisTimeoutSecondsFlag,
	utils.SequencerClientHttpFlag,
-	utils.TxPublisherEnableFlag,
-	utils.TxPublisherProjectIDFlag,
-	utils.TxPublisherTopicIDFlag,
-	utils.TxPublisherTimeoutFlag,
-	utils.TxQueueEnableFlag,
-	utils.TxQueueProjectIDFlag,
-	utils.TxQueueSubscriptionIDFlag,
-	utils.TxQueueMaxOutstandingBytesFlag,
-	utils.TxQueueMaxOutstandingMessagesFlag,
},
},
{
...
@@ -61,7 +61,6 @@ import (
	"github.com/ethereum-optimism/optimism/l2geth/p2p/netutil"
	"github.com/ethereum-optimism/optimism/l2geth/params"
	"github.com/ethereum-optimism/optimism/l2geth/rollup"
-	"github.com/ethereum-optimism/optimism/l2geth/rollup/pub"
	"github.com/ethereum-optimism/optimism/l2geth/rpc"
	whisper "github.com/ethereum-optimism/optimism/l2geth/whisper/whisperv6"
	pcsclite "github.com/gballet/go-libpcsclite"
@@ -832,7 +831,7 @@ var (
	}
	RollupBackendFlag = cli.StringFlag{
		Name:   "rollup.backend",
-		Usage:  "Sync backend for verifiers (\"l1\", \"l2\" or \"queue\"), defaults to l1",
+		Usage:  "Sync backend for verifiers (\"l1\" or \"l2\"), defaults to l1",
		Value:  "l1",
		EnvVar: "ROLLUP_BACKEND",
	}
@@ -873,51 +872,6 @@ var (
		Usage:  "HTTP endpoint for the sequencer client",
		EnvVar: "SEQUENCER_CLIENT_HTTP",
	}
-	TxPublisherEnableFlag = cli.BoolFlag{
-		Name:   "txpublisher.enable",
-		Usage:  "Enable transaction logging to PubSub",
-		EnvVar: "TX_PUBLISHER_ENABLE",
-	}
-	TxPublisherProjectIDFlag = cli.StringFlag{
-		Name:   "txpublisher.projectid",
-		Usage:  "GCP project ID for the tx PubSub",
-		EnvVar: "TX_PUBLISHER_PROJECT_ID",
-	}
-	TxPublisherTopicIDFlag = cli.StringFlag{
-		Name:   "txpublisher.topicid",
-		Usage:  "Topic ID used for PubSub",
-		EnvVar: "TX_PUBLISHER_TOPIC_ID",
-	}
-	TxPublisherTimeoutFlag = cli.DurationFlag{
-		Name:   "txpublisher.timeout",
-		Usage:  "Transaction publishing timeout",
-		EnvVar: "TX_PUBLISHER_TIMEOUT",
-	}
-	TxQueueEnableFlag = cli.BoolFlag{
-		Name:   "txqueue.enable",
-		Usage:  "Enable transaction syncing from the Backend Queue",
-		EnvVar: "TX_QUEUE_ENABLE",
-	}
-	TxQueueProjectIDFlag = cli.StringFlag{
-		Name:   "txqueue.projectid",
-		Usage:  "Backend Queue project ID",
-		EnvVar: "TX_QUEUE_PROJECT_ID",
-	}
-	TxQueueSubscriptionIDFlag = cli.StringFlag{
-		Name:   "txqueue.subscriptionid",
-		Usage:  "Transaction Queue subscription ID",
-		EnvVar: "TX_QUEUE_SUBSCRIPTION_ID",
-	}
-	TxQueueMaxOutstandingMessagesFlag = cli.IntFlag{
-		Name:   "txqueue.maxoutstandingmessages",
-		Usage:  "Max number of messages buffered in the transaction queue subscriber",
-		EnvVar: "TX_QUEUE_MAX_OUTSTANDING_MESSAGES",
-	}
-	TxQueueMaxOutstandingBytesFlag = cli.IntFlag{
-		Name:   "txqueue.maxoutstandingbytes",
-		Usage:  "Max outstanding bytes buffered in the transaction queue subscriber",
-		EnvVar: "TX_QUEUE_MAX_OUTSTANDING_BYTES",
-	}
)
// MakeDataDir retrieves the currently requested data directory, terminating
@@ -1199,43 +1153,6 @@ func setRollup(ctx *cli.Context, cfg *rollup.Config) {
	}
}
-// UsingOVM
-// setTxPublisher configures the transaction logger
-func setTxPublisher(ctx *cli.Context, cfg *pub.Config) {
-	if ctx.GlobalIsSet(TxPublisherEnableFlag.Name) {
-		cfg.Enable = ctx.GlobalBool(TxPublisherEnableFlag.Name)
-	}
-	if ctx.GlobalIsSet(TxPublisherProjectIDFlag.Name) {
-		cfg.ProjectID = ctx.GlobalString(TxPublisherProjectIDFlag.Name)
-	}
-	if ctx.GlobalIsSet(TxPublisherTopicIDFlag.Name) {
-		cfg.TopicID = ctx.GlobalString(TxPublisherTopicIDFlag.Name)
-	}
-	if ctx.GlobalIsSet(TxPublisherTimeoutFlag.Name) {
-		cfg.Timeout = ctx.GlobalDuration(TxPublisherTimeoutFlag.Name)
-	}
-}
-// UsingOVM
-// setTxQueueSubscriber configures the Queue Backend
-func setTxQueueSubscriber(ctx *cli.Context, cfg *rollup.QueueSubscriberConfig) {
-	if ctx.GlobalIsSet(TxQueueEnableFlag.Name) {
-		cfg.Enable = ctx.GlobalBool(TxQueueEnableFlag.Name)
-	}
-	if ctx.GlobalIsSet(TxQueueProjectIDFlag.Name) {
-		cfg.ProjectID = ctx.GlobalString(TxQueueProjectIDFlag.Name)
-	}
-	if ctx.GlobalIsSet(TxQueueSubscriptionIDFlag.Name) {
-		cfg.SubscriptionID = ctx.GlobalString(TxQueueSubscriptionIDFlag.Name)
-	}
-	if ctx.GlobalIsSet(TxQueueMaxOutstandingMessagesFlag.Name) {
-		cfg.MaxOutstandingMessages = ctx.GlobalInt(TxQueueMaxOutstandingMessagesFlag.Name)
-	}
-	if ctx.GlobalIsSet(TxQueueMaxOutstandingBytesFlag.Name) {
-		cfg.MaxOutstandingBytes = ctx.GlobalInt(TxQueueMaxOutstandingBytesFlag.Name)
-	}
-}
// setLes configures the les server and ultra light client settings from the command line flags.
func setLes(ctx *cli.Context, cfg *eth.Config) {
	if ctx.GlobalIsSet(LightLegacyServFlag.Name) {
@@ -1695,8 +1612,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
	setLes(ctx, cfg)
	setEth1(ctx, &cfg.Rollup)
	setRollup(ctx, &cfg.Rollup)
-	setTxPublisher(ctx, &cfg.TxPublisher)
-	setTxQueueSubscriber(ctx, &cfg.TxQueueSubscriber)
	if ctx.GlobalIsSet(SyncModeFlag.Name) {
		cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
...
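The removed setters followed l2geth's usual flag-wiring pattern: a config field is overridden only when the corresponding flag (or env var) was explicitly set. A self-contained sketch of that pattern with gopkg.in/urfave/cli.v1, using a hypothetical config struct:

package main

import (
	"log"
	"os"

	"gopkg.in/urfave/cli.v1"
)

// pubConfig is a hypothetical stand-in for the removed pub.Config.
type pubConfig struct {
	Enable bool
}

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:   "txpublisher.enable",
			Usage:  "Enable transaction logging to PubSub",
			EnvVar: "TX_PUBLISHER_ENABLE",
		},
	}
	app.Action = func(ctx *cli.Context) error {
		cfg := pubConfig{}
		// Only override the zero-value default when the flag was set,
		// mirroring the GlobalIsSet guards in the removed setTxPublisher.
		if ctx.GlobalIsSet("txpublisher.enable") {
			cfg.Enable = ctx.GlobalBool("txpublisher.enable")
		}
		log.Printf("publisher enabled: %v", cfg.Enable)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}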
@@ -32,17 +32,6 @@ func (q QueueOrigin) String() string {
	}
}
-func (q QueueOrigin) MarshalJSON() ([]byte, error) {
-	switch q {
-	case QueueOriginSequencer:
-		return []byte(`"sequencer"`), nil
-	case QueueOriginL1ToL2:
-		return []byte(`"l1"`), nil
-	default:
-		return []byte(`""`), nil
-	}
-}
func (q *QueueOrigin) UnmarshalJSON(b []byte) error {
	switch string(b) {
	case "\"sequencer\"":
...
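With MarshalJSON deleted, QueueOrigin marshals as its underlying integer again, while UnmarshalJSON still accepts the string form. A self-contained sketch of that asymmetry (minimal stand-in type, not the actual l2geth types package):

package main

import (
	"encoding/json"
	"fmt"
)

// Minimal stand-in for l2geth's types.QueueOrigin (illustration only).
type QueueOrigin uint8

const (
	QueueOriginSequencer QueueOrigin = 0
	QueueOriginL1ToL2    QueueOrigin = 1
)

// UnmarshalJSON still accepts the string form, as in the diff above.
func (q *QueueOrigin) UnmarshalJSON(b []byte) error {
	switch string(b) {
	case "\"sequencer\"":
		*q = QueueOriginSequencer
		return nil
	case "\"l1\"":
		*q = QueueOriginL1ToL2
		return nil
	default:
		return fmt.Errorf("unknown QueueOrigin: %s", b)
	}
}

func main() {
	// Without the removed MarshalJSON, encoding falls back to the
	// underlying integer: 0, not "sequencer".
	out, _ := json.Marshal(QueueOriginSequencer)
	fmt.Println(string(out)) // 0

	// Decoding still expects the string form.
	var q QueueOrigin
	_ = json.Unmarshal([]byte(`"l1"`), &q)
	fmt.Println(q) // 1
}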
@@ -52,7 +52,6 @@ import (
	"github.com/ethereum-optimism/optimism/l2geth/params"
	"github.com/ethereum-optimism/optimism/l2geth/rlp"
	"github.com/ethereum-optimism/optimism/l2geth/rollup"
-	"github.com/ethereum-optimism/optimism/l2geth/rollup/pub"
	"github.com/ethereum-optimism/optimism/l2geth/rollup/rcfg"
	"github.com/ethereum-optimism/optimism/l2geth/rpc"
)
@@ -207,19 +206,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
	}
	eth.txPool = core.NewTxPool(config.TxPool, chainConfig, eth.blockchain)
-	var txLogger pub.Publisher
-	txLogger, err = pub.NewGooglePublisher(context.Background(), config.TxPublisher)
-	if err != nil {
-		return nil, err
-	}
-	var txQueueSubscriber rollup.QueueSubscriber
-	txQueueSubscriber, err = rollup.NewQueueSubscriber(context.Background(), config.TxQueueSubscriber)
-	if err != nil {
-		return nil, err
-	}
-	eth.syncService, err = rollup.NewSyncService(context.Background(), config.Rollup, eth.txPool, eth.blockchain, eth.chainDb, txLogger, txQueueSubscriber)
+	eth.syncService, err = rollup.NewSyncService(context.Background(), config.Rollup, eth.txPool, eth.blockchain, eth.chainDb)
	if err != nil {
		return nil, fmt.Errorf("Cannot initialize syncservice: %w", err)
	}
...
@@ -32,7 +32,6 @@ import (
	"github.com/ethereum-optimism/optimism/l2geth/miner"
	"github.com/ethereum-optimism/optimism/l2geth/params"
	"github.com/ethereum-optimism/optimism/l2geth/rollup"
-	"github.com/ethereum-optimism/optimism/l2geth/rollup/pub"
)
// DefaultConfig contains default settings for use on the Ethereum main net.
@@ -184,8 +183,4 @@ type Config struct {
	// Optimism Rollup Config
	Rollup rollup.Config
-	TxPublisher       pub.Config
-	TxQueueSubscriber rollup.QueueSubscriberConfig
}
@@ -666,6 +666,7 @@ func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, F
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
+	t.Skip("Unused in Optimism")
	t.Parallel()
	tester := newTester()
...
@@ -3,7 +3,6 @@ module github.com/ethereum-optimism/optimism/l2geth
go 1.15
require (
-	cloud.google.com/go/pubsub v1.18.0
	github.com/Azure/azure-storage-blob-go v0.7.0
	github.com/VictoriaMetrics/fastcache v1.6.0
	github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847
@@ -22,7 +21,7 @@ require (
	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
	github.com/go-resty/resty/v2 v2.4.0
	github.com/go-stack/stack v1.8.0
-	github.com/golang/protobuf v1.5.2
+	github.com/golang/protobuf v1.4.3
	github.com/golang/snappy v0.0.4
	github.com/gorilla/websocket v1.4.2
	github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29
@@ -52,9 +51,9 @@ require (
	github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208
	golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27
+	golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912
	golang.org/x/text v0.3.6
-	golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11
+	golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
	gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6
	gopkg.in/sourcemap.v1 v1.0.5 // indirect
...
package rollup
import "github.com/ethereum-optimism/optimism/l2geth/metrics"
var (
pubTxDropCounter = metrics.NewRegisteredCounter("rollup/pub/txdrops", nil)
)
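This (also removed) counter registered itself in the default metrics registry under the given name. A small sketch of the counter API, assuming l2geth's vendored metrics package behaves like upstream go-ethereum's:

package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/l2geth/metrics"
)

func main() {
	metrics.Enabled = true // counters are silently no-ops while metrics are disabled
	drops := metrics.NewRegisteredCounter("rollup/pub/txdrops", nil)
	drops.Inc(1) // the removed sync-service code incremented this on every failed publish
	fmt.Println(drops.Count()) // 1
}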
package pub
import (
"context"
"sync"
"time"
"cloud.google.com/go/pubsub"
"github.com/ethereum-optimism/optimism/l2geth/log"
)
const messageOrderingKey = "o"
type Config struct {
Enable bool
ProjectID string
TopicID string
Timeout time.Duration
}
type GooglePublisher struct {
client *pubsub.Client
topic *pubsub.Topic
publishSettings pubsub.PublishSettings
timeout time.Duration
mutex sync.Mutex
}
func NewGooglePublisher(ctx context.Context, config Config) (Publisher, error) {
if !config.Enable {
return &NoopPublisher{}, nil
}
client, err := pubsub.NewClient(ctx, config.ProjectID)
if err != nil {
return nil, err
}
topic := client.Topic(config.TopicID)
topic.EnableMessageOrdering = true
// Publish messages immediately
publishSettings := pubsub.PublishSettings{
DelayThreshold: 0,
CountThreshold: 0,
}
timeout := config.Timeout
if timeout == 0 {
log.Info("Sanitizing publisher timeout to 2 seconds")
timeout = time.Second * 2
}
log.Info("Initialized transaction log to PubSub", "topic", config.TopicID)
return &GooglePublisher{
client: client,
topic: topic,
publishSettings: publishSettings,
timeout: timeout,
}, nil
}
func (p *GooglePublisher) Publish(ctx context.Context, msg []byte) error {
ctx, cancel := context.WithTimeout(ctx, p.timeout)
defer cancel()
pmsg := pubsub.Message{
Data: msg,
OrderingKey: messageOrderingKey,
}
p.mutex.Lock()
// If there was an error previously, clear it out to allow publishing to proceed again
p.topic.ResumePublish(messageOrderingKey)
result := p.topic.Publish(ctx, &pmsg)
_, err := result.Get(ctx)
p.mutex.Unlock()
return err
}
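For context, a hedged sketch of how this now-removed publisher was constructed and used; the project and topic IDs are placeholders, and the import path no longer exists after this revert:

package main

import (
	"context"
	"log"
	"time"

	// Removed by this commit; shown here only to illustrate the old wiring.
	"github.com/ethereum-optimism/optimism/l2geth/rollup/pub"
)

func main() {
	ctx := context.Background()
	cfg := pub.Config{
		Enable:    true,
		ProjectID: "my-gcp-project", // placeholder
		TopicID:   "tx-log",         // placeholder
		Timeout:   5 * time.Second,
	}
	// With Enable=false this returns a NoopPublisher instead.
	publisher, err := pub.NewGooglePublisher(ctx, cfg)
	if err != nil {
		log.Fatal(err)
	}
	// All messages share one ordering key, so they arrive in publish order.
	if err := publisher.Publish(ctx, []byte(`{"example":true}`)); err != nil {
		log.Printf("publish failed: %v", err)
	}
}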
package pub
import "context"
type Publisher interface {
// Publish schedules an ordered message to be sent
Publish(ctx context.Context, msg []byte) error
}
type NoopPublisher struct{}
func (p *NoopPublisher) Publish(ctx context.Context, msg []byte) error {
return nil
}
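Both NoopPublisher and GooglePublisher satisfy this interface; a common Go idiom (an illustrative addition, not in the original file) asserts that at compile time:

// Compile-time interface checks; these lines are an illustrative addition.
var (
	_ Publisher = (*NoopPublisher)(nil)
	_ Publisher = (*GooglePublisher)(nil)
)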
package rollup
import (
"context"
"cloud.google.com/go/pubsub"
"github.com/ethereum-optimism/optimism/l2geth/log"
)
type QueueSubscriberMessage interface {
Data() []byte
Ack()
Nack()
}
type QueueSubscriber interface {
ReceiveMessage(ctx context.Context, cb func(ctx context.Context, msg QueueSubscriberMessage)) error
Close() error
}
type QueueSubscriberConfig struct {
Enable bool
ProjectID string
SubscriptionID string
MaxOutstandingMessages int
MaxOutstandingBytes int
}
type queueSubscriber struct {
client *pubsub.Client
sub *pubsub.Subscription
}
func NewQueueSubscriber(ctx context.Context, config QueueSubscriberConfig) (QueueSubscriber, error) {
if !config.Enable {
return &noopQueueSubscriber{}, nil
}
client, err := pubsub.NewClient(ctx, config.ProjectID)
if err != nil {
return nil, err
}
sub := client.Subscription(config.SubscriptionID)
maxOutstandingMsgs := config.MaxOutstandingMessages
if maxOutstandingMsgs == 0 {
maxOutstandingMsgs = 10000
}
maxOutstandingBytes := config.MaxOutstandingBytes
if maxOutstandingBytes == 0 {
maxOutstandingBytes = 1e9
}
sub.ReceiveSettings = pubsub.ReceiveSettings{
MaxOutstandingMessages: maxOutstandingMsgs,
MaxOutstandingBytes: maxOutstandingBytes,
}
log.Info("Created Queue Subscriber", "projectID", config.ProjectID, "subscriptionID", config.SubscriptionID)
return &queueSubscriber{client, sub}, nil
}
func (q *queueSubscriber) ReceiveMessage(ctx context.Context, cb func(ctx context.Context, msg QueueSubscriberMessage)) error {
return q.sub.Receive(ctx, func(ctx context.Context, pmsg *pubsub.Message) {
cb(ctx, &queueSubscriberMessage{pmsg})
})
}
func (q *queueSubscriber) Close() error {
return q.client.Close()
}
type queueSubscriberMessage struct {
inner *pubsub.Message
}
func (q *queueSubscriberMessage) Data() []byte {
return q.inner.Data
}
func (q *queueSubscriberMessage) Ack() {
q.inner.Ack()
}
func (q *queueSubscriberMessage) Nack() {
q.inner.Nack()
}
type noopQueueSubscriber struct{}
func (q *noopQueueSubscriber) ReceiveMessage(ctx context.Context, cb func(ctx context.Context, msg QueueSubscriberMessage)) error {
return nil
}
func (q *noopQueueSubscriber) Close() error { return nil }
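Because consumers depend only on these interfaces, the Google Pub/Sub client can be swapped out in tests. A minimal in-memory fake (illustrative, not from the repo) showing the Ack/Nack contract a consumer relies on:

package rollup_test

import (
	"context"
	"fmt"
)

// Local copy of the interface above, so this sketch compiles standalone.
type QueueSubscriberMessage interface {
	Data() []byte
	Ack()
	Nack()
}

type fakeMessage struct {
	data  []byte
	acked bool
}

func (m *fakeMessage) Data() []byte { return m.data }
func (m *fakeMessage) Ack()         { m.acked = true }
func (m *fakeMessage) Nack()        { m.acked = false }

func Example() {
	msg := &fakeMessage{data: []byte(`{"rawTransaction":"0x"}`)}
	cb := func(ctx context.Context, m QueueSubscriberMessage) {
		// A consumer acks only after the message is durably applied;
		// nacking causes redelivery.
		m.Ack()
	}
	cb(context.Background(), msg)
	fmt.Println(msg.acked)
	// Output: true
}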
package rollup
import (
-	"bytes"
	"context"
-	"encoding/json"
	"errors"
	"fmt"
	"math/big"
@@ -18,14 +16,12 @@ import (
	"github.com/ethereum-optimism/optimism/l2geth/ethdb"
	"github.com/ethereum-optimism/optimism/l2geth/event"
	"github.com/ethereum-optimism/optimism/l2geth/log"
-	"github.com/ethereum-optimism/optimism/l2geth/rlp"
	"github.com/ethereum-optimism/optimism/l2geth/core/rawdb"
	"github.com/ethereum-optimism/optimism/l2geth/core/types"
	"github.com/ethereum-optimism/optimism/l2geth/eth/gasprice"
	"github.com/ethereum-optimism/optimism/l2geth/rollup/fees"
-	"github.com/ethereum-optimism/optimism/l2geth/rollup/pub"
	"github.com/ethereum-optimism/optimism/l2geth/rollup/rcfg"
)
@@ -72,12 +68,10 @@ type SyncService struct {
	signer           types.Signer
	feeThresholdUp   *big.Float
	feeThresholdDown *big.Float
-	txLogger         pub.Publisher
-	queueSub         QueueSubscriber
}
// NewSyncService returns an initialized sync service
-func NewSyncService(ctx context.Context, cfg Config, txpool *core.TxPool, bc *core.BlockChain, db ethdb.Database, txLogger pub.Publisher, queueSub QueueSubscriber) (*SyncService, error) {
+func NewSyncService(ctx context.Context, cfg Config, txpool *core.TxPool, bc *core.BlockChain, db ethdb.Database) (*SyncService, error) {
	if bc == nil {
		return nil, errors.New("Must pass BlockChain to SyncService")
	}
@@ -149,8 +143,6 @@ func NewSyncService(ctx context.Context, cfg Config, txpool *core.TxPool, bc *co
		signer:           types.NewEIP155Signer(chainID),
		feeThresholdDown: cfg.FeeThresholdDown,
		feeThresholdUp:   cfg.FeeThresholdUp,
-		txLogger:         txLogger,
-		queueSub:         queueSub,
	}
	// The chainHeadSub is used to synchronize the SyncService with the chain.
@@ -165,8 +157,7 @@ func NewSyncService(ctx context.Context, cfg Config, txpool *core.TxPool, bc *co
	// a remote server that indexes the layer one contracts. Place this
	// code behind this if statement so that this can run without the
	// requirement of the remote server being up.
-	// If we're syncing from the Queue, then we can skip all this and rely on L2 published transactions
-	if service.enable && service.backend != BackendQueue {
+	if service.enable {
		// Ensure that the rollup client can connect to a remote server
		// before starting. Retry until it can connect.
		tEnsure := time.NewTicker(10 * time.Second)
@@ -427,10 +418,6 @@ func (s *SyncService) verify() error {
		if err := s.syncTransactionsToTip(); err != nil {
			return fmt.Errorf("Verifier cannot sync transactions with BackendL2: %w", err)
		}
-	case BackendQueue:
-		if err := s.syncTransactionsFromQueue(); err != nil {
-			return fmt.Errorf("Verifier cannot sync transactions with BackendQueue: %w", err)
-		}
	}
	return nil
}
@@ -878,19 +865,13 @@ func (s *SyncService) applyTransactionToTip(tx *types.Transaction) error {
	// the case where the index is updated but the
	// transaction isn't yet added to the chain
	s.SetLatestIndex(tx.GetMeta().Index)
-	if tx.GetMeta().QueueIndex != nil {
-		s.SetLatestEnqueueIndex(tx.GetMeta().QueueIndex)
+	if queueIndex := tx.GetMeta().QueueIndex; queueIndex != nil {
+		s.SetLatestEnqueueIndex(queueIndex)
	}
	// The index was set above so it is safe to dereference
	log.Debug("Applying transaction to tip", "index", *tx.GetMeta().Index, "hash", tx.Hash().Hex(), "origin", tx.QueueOrigin().String())
-	// Log transaction to the failover log
-	if err := s.publishTransaction(tx); err != nil {
-		log.Error("Failed to publish transaction to log", "msg", err)
-		return fmt.Errorf("internal error: transaction logging failed")
-	}
	txs := types.Transactions{tx}
	errCh := make(chan error, 1)
	s.txFeed.Send(core.NewTxsEvent{
@@ -1234,95 +1215,12 @@ func (s *SyncService) syncTransactionRange(start, end uint64, backend Backend) e
	return nil
}
-// syncTransactionsFromQueue will sync the earliest transaction from an external message queue
-func (s *SyncService) syncTransactionsFromQueue() error {
-	// we don't drop messages unless they're already applied
-	cb := func(ctx context.Context, msg QueueSubscriberMessage) {
-		var (
-			queuedTxMeta QueuedTransactionMeta
-			tx           types.Transaction
-			txMeta       *types.TransactionMeta
-		)
-		log.Debug("Reading transaction from queue", "json", string(msg.Data()))
-		if err := json.Unmarshal(msg.Data(), &queuedTxMeta); err != nil {
-			log.Error("Failed to unmarshal logged TransactionMeta", "msg", err)
-			msg.Nack()
-			return
-		}
-		if err := rlp.DecodeBytes(queuedTxMeta.RawTransaction, &tx); err != nil {
-			log.Error("decoding raw transaction failed", "msg", err)
-			msg.Nack()
-			return
-		}
-		if queuedTxMeta.L1BlockNumber == nil || queuedTxMeta.L1Timestamp == nil {
-			log.Error("Missing required queued transaction fields", "msg", string(msg.Data()))
-			msg.Nack()
-			return
-		}
-		txMeta = types.NewTransactionMeta(
-			queuedTxMeta.L1BlockNumber,
-			*queuedTxMeta.L1Timestamp,
-			queuedTxMeta.L1MessageSender,
-			*queuedTxMeta.QueueOrigin,
-			queuedTxMeta.Index,
-			queuedTxMeta.QueueIndex,
-			queuedTxMeta.RawTransaction)
-		tx.SetTransactionMeta(txMeta)
-		if readTx, _, _, _ := rawdb.ReadTransaction(s.db, tx.Hash()); readTx != nil {
-			msg.Ack()
-			return
-		}
-		if err := s.applyTransactionToTip(&tx); err != nil {
-			log.Error("Unable to apply transactions to tip from Queue", "msg", err)
-			msg.Nack()
-			return
-		}
-		log.Debug("Successfully applied queued transaction", "txhash", tx.Hash())
-		msg.Ack()
-	}
-	// This blocks until there's a new message in the queue or ctx deadline hits
-	return s.queueSub.ReceiveMessage(s.ctx, cb)
-}
// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
// starts sending event to the given channel.
func (s *SyncService) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
	return s.scope.Track(s.txFeed.Subscribe(ch))
}
-func (s *SyncService) publishTransaction(tx *types.Transaction) error {
-	rawTx := new(bytes.Buffer)
-	if err := tx.EncodeRLP(rawTx); err != nil {
-		return err
-	}
-	if tx.L1BlockNumber() == nil || tx.L1Timestamp() == 0 {
-		return fmt.Errorf("transaction doesn't contain required fields")
-	}
-	// Manually populate RawTransaction as it's not always available
-	txMeta := tx.GetMeta()
-	txMeta.RawTransaction = rawTx.Bytes()
-	txLog := AsQueuedTransactionMeta(txMeta)
-	encodedTxLog, err := json.Marshal(&txLog)
-	if err != nil {
-		return err
-	}
-	if err := s.txLogger.Publish(s.ctx, encodedTxLog); err != nil {
-		pubTxDropCounter.Inc(1)
-		return err
-	}
-	return nil
-}
func stringify(i *uint64) string {
	if i == nil {
		return "<nil>"
@@ -1335,25 +1233,3 @@ func stringify(i *uint64) string {
func (s *SyncService) IngestTransaction(tx *types.Transaction) error {
	return s.applyTransaction(tx)
}
-type QueuedTransactionMeta struct {
-	L1BlockNumber   *big.Int           `json:"l1BlockNumber"`
-	L1Timestamp     *uint64            `json:"l1Timestamp"`
-	L1MessageSender *common.Address    `json:"l1MessageSender"`
-	QueueOrigin     *types.QueueOrigin `json:"queueOrigin"`
-	Index           *uint64            `json:"index"`
-	QueueIndex      *uint64            `json:"queueIndex"`
-	RawTransaction  []byte             `json:"rawTransaction"`
-}
-func AsQueuedTransactionMeta(txMeta *types.TransactionMeta) *QueuedTransactionMeta {
-	return &QueuedTransactionMeta{
-		L1BlockNumber:   txMeta.L1BlockNumber,
-		L1Timestamp:     &txMeta.L1Timestamp,
-		L1MessageSender: txMeta.L1MessageSender,
-		QueueOrigin:     &txMeta.QueueOrigin,
-		Index:           txMeta.Index,
-		QueueIndex:      txMeta.QueueIndex,
-		RawTransaction:  txMeta.RawTransaction,
-	}
-}
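The removed failover log simply published the JSON encoding of QueuedTransactionMeta. A standalone sketch of what one payload looked like (struct re-declared and trimmed to a few fields here, for illustration only):

package main

import (
	"encoding/json"
	"fmt"
	"math/big"
)

// Trimmed re-declaration of the removed struct, for illustration only.
type QueuedTransactionMeta struct {
	L1BlockNumber  *big.Int `json:"l1BlockNumber"`
	L1Timestamp    *uint64  `json:"l1Timestamp"`
	Index          *uint64  `json:"index"`
	QueueIndex     *uint64  `json:"queueIndex"`
	RawTransaction []byte   `json:"rawTransaction"`
}

func main() {
	ts, idx := uint64(1648000000), uint64(42)
	m := QueuedTransactionMeta{
		L1BlockNumber:  big.NewInt(14500000),
		L1Timestamp:    &ts,
		Index:          &idx,
		RawTransaction: []byte{0xf8, 0x6b}, // encoding/json base64-encodes []byte
	}
	out, _ := json.Marshal(&m)
	fmt.Println(string(out))
	// {"l1BlockNumber":14500000,"l1Timestamp":1648000000,"index":42,"queueIndex":null,"rawTransaction":"+Gs="}
}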
@@ -26,8 +26,6 @@ func (s Backend) String() string {
		return "l1"
	case BackendL2:
		return "l2"
-	case BackendQueue:
-		return "queue"
	default:
		return ""
	}
@@ -40,8 +38,6 @@ func NewBackend(typ string) (Backend, error) {
		return BackendL1, nil
	case "l2":
		return BackendL2, nil
-	case "queue":
-		return BackendQueue, nil
	default:
		return 0, fmt.Errorf("Unknown Backend: %s", typ)
	}
@@ -58,11 +54,6 @@ const (
	// around the transactions as they have not been submitted via a batch to
	// L1.
	BackendL2
-	// BackendQueue Backend involves syncing transactions from an external message queue.
-	// This has the same guarantees as BackendL2 as such transactions may not have been
-	// submitted via a batch to L1.
-	BackendQueue
)
func isCtcTxEqual(a, b *types.Transaction) bool {
...
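After the revert, the Backend enum round-trips only "l1" and "l2". A runnable re-statement of the parse/format symmetry (minimal re-declaration for illustration, not the rollup package itself):

package main

import "fmt"

type Backend uint

const (
	BackendL1 Backend = iota
	BackendL2
)

func (s Backend) String() string {
	switch s {
	case BackendL1:
		return "l1"
	case BackendL2:
		return "l2"
	default:
		return ""
	}
}

func NewBackend(typ string) (Backend, error) {
	switch typ {
	case "l1":
		return BackendL1, nil
	case "l2":
		return BackendL2, nil
	default:
		return 0, fmt.Errorf("Unknown Backend: %s", typ)
	}
}

func main() {
	for _, s := range []string{"l1", "l2", "queue"} {
		if b, err := NewBackend(s); err != nil {
			fmt.Println(s, "->", err) // "queue" now fails instead of mapping to a BackendQueue
		} else {
			fmt.Println(s, "->", b.String())
		}
	}
}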
-FROM golang:1.17.3-alpine3.13 as builder
+FROM golang:1.18.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
@@ -10,7 +10,7 @@ RUN go mod graph | grep -v l2geth | grep -v bss-core | awk '{if ($1 !~ "@") prin
COPY ./go/batch-submitter/ ./
RUN make
-FROM alpine:3.13
+FROM alpine:3.15
RUN apk add --no-cache ca-certificates jq curl
COPY --from=builder /go/batch-submitter/batch-submitter /usr/local/bin/
...
-FROM golang:1.15-alpine3.13 as builder
+FROM golang:1.18.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
COPY ./go/gas-oracle /gas-oracle
RUN cd /gas-oracle && make gas-oracle
-FROM alpine:3.13
+FROM alpine:3.15
RUN apk add --no-cache ca-certificates jq curl
COPY --from=builder /gas-oracle/gas-oracle /usr/local/bin/
...
# Build Geth in a stock Go builder container
-FROM golang:1.15-alpine3.13 as builder
+FROM golang:1.18.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git
@@ -10,7 +10,7 @@ COPY ./l2geth ./
RUN make geth
# Pull Geth into a second stage deploy alpine container
-FROM alpine:3.13
+FROM alpine:3.15
RUN apk add --no-cache ca-certificates jq curl
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/
...
-FROM golang:1.17.3-alpine3.13 as builder
+FROM golang:1.18.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
@@ -9,7 +9,7 @@ RUN go mod graph | grep -v indexer | awk '{if ($1 !~ "@") print $2}' | xargs -n
COPY ./go/indexer/ ./
RUN make
-FROM alpine:3.13
+FROM alpine:3.15
COPY --from=builder /go/indexer/indexer /usr/local/bin
...
@@ -4,7 +4,7 @@ COPY ./go/l2geth-exporter /app/
WORKDIR /app/
RUN make build
-FROM alpine:3.13
+FROM alpine:3.15
RUN apk --no-cache add ca-certificates
WORKDIR /root/
COPY --from=builder /app/l2geth-exporter /usr/local/bin/
...
@@ -4,7 +4,7 @@ COPY ./go/op-exporter /app/
WORKDIR /app/
RUN make build
-FROM alpine:3.13
+FROM alpine:3.15
RUN apk --no-cache add ca-certificates
WORKDIR /root/
COPY --from=builder /app/op-exporter /usr/local/bin/
...
-FROM golang:1.17.3-alpine3.13 as builder
+FROM golang:1.18.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
@@ -9,7 +9,7 @@ RUN go mod graph | grep -v bss-core | awk '{if ($1 !~ "@") print $2}' | xargs -n
COPY ./go/teleportr/ ./
RUN make teleportr teleportr-api
-FROM alpine:3.13
+FROM alpine:3.15
RUN apk add --no-cache ca-certificates jq curl
COPY --from=builder /go/teleportr/teleportr /usr/local/bin/
...
FROM golang:1.17.8-alpine3.15 FROM golang:1.18.0-alpine3.15
RUN apk add --no-cache make gcc musl-dev linux-headers git jq curl bash gzip ca-certificates openssh && \ RUN apk add --no-cache make gcc musl-dev linux-headers git jq curl bash gzip ca-certificates openssh && \
go install gotest.tools/gotestsum@latest && \ go install gotest.tools/gotestsum@latest && \
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.44.2 curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.45.2
CMD ["bash"] CMD ["bash"]
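Taken together, the Dockerfile hunks above move every Go builder stage to golang:1.18.0-alpine3.15 and every runtime stage to alpine:3.15; the go-builder image also bumps golangci-lint from v1.44.2 to v1.45.2, presumably to pick up Go 1.18 support in the linter.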
{
"name": "@eth-optimism/go-builder",
"version": "0.0.0",
"scripts": {},
"license": "MIT",
"dependencies": {}
}
{
"name": "@eth-optimism/js-builder",
"version": "0.0.0",
"scripts": {},
"license": "MIT",
"dependencies": {}
}
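A note on the two manifests above: @eth-optimism/go-builder and @eth-optimism/js-builder are stub packages (empty scripts and dependencies) that evidently exist only so the builder images can participate in changesets and the yarn workspace, matching the workspaces additions below.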
...
@@ -10,7 +10,9 @@
"integration-tests",
"go/*",
"ops/docker/rpc-proxy",
-"ops/docker/hardhat"
+"ops/docker/hardhat",
+"ops/docker/go-builder",
+"ops/docker/js-builder"
],
"nohoist": [
"examples/*",
...
@@ -36,7 +38,7 @@
"lint-staged": "11.0.0",
"patch-package": "^6.4.7",
"prettier": "^2.3.1",
-"typescript": "^4.3.5"
+"typescript": "^4.6.2"
},
"scripts": {
"clean": "yarn lerna run clean --parallel",
...
@@ -70,6 +70,6 @@
"prettier": "^2.3.1",
"supertest": "^6.1.4",
"ts-mocha": "^8.0.0",
-"typescript": "^4.3.5"
+"typescript": "^4.6.2"
}
}
...
@@ -34,27 +34,6 @@ library Lib_SecureMerkleTrie {
return Lib_MerkleTrie.verifyInclusionProof(key, _value, _proof, _root);
}
/**
* @notice Updates a Merkle trie and returns a new root hash.
* @param _key Key of the node to update, as a hex string.
* @param _value Value of the node to update, as a hex string.
* @param _proof Merkle trie inclusion proof for the node *nearest* the
* target node. If the key exists, we can simply update the value.
* Otherwise, we need to modify the trie to handle the new k/v pair.
* @param _root Known root of the Merkle trie. Used to verify that the
* included proof is correctly constructed.
* @return _updatedRoot Root hash of the newly constructed trie.
*/
function update(
bytes memory _key,
bytes memory _value,
bytes memory _proof,
bytes32 _root
) internal pure returns (bytes32 _updatedRoot) {
bytes memory key = _getSecureKey(_key);
return Lib_MerkleTrie.update(key, _value, _proof, _root);
}
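For context on what was removed: update() hashed the key via _getSecureKey and delegated to Lib_MerkleTrie.update, returning the post-update root. The same root transition can still be computed off-chain; a minimal TypeScript sketch, assuming merkle-patricia-tree v4 (the library these tests already import) — offChainUpdate is an illustrative name, not part of this diff:

import { Trie } from 'merkle-patricia-tree/dist/baseTrie'

// Apply a k/v update and capture everything update() consumed on-chain:
// a proof for the node nearest `key` (taken against the old root), the
// known pre-update root, and the resulting post-update root.
async function offChainUpdate(trie: Trie, key: Buffer, val: Buffer) {
  const proof = await Trie.createProof(trie, key)
  const oldRoot = trie.root
  await trie.put(key, val)
  return { proof, oldRoot, newRoot: trie.root }
}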
/**
* @notice Retrieves the value associated with a given key.
* @param _key Key to search for, as hex bytes.
...
@@ -72,21 +51,6 @@ library Lib_SecureMerkleTrie {
return Lib_MerkleTrie.get(key, _proof, _root);
}
/**
* Computes the root hash for a trie with a single node.
* @param _key Key for the single node.
* @param _value Value for the single node.
* @return _updatedRoot Hash of the trie.
*/
function getSingleNodeRootHash(bytes memory _key, bytes memory _value)
internal
pure
returns (bytes32 _updatedRoot)
{
bytes memory key = _getSecureKey(_key);
return Lib_MerkleTrie.getSingleNodeRootHash(key, _value);
}
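The deleted getSingleNodeRootHash has an equally small off-chain counterpart — again a sketch on merkle-patricia-tree with an illustrative name; the secure variant simply hashes the key first, as _getSecureKey did:

import { Trie } from 'merkle-patricia-tree/dist/baseTrie'

// Root hash of a trie holding exactly one key/value pair.
async function singleNodeRootHash(key: Buffer, val: Buffer): Promise<Buffer> {
  const trie = new Trie()
  await trie.put(key, val)
  return trie.root
}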
/*********************
* Private Functions *
*********************/
...
@@ -17,15 +17,6 @@ contract TestLib_MerkleTrie {
return Lib_MerkleTrie.verifyInclusionProof(_key, _value, _proof, _root);
}
function update(
bytes memory _key,
bytes memory _value,
bytes memory _proof,
bytes32 _root
) public pure returns (bytes32) {
return Lib_MerkleTrie.update(_key, _value, _proof, _root);
}
function get(
bytes memory _key,
bytes memory _proof,
...
@@ -33,12 +24,4 @@
) public pure returns (bool, bytes memory) {
return Lib_MerkleTrie.get(_key, _proof, _root);
}
function getSingleNodeRootHash(bytes memory _key, bytes memory _value)
public
pure
returns (bytes32)
{
return Lib_MerkleTrie.getSingleNodeRootHash(_key, _value);
}
}
...
@@ -17,15 +17,6 @@ contract TestLib_SecureMerkleTrie {
return Lib_SecureMerkleTrie.verifyInclusionProof(_key, _value, _proof, _root);
}
function update(
bytes memory _key,
bytes memory _value,
bytes memory _proof,
bytes32 _root
) public pure returns (bytes32) {
return Lib_SecureMerkleTrie.update(_key, _value, _proof, _root);
}
function get(
bytes memory _key,
bytes memory _proof,
...
@@ -33,12 +24,4 @@
) public pure returns (bool, bytes memory) {
return Lib_SecureMerkleTrie.get(_key, _proof, _root);
}
function getSingleNodeRootHash(bytes memory _key, bytes memory _value)
public
pure
returns (bytes32)
{
return Lib_SecureMerkleTrie.getSingleNodeRootHash(_key, _value);
}
}
...
@@ -122,7 +122,7 @@
"ts-generator": "0.0.8",
"ts-node": "^10.0.0",
"typechain": "^6.0.2",
-"typescript": "^4.3.5"
+"typescript": "^4.6.2"
},
"peerDependencies": {
"ethers": "^5"
...
@@ -2,14 +2,11 @@
import * as rlp from 'rlp'
import { ethers } from 'hardhat'
import { Contract } from 'ethers'
-import { fromHexString, toHexString } from '@eth-optimism/core-utils'
+import { toHexString } from '@eth-optimism/core-utils'
import { Trie } from 'merkle-patricia-tree/dist/baseTrie'
/* Internal Imports */
import { expect } from '../../../setup'
import { TrieTestGenerator } from '../../../helpers'
import * as officialTestJson from '../../../data/json/libraries/trie/trietest.json'
import * as officialTestAnyOrderJson from '../../../data/json/libraries/trie/trieanyorder.json'
const NODE_COUNTS = [1, 2, 32, 128]
...
@@ -22,100 +19,6 @@ describe('Lib_MerkleTrie', () => {
})
// Eth-foundation tests: https://github.com/ethereum/tests/tree/develop/TrieTests
describe('official tests', () => {
for (const testName of Object.keys(officialTestJson.tests)) {
it(`should perform official test: ${testName}`, async () => {
const trie = new Trie()
const inputs = officialTestJson.tests[testName].in
const expected = officialTestJson.tests[testName].root
for (const input of inputs) {
let key: Buffer
if (input[0].startsWith('0x')) {
key = fromHexString(input[0])
} else {
key = fromHexString(
ethers.utils.hexlify(ethers.utils.toUtf8Bytes(input[0]))
)
}
let val: Buffer
if (input[1] === null) {
throw new Error('deletions not supported, check your tests')
} else if (input[1].startsWith('0x')) {
val = fromHexString(input[1])
} else {
val = fromHexString(
ethers.utils.hexlify(ethers.utils.toUtf8Bytes(input[1]))
)
}
const proof = await Trie.createProof(trie, key)
const root = trie.root
await trie.put(key, val)
const out = await Lib_MerkleTrie.update(
toHexString(key),
toHexString(val),
toHexString(rlp.encode(proof)),
root
)
expect(out).to.equal(toHexString(trie.root))
}
expect(toHexString(trie.root)).to.equal(expected)
})
}
})
describe('official tests - trie any order', () => {
for (const testName of Object.keys(officialTestAnyOrderJson.tests)) {
it(`should perform official test: ${testName}`, async () => {
const trie = new Trie()
const inputs = officialTestAnyOrderJson.tests[testName].in
const expected = officialTestAnyOrderJson.tests[testName].root
for (const input of Object.keys(inputs)) {
let key: Buffer
if (input.startsWith('0x')) {
key = fromHexString(input)
} else {
key = fromHexString(
ethers.utils.hexlify(ethers.utils.toUtf8Bytes(input))
)
}
let val: Buffer
if (inputs[input] === null) {
throw new Error('deletions not supported, check your tests')
} else if (inputs[input].startsWith('0x')) {
val = fromHexString(inputs[input])
} else {
val = fromHexString(
ethers.utils.hexlify(ethers.utils.toUtf8Bytes(inputs[input]))
)
}
const proof = await Trie.createProof(trie, key)
const root = trie.root
await trie.put(key, val)
const out = await Lib_MerkleTrie.update(
toHexString(key),
toHexString(val),
toHexString(rlp.encode(proof)),
root
)
expect(out).to.equal(toHexString(trie.root))
}
expect(toHexString(trie.root)).to.equal(expected)
})
}
})
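Both suites above existed solely to exercise Lib_MerkleTrie.update against the eth-foundation trie vectors; with update() deleted from the library they no longer compile, which is why they are removed wholesale along with the fromHexString, Trie, and JSON-fixture imports.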
describe('verifyInclusionProof', () => {
for (const nodeCount of NODE_COUNTS) {
describe(`inside a trie with ${nodeCount} nodes and keys/vals of size ${nodeCount} bytes`, () => {
...
@@ -152,62 +55,6 @@ describe('Lib_MerkleTrie', () => {
}
})
describe('update', () => {
for (const nodeCount of NODE_COUNTS) {
describe(`inside a trie with ${nodeCount} nodes and keys/vals of size ${nodeCount} bytes`, () => {
let generator: TrieTestGenerator
before(async () => {
generator = await TrieTestGenerator.fromRandom({
seed: `seed.update.${nodeCount}`,
nodeCount,
secure: false,
keySize: nodeCount,
valSize: nodeCount,
})
})
for (
let i = 0;
i < nodeCount;
i += nodeCount / (nodeCount > 8 ? 8 : 1)
) {
it(`should correctly update node #${i}`, async () => {
const test = await generator.makeNodeUpdateTest(
i,
'0x1234123412341234'
)
expect(
await Lib_MerkleTrie.update(
test.key,
test.val,
test.proof,
test.root
)
).to.equal(test.newRoot)
})
}
})
}
it('should return the single-node root hash if the trie was previously empty', async () => {
const key = '0x1234'
const val = '0x5678'
const trie = new Trie()
await trie.put(fromHexString(key), fromHexString(val))
expect(
await Lib_MerkleTrie.update(
key,
val,
'0x', // Doesn't require a proof
ethers.utils.keccak256('0x80') // Empty Merkle trie root hash
)
).to.equal(toHexString(trie.root))
})
})
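One constant in the deleted test above deserves a gloss: the empty-trie root is keccak256('0x80') because 0x80 is the RLP encoding of the empty byte string. A standalone check, assuming ethers v5 (not part of the diff):

import { ethers } from 'ethers'

// RLP("") === 0x80, so the canonical empty Merkle trie root is keccak256(0x80).
const EMPTY_TRIE_ROOT = ethers.utils.keccak256('0x80')
// => '0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421'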
describe('get', () => {
for (const nodeCount of NODE_COUNTS) {
describe(`inside a trie with ${nodeCount} nodes and keys/vals of size ${nodeCount} bytes`, () => {
...
@@ -50,43 +50,6 @@ describe('Lib_SecureMerkleTrie', () => {
}
})
describe('update', () => {
for (const nodeCount of NODE_COUNTS) {
describe(`inside a trie with ${nodeCount} nodes`, () => {
let generator: TrieTestGenerator
before(async () => {
generator = await TrieTestGenerator.fromRandom({
seed: `seed.update.${nodeCount}`,
nodeCount,
secure: true,
})
})
for (
let i = 0;
i < nodeCount;
i += nodeCount / (nodeCount > 8 ? 8 : 1)
) {
it(`should correctly update node #${i}`, async () => {
const test = await generator.makeNodeUpdateTest(
i,
'0x1234123412341234'
)
expect(
await Lib_SecureMerkleTrie.update(
test.key,
test.val,
test.proof,
test.root
)
).to.equal(test.newRoot)
})
}
})
}
})
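The secure variant differs from the base trie only in keying: every key is keccak-256 hashed before insertion, mirroring _getSecureKey on-chain. A minimal sketch, assuming merkle-patricia-tree's SecureTrie and keccak256 from ethereumjs-util (neither appears in this diff):

import { SecureTrie } from 'merkle-patricia-tree'
import { keccak256 } from 'ethereumjs-util'

async function secureTrieDemo() {
  const trie = new SecureTrie()
  const key = Buffer.from('key')
  await trie.put(key, Buffer.from('val'))
  // Equivalent to a base-trie put at the hashed key, which is where
  // SecureTrie actually stores the value.
  const hashedKey = keccak256(key)
  return { root: trie.root, hashedKey }
}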
describe('get', () => {
for (const nodeCount of NODE_COUNTS) {
describe(`inside a trie with ${nodeCount} nodes`, () => {
...
@@ -115,22 +78,4 @@ describe('Lib_SecureMerkleTrie', () => {
})
}
})
describe('getSingleNodeRootHash', () => {
let generator: TrieTestGenerator
before(async () => {
generator = await TrieTestGenerator.fromRandom({
seed: `seed.get.${1}`,
nodeCount: 1,
secure: true,
})
})
it(`should get the root hash of a trie with a single node`, async () => {
const test = await generator.makeInclusionProofTest(0)
expect(
await Lib_SecureMerkleTrie.getSingleNodeRootHash(test.key, test.val)
).to.equal(test.root)
})
})
})
...
@@ -60,6 +60,6 @@
"nyc": "^15.1.0",
"prettier": "^2.3.1",
"ts-mocha": "^8.0.0",
-"typescript": "^4.3.5"
+"typescript": "^4.6.2"
}
}
{
"extends": "../../tsconfig.json",
-"typeRoots": ["node_modules/@types", "src/@types"]
+"compilerOptions": {
+  "typeRoots": ["node_modules/@types", "src/@types"]
+}
}
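Worth spelling out for the two tsconfig fixes in this commit: typeRoots is a compiler option, so at the top level of tsconfig.json it was silently ignored; nesting it under compilerOptions makes the ambient declarations in src/@types actually resolve.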
...
@@ -83,6 +83,6 @@
"prom-client": "^13.1.0",
"rimraf": "^3.0.2",
"ts-node": "^10.0.0",
-"typescript": "^4.3.5"
+"typescript": "^4.6.2"
}
}
{
"extends": "../../tsconfig.json",
-"typeRoots": ["node_modules/@types", "src/@types"]
+"compilerOptions": {
+  "typeRoots": ["node_modules/@types", "src/@types"]
+}
}
...
@@ -56,6 +56,6 @@
"lint-staged": "11.0.0",
"prettier": "^2.3.1",
"ts-node": "^10.0.0",
-"typescript": "^4.3.5"
+"typescript": "^4.6.2"
}
}
...
@@ -50,6 +50,6 @@
"eslint-plugin-unicorn": "^32.0.1",
"lint-staged": "11.0.0",
"ts-node": "^10.0.0",
-"typescript": "^4.3.5"
+"typescript": "^4.6.2"
}
}
...
@@ -62,7 +62,7 @@
"prettier": "^2.3.1",
"ts-mocha": "^8.0.0",
"typedoc": "^0.22.13",
-"typescript": "^4.3.5"
+"typescript": "^4.6.2"
},
"dependencies": {
"@eth-optimism/contracts": "0.5.19",
...
@@ -15430,6 +15430,11 @@ typedarray@^0.0.6:
resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
integrity sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=
typescript@^4.3.4, typescript@^4.3.5, typescript@^4.6.2:
version "4.6.2"
resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.6.2.tgz#fe12d2727b708f4eef40f51598b3398baa9611d4"
integrity sha512-HM/hFigTBHZhLXshn9sN37H085+hQGeJHJ/X7LpBWLID/fbc2acUMfU+lGD98X81sKP+pFa9f0DZmCwB9GnbAg==
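The new lockfile entry above collapses the ranges ^4.3.4, ^4.3.5, and ^4.6.2 into a single typescript resolution — possible because 4.6.2 satisfies all three caret ranges — and the now-orphaned 4.3.5 entry is dropped further down.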
typedoc@^0.22.13:
version "0.22.13"
resolved "https://registry.yarnpkg.com/typedoc/-/typedoc-0.22.13.tgz#d061f8f0fb7c9d686e48814f245bddeea4564e66"
...
@@ -15441,11 +15446,6 @@ typedoc@^0.22.13:
minimatch "^5.0.1"
shiki "^0.10.1"
typescript@^4.3.4, typescript@^4.3.5:
version "4.3.5"
resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.3.5.tgz#4d1c37cc16e893973c45a06886b7113234f119f4"
integrity sha512-DqQgihaQ9cUrskJo9kIyW/+g0Vxsk8cDtZ52a3NGh0YNTfpUSArXSohyUGnvbPazEPLu398C0UxmKSOrPumUzA==
typewise-core@^1.2, typewise-core@^1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/typewise-core/-/typewise-core-1.2.0.tgz#97eb91805c7f55d2f941748fa50d315d991ef195"
...