Commit ad4440f0 authored by Matthew Slipper's avatar Matthew Slipper Committed by GitHub

Merge pull request #2721 from ethereum-optimism/develop

Develop -> Master
parents 06192c49 f96e73b5
---
'@eth-optimism/proxyd': patch
---
Improve robustness against unexpected JSON-RPC from upstream
---
'@eth-optimism/contracts-periphery': patch
---
Re-deploy RetroReceiver
---
'@eth-optimism/contracts': patch
---
goerli redeploy
---
'@eth-optimism/contracts-periphery': patch
---
Tweaks Drippie contract for client-side ease
---
"@eth-optimism/contracts-governance": patch
"@eth-optimism/contracts": patch
---
package: contracts-governance
---
'@eth-optimism/proxyd': patch
---
Fix concurrent write panic in WS
---
'@eth-optimism/contracts-bedrock': patch
---
Update comments and style for L1 contracts
---
'@eth-optimism/contracts-periphery': patch
---
Adds new TeleportrWithdrawer contract for withdrawing from Teleportr
---
'@eth-optimism/drippie-mon': minor
---
Release drippie-mon
---
'@eth-optimism/common-ts': patch
---
Expose service internal options as environment or cli options
......@@ -91,6 +91,7 @@ jobs:
- packages/contracts-periphery/node_modules
- packages/core-utils/node_modules
- packages/data-transport-layer/node_modules
- packages/drippie-mon/node_modules
- packages/fault-detector/node_modules
- packages/message-relayer/node_modules
- packages/replica-healthcheck/node_modules
......@@ -178,6 +179,24 @@ jobs:
command: yarn test:coverage
working_directory: packages/contracts-periphery
contracts-governance-tests:
docker:
- image: ethereumoptimism/js-builder:latest
resource_class: xlarge
steps:
- restore_cache:
keys:
- v2-cache-yarn-build-{{ .Revision }}
- checkout
- run:
name: Lint
command: yarn lint:check
working_directory: packages/contracts-governance
- run:
name: Test
command: yarn test
working_directory: packages/contracts-governance
dtl-tests:
docker:
- image: ethereumoptimism/js-builder:latest
......@@ -518,6 +537,9 @@ workflows:
- contracts-bedrock-tests:
requires:
- yarn-monorepo
- contracts-governance-tests:
requires:
- yarn-monorepo
- js-lint-test:
name: dtl-tests
package_name: data-transport-layer
......@@ -538,6 +560,11 @@ workflows:
package_name: fault-detector
requires:
- yarn-monorepo
- js-lint-test:
name: drippie-mon-tests
package_name: drippie-mon
requires:
- yarn-monorepo
- js-lint-test:
name: message-relayer-tests
package_name: message-relayer
......@@ -628,6 +655,14 @@ workflows:
target: fault-detector
context:
- optimism
- docker-publish:
name: drippie-mon-release
docker_file: ops/docker/Dockerfile.packages
docker_tags: ethereumoptimism/drippie-mon:nightly
docker_context: .
target: drippie-mon
context:
- optimism
- docker-publish:
name: message-relayer-release
docker_file: ops/docker/Dockerfile.packages
......
......@@ -7,6 +7,7 @@
- 'packages/contracts/**/*'
- 'packages/contracts-periphery/**/*'
- 'packages/data-transport-layer/**/*'
- 'packages/drippie-mon/**/*'
- 'packages/message-relayer/**/*'
- 'packages/fault-detector/**/*'
- 'patches/**/*'
......
......@@ -27,12 +27,23 @@ pull_request_rules:
queue:
name: default
method: squash
- name: Add merge train label
conditions:
- "queue-position >= 0"
actions:
comment:
message: |
This PR has been added to the merge queue, and will be merged soon.
message: |
This PR has been added to the merge queue, and will be merged soon.
label:
add:
- on-merge-train
- name: Remove merge train label
conditions:
- "queue-position = -1"
actions:
label:
remove:
- on-merge-train
- name: Handle security critical PRs
conditions:
- "label=SR-Risk"
......@@ -96,9 +107,6 @@ pull_request_rules:
comment:
message: |
Merge failed. Please see automated check logs for more details.
label:
remove:
- on-merge-train
- name: Nag changesets
conditions:
- and:
......
......@@ -18,6 +18,7 @@ jobs:
l2geth: ${{ steps.packages.outputs.l2geth }}
message-relayer: ${{ steps.packages.outputs.message-relayer }}
fault-detector: ${{ steps.packages.outputs.fault-detector }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
data-transport-layer: ${{ steps.packages.outputs.data-transport-layer }}
contracts: ${{ steps.packages.outputs.contracts }}
gas-oracle: ${{ steps.packages.outputs.gas-oracle }}
......@@ -229,6 +230,33 @@ jobs:
push: true
tags: ethereumoptimism/fault-detector:${{ needs.canary-publish.outputs.canary-docker-tag }}
drippie-mon:
name: Publish Drippie Monitor Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.drippie-mon != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: relayer
push: true
tags: ethereumoptimism/drippie-mon:${{ needs.canary-publish.outputs.canary-docker-tag }}
data-transport-layer:
name: Publish Data Transport Layer Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
......
......@@ -14,6 +14,7 @@ jobs:
l2geth: ${{ steps.packages.outputs.l2geth }}
message-relayer: ${{ steps.packages.outputs.message-relayer }}
fault-detector: ${{ steps.packages.outputs.fault-detector }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
data-transport-layer: ${{ steps.packages.outputs.data-transport-layer }}
contracts: ${{ steps.packages.outputs.contracts }}
gas-oracle: ${{ steps.packages.outputs.gas-oracle }}
......@@ -372,6 +373,33 @@ jobs:
push: true
tags: ethereumoptimism/fault-detector:${{ needs.release.outputs.fault-detector }},ethereumoptimism/fault-detector:latest
drippie-mon:
name: Publish Drippie Monitor Version ${{ needs.release.outputs.drippie-mon }}
needs: release
if: needs.release.outputs.drippie-mon != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: drippie-mon
push: true
tags: ethereumoptimism/drippie-mon:${{ needs.release.outputs.drippie-mon }},ethereumoptimism/drippie-mon:latest
data-transport-layer:
name: Publish Data Transport Layer Version ${{ needs.release.outputs.data-transport-layer }}
needs: release
......
......@@ -9,6 +9,7 @@
{"directory": "packages/contracts", "changeProcessCWD": true },
{"directory": "packages/contracts-periphery", "changeProcessCWD": true },
{"directory": "packages/data-transport-layer", "changeProcessCWD": true },
{"directory": "packages/drippie-mon", "changeProcessCWD": true },
{"directory": "packages/batch-submitter", "changeProcessCWD": true },
{"directory": "packages/message-relayer", "changeProcessCWD": true },
{"directory": "packages/fault-detector", "changeProcessCWD": true },
......
......@@ -64,6 +64,7 @@ You'll need the following:
* [Yarn](https://classic.yarnpkg.com/en/docs/install)
* [Docker](https://docs.docker.com/get-docker/)
* [Docker Compose](https://docs.docker.com/compose/install/)
* [Foundry](https://getfoundry.sh)
### Setup
......@@ -85,6 +86,10 @@ nvm use
### Building the TypeScript packages
[foundry](https://github.com/foundry-rs/foundry) is used for some smart contract
development in the monorepo. It is required to build the TypeScript packages
and compile the smart contracts. Install foundry [here](https://getfoundry.sh/).
To build all of the [TypeScript packages](./packages), run:
```bash
......
......@@ -59,8 +59,8 @@ devnet-clean:
rm -rf ./packages/contracts-bedrock/deployments/devnetL1
rm -rf ./.devnet
cd ./ops-bedrock && docker-compose down
docker image ls | grep ops-bedrock_ | cut -d ' ' -f 1 | xargs docker rmi
docker volume ls | grep ops-bedrock_ | cut -d ' ' -f 1 | xargs docker volume rm
docker image ls 'ops-bedrock*' --format='{{.Repository}}' | xargs docker rmi
docker volume ls --filter name=ops-bedrock --format='{{.Name}}' | xargs docker volume rm
.PHONY: devnet-clean
......
......@@ -36,7 +36,8 @@ root
│ ├── <a href="./packages/contracts-periphery">contracts-periphery</a>: Peripheral contracts for Optimism
│ ├── <a href="./packages/core-utils">core-utils</a>: Low-level utilities that make building Optimism easier
│ ├── <a href="./packages/data-transport-layer">data-transport-layer</a>: Service for indexing Optimism-related L1 data
│ ├── <a href="./packages/fault-detector">fault-detector</a>:
│ ├── <a href="./packages/drippie-mon">drippie-mon</a>: Service for monitoring Drippie instances
│ ├── <a href="./packages/fault-detector">fault-detector</a>: Service for detecting Sequencer faults
│ ├── <a href="./packages/integration-tests-bedrock">integration-tests-bedrock</a> (BEDROCK upgrade): Bedrock integration tests.
│ ├── <a href="./packages/message-relayer">message-relayer</a>: Tool for automatically relaying L1<>L2 messages in development
│ ├── <a href="./packages/replica-healthcheck">replica-healthcheck</a>: Service for monitoring the health of a replica node
......
......@@ -15,6 +15,7 @@ use (
./op-proposer
./proxyd
./teleportr
./state-surgery
)
replace github.com/ethereum/go-ethereum v1.10.17 => github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220602230953-dd2e24b3359f
......
This diff is collapsed.
package predeploys
const (
L2ToL1MessagePasser = "0x4200000000000000000000000000000000000000"
OVM_DeployerWhitelist = "0x4200000000000000000000000000000000000002"
OVM_ETH = "0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000"
WETH9 = "0x4200000000000000000000000000000000000006"
L2CrossDomainMessenger = "0x4200000000000000000000000000000000000007"
L2StandardBridge = "0x4200000000000000000000000000000000000010"
SequencerFeeVault = "0x4200000000000000000000000000000000000011"
OptimismMintableTokenFactory = "0x4200000000000000000000000000000000000012"
L1BlockNumber = "0x4200000000000000000000000000000000000013"
OVM_GasPriceOracle = "0x420000000000000000000000000000000000000F"
L1Block = "0x4200000000000000000000000000000000000015"
GovernanceToken = "0x4200000000000000000000000000000000000042"
)
......@@ -11,9 +11,9 @@ import (
bss "github.com/ethereum-optimism/optimism/op-batcher"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
rollupNode "github.com/ethereum-optimism/optimism/op-node/node"
"github.com/ethereum-optimism/optimism/op-node/p2p"
"github.com/ethereum-optimism/optimism/op-node/predeploy"
"github.com/ethereum-optimism/optimism/op-node/rollup"
l2os "github.com/ethereum-optimism/optimism/op-proposer"
......@@ -234,7 +234,7 @@ func (cfg SystemConfig) start() (*System, error) {
}
l2Alloc[cfg.L1InfoPredeployAddress] = core.GenesisAccount{Code: common.FromHex(bindings.L1BlockDeployedBin), Balance: common.Big0}
l2Alloc[predeploy.WithdrawalContractAddress] = core.GenesisAccount{Code: common.FromHex(bindings.L2ToL1MessagePasserDeployedBin), Balance: common.Big0}
l2Alloc[common.HexToAddress(predeploys.L2ToL1MessagePasser)] = core.GenesisAccount{Code: common.FromHex(bindings.L2ToL1MessagePasserDeployedBin), Balance: common.Big0}
genesisTimestamp := uint64(time.Now().Unix())
......@@ -335,9 +335,9 @@ func (cfg SystemConfig) start() (*System, error) {
L1NodeAddr: l1Node.WSEndpoint(),
L1TrustRPC: false,
}
rollupCfg.L2s = &rollupNode.L2EndpointsConfig{
L2EngineAddrs: []string{sys.nodes[name].WSAuthEndpoint()},
L2EngineJWTSecrets: [][32]byte{cfg.JWTSecret},
rollupCfg.L2 = &rollupNode.L2EndpointConfig{
L2EngineAddr: sys.nodes[name].WSAuthEndpoint(),
L2EngineJWTSecret: cfg.JWTSecret,
}
}
......
......@@ -11,10 +11,10 @@ import (
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/l2"
"github.com/ethereum-optimism/optimism/op-node/node"
rollupNode "github.com/ethereum-optimism/optimism/op-node/node"
"github.com/ethereum-optimism/optimism/op-node/predeploy"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/testlog"
......@@ -97,7 +97,7 @@ func defaultSystemConfig(t *testing.T) SystemConfig {
P2PSignerHDPath: p2pSignerHDPath,
DeployerHDPath: l2OutputHDPath,
CliqueSignerDerivationPath: cliqueSignerHDPath,
L1InfoPredeployAddress: derive.L1InfoPredeployAddr,
L1InfoPredeployAddress: common.HexToAddress(predeploys.L1Block),
L1BlockTime: 2,
L1ChainID: big.NewInt(900),
L2ChainID: big.NewInt(901),
......@@ -709,7 +709,7 @@ func TestWithdrawals(t *testing.T) {
require.Nil(t, err, "Waiting for deposit tx on L1")
// Bind L2 Withdrawer Contract
l2withdrawer, err := bindings.NewL2ToL1MessagePasser(predeploy.WithdrawalContractAddress, l2Seq)
l2withdrawer, err := bindings.NewL2ToL1MessagePasser(common.HexToAddress(predeploys.L2ToL1MessagePasser), l2Seq)
require.Nil(t, err, "binding withdrawer on L2")
// Wait for deposit to arrive
......
......@@ -19,9 +19,9 @@ var (
Value: "http://127.0.0.1:8545",
EnvVar: prefixEnvVar("L1_ETH_RPC"),
}
L2EngineAddrs = cli.StringSliceFlag{
L2EngineAddr = cli.StringFlag{
Name: "l2",
Usage: "Addresses of L2 Engine JSON-RPC endpoints to use (engine and eth namespace required)",
Usage: "Address of L2 Engine JSON-RPC endpoints to use (engine and eth namespace required)",
Required: true,
EnvVar: prefixEnvVar("L2_ENGINE_RPC"),
}
......@@ -50,13 +50,13 @@ var (
Usage: "Trust the L1 RPC, sync faster at risk of malicious/buggy RPC providing bad or inconsistent L1 data",
EnvVar: prefixEnvVar("L1_TRUST_RPC"),
}
L2EngineJWTSecret = cli.StringSliceFlag{
Name: "l2.jwt-secret",
Usage: "Paths to JWT secret keys, one per L2 endpoint, in the same order as the provided l2 addresses. " +
"Keys are 32 bytes, hex encoded in a file. A new key per endpoint will be generated if left empty.",
Required: false,
Value: &cli.StringSlice{},
EnvVar: prefixEnvVar("L2_ENGINE_AUTH"),
L2EngineJWTSecret = cli.StringFlag{
Name: "l2.jwt-secret",
Usage: "Path to JWT secret key. Keys are 32 bytes, hex encoded in a file. A new key will be generated if left empty.",
EnvVar: prefixEnvVar("L2_ENGINE_AUTH"),
Required: false,
Value: "",
Destination: new(string),
}
SequencingEnabledFlag = cli.BoolFlag{
Name: "sequencing.enabled",
......@@ -91,7 +91,7 @@ var (
var requiredFlags = []cli.Flag{
L1NodeAddr,
L2EngineAddrs,
L2EngineAddr,
RollupConfig,
RPCListenAddr,
RPCListenPort,
......
......@@ -143,7 +143,7 @@ func (s *Source) GetPayload(ctx context.Context, payloadId PayloadID) (*Executio
var result ExecutionPayload
err := s.rpc.CallContext(ctx, &result, "engine_getPayloadV1", payloadId)
if err != nil {
e = e.New("payload_id", "err", err)
e = e.New("payload_id", payloadId, "err", err)
if rpcErr, ok := err.(rpc.Error); ok {
code := ErrorCode(rpcErr.ErrorCode())
if code != UnavailablePayload {
......
......@@ -82,7 +82,6 @@ func (res *AccountResult) Verify(stateRoot common.Hash) error {
}
// BlockToBatch converts a L2 block to batch-data.
// Empty L2 blocks (i.e. only a L1 info deposit tx) return a nil batch with nil error.
// Invalid L2 blocks may return an error.
func BlockToBatch(config *rollup.Config, block *types.Block) (*derive.BatchData, error) {
txs := block.Transactions()
......@@ -92,9 +91,6 @@ func BlockToBatch(config *rollup.Config, block *types.Block) (*derive.BatchData,
if typ := txs[0].Type(); typ != types.DepositTxType {
return nil, fmt.Errorf("expected first tx to be a deposit of L1 info, but got type: %d", typ)
}
if len(txs) == 1 { // the L1 info deposit tx, but empty otherwise, no batch data to submit
return nil, nil
}
// encode non-deposit transactions
var opaqueTxs []hexutil.Bytes
......
......@@ -9,9 +9,9 @@ import (
"github.com/ethereum-optimism/optimism/op-node/version"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/l2"
"github.com/ethereum-optimism/optimism/op-node/predeploy"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum"
......@@ -62,7 +62,7 @@ func (n *nodeAPI) OutputAtBlock(ctx context.Context, number rpc.BlockNumber) ([]
return nil, ethereum.NotFound
}
proof, err := n.client.GetProof(ctx, predeploy.WithdrawalContractAddress, toBlockNumArg(number))
proof, err := n.client.GetProof(ctx, common.HexToAddress(predeploys.L2ToL1MessagePasser), toBlockNumArg(number))
if err != nil {
n.log.Error("failed to get contract proof", "err", err)
return nil, err
......@@ -219,7 +219,7 @@ func (n *nodeAPI) GetBatchBundle(ctx context.Context, req *BatchBundleRequest) (
var pruneCount int
for {
if !bundleBuilder.HasNonEmptyCandidate() {
if !bundleBuilder.HasCandidate() {
return bundleBuilder.Response(nil), nil
}
......@@ -236,7 +236,7 @@ func (n *nodeAPI) GetBatchBundle(ctx context.Context, req *BatchBundleRequest) (
// occur since our initial greedy estimate has a very small, bounded
// error tolerance, so simply remove the last block and try again.
if bundleSize > uint64(req.MaxSize) {
bundleBuilder.PruneLastNonEmpty()
bundleBuilder.PruneLast()
pruneCount++
continue
}
......
......@@ -12,9 +12,7 @@ type BundleCandidate struct {
// ID is the block ID of an L2 block.
ID eth.BlockID
// Batch is batch data drived from the L2 Block. If Batch is nil, the block
// is considered to be empty. Empty blocks do not contribute to the size of
// a bundle.
// Batch is batch data drived from the L2 Block.
Batch *derive.BatchData
}
......@@ -24,7 +22,6 @@ type BundleCandidate struct {
type BundleBuilder struct {
prevBlockID eth.BlockID
candidates []BundleCandidate
numNonEmpty int
}
// NewBundleBuilder creates a new instance of a BundleBuilder, where prevBlockID
......@@ -33,52 +30,37 @@ func NewBundleBuilder(prevBlockID eth.BlockID) *BundleBuilder {
return &BundleBuilder{
prevBlockID: prevBlockID,
candidates: nil,
numNonEmpty: 0,
}
}
// AddCandidate appends a candidate block to the BundleBuilder.
func (b *BundleBuilder) AddCandidate(candidate BundleCandidate) {
b.candidates = append(b.candidates, candidate)
if candidate.Batch != nil {
b.numNonEmpty++
}
}
// HasNonEmptyCandidate returns true if there are a non-zero number of
// HasCandidate returns true if there are a non-zero number of
// non-empty bundle candidates.
func (b *BundleBuilder) HasNonEmptyCandidate() bool {
return b.numNonEmpty > 0
func (b *BundleBuilder) HasCandidate() bool {
return len(b.candidates) > 0
}
// PruneLastNonEmpty removes the latest non-empty candidate block and all empty
// blocks follow it. This method is used to reduce the size of the encoded
// PruneLast removes the last candidate block.
// This method is used to reduce the size of the encoded
// bundle in order to satisfy the desired size constraints.
func (b *BundleBuilder) PruneLastNonEmpty() {
if b.numNonEmpty == 0 {
func (b *BundleBuilder) PruneLast() {
if len(b.candidates) == 0 {
return
}
for i := len(b.candidates) - 1; i >= 0; i-- {
candidate := b.candidates[i]
if candidate.Batch != nil {
b.candidates = b.candidates[:i]
b.numNonEmpty--
return
}
}
b.candidates = b.candidates[:len(b.candidates)-1]
}
// Batches returns a slice of all non-nil batches contained within the candidate
// blocks.
func (b *BundleBuilder) Batches() []*derive.BatchData {
var batches = make([]*derive.BatchData, 0, b.numNonEmpty)
var batches = make([]*derive.BatchData, 0, len(b.candidates))
for _, candidate := range b.candidates {
if candidate.Batch != nil {
batches = append(batches, candidate.Batch)
}
batches = append(batches, candidate.Batch)
}
return batches
}
......
......@@ -38,7 +38,7 @@ func createResponse(
func TestNewBundleBuilder(t *testing.T) {
builder := node.NewBundleBuilder(testPrevBlockID)
require.False(t, builder.HasNonEmptyCandidate())
require.False(t, builder.HasCandidate())
require.Equal(t, builder.Batches(), []*derive.BatchData{})
expResponse := createResponse(testPrevBlockID, testPrevBlockID, nil)
require.Equal(t, expResponse, builder.Response(nil))
......@@ -49,31 +49,14 @@ func TestNewBundleBuilder(t *testing.T) {
func TestBundleBuilderAddCandidate(t *testing.T) {
builder := node.NewBundleBuilder(testPrevBlockID)
// Add an empty candidate.
blockID6 := eth.BlockID{
Number: 6,
Hash: common.HexToHash("0x66"),
}
builder.AddCandidate(node.BundleCandidate{
ID: blockID6,
Batch: nil,
})
// Should behave the same as completely empty builder except for updated
// last block ID fields.
require.False(t, builder.HasNonEmptyCandidate())
require.Equal(t, builder.Batches(), []*derive.BatchData{})
expResponse := createResponse(testPrevBlockID, blockID6, nil)
require.Equal(t, expResponse, builder.Response(nil))
// Add non-empty candidate.
// Add candidate.
blockID7 := eth.BlockID{
Number: 7,
Hash: common.HexToHash("0x77"),
}
batchData7 := &derive.BatchData{
BatchV1: derive.BatchV1{
Epoch: 7,
Epoch: 3,
Timestamp: 42,
Transactions: []hexutil.Bytes{
hexutil.Bytes([]byte{0x42, 0x07}),
......@@ -85,144 +68,35 @@ func TestBundleBuilderAddCandidate(t *testing.T) {
Batch: batchData7,
})
// HasNonEmptyCandidate should register that we have data to submit to L1,
// HasCandidate should register that we have data to submit to L1,
// last block ID fields should also be updated.
require.True(t, builder.HasNonEmptyCandidate())
require.True(t, builder.HasCandidate())
require.Equal(t, builder.Batches(), []*derive.BatchData{batchData7})
expResponse = createResponse(testPrevBlockID, blockID7, testBundleData)
expResponse := createResponse(testPrevBlockID, blockID7, testBundleData)
require.Equal(t, expResponse, builder.Response(testBundleData))
// Add another empty block.
// Add another block.
blockID8 := eth.BlockID{
Number: 8,
Hash: common.HexToHash("0x88"),
}
batchData8 := &derive.BatchData{
BatchV1: derive.BatchV1{
Epoch: 5,
Timestamp: 44,
Transactions: []hexutil.Bytes{
hexutil.Bytes([]byte{0x13, 0x37}),
},
},
}
builder.AddCandidate(node.BundleCandidate{
ID: blockID8,
Batch: nil,
Batch: batchData8,
})
// Last block ID fields should be updated.
require.True(t, builder.HasNonEmptyCandidate())
require.Equal(t, builder.Batches(), []*derive.BatchData{batchData7})
require.True(t, builder.HasCandidate())
require.Equal(t, builder.Batches(), []*derive.BatchData{batchData7, batchData8})
expResponse = createResponse(testPrevBlockID, blockID8, testBundleData)
require.Equal(t, expResponse, builder.Response(testBundleData))
}
var pruneLastNonEmptyTests = []pruneLastNonEmptyTestCase{
{
name: "no candidates",
candidates: nil,
expResponse: createResponse(testPrevBlockID, testPrevBlockID, nil),
},
{
name: "only empty blocks",
candidates: []node.BundleCandidate{
{
ID: eth.BlockID{
Number: 6,
Hash: common.HexToHash("0x66"),
},
Batch: nil,
},
{
ID: eth.BlockID{
Number: 7,
Hash: common.HexToHash("0x77"),
},
Batch: nil,
},
},
expResponse: createResponse(
testPrevBlockID,
eth.BlockID{
Number: 7,
Hash: common.HexToHash("0x77"),
}, nil,
),
},
{
name: "last block is non empty",
candidates: []node.BundleCandidate{
{
ID: eth.BlockID{
Number: 6,
Hash: common.HexToHash("0x66"),
},
Batch: nil,
},
{
ID: eth.BlockID{
Number: 7,
Hash: common.HexToHash("0x77"),
},
Batch: &derive.BatchData{},
},
},
expResponse: createResponse(
testPrevBlockID,
eth.BlockID{
Number: 6,
Hash: common.HexToHash("0x66"),
}, nil,
),
},
{
name: "non empty block followed by empty block",
candidates: []node.BundleCandidate{
{
ID: eth.BlockID{
Number: 6,
Hash: common.HexToHash("0x66"),
},
Batch: nil,
},
{
ID: eth.BlockID{
Number: 7,
Hash: common.HexToHash("0x77"),
},
Batch: &derive.BatchData{},
},
{
ID: eth.BlockID{
Number: 8,
Hash: common.HexToHash("0x88"),
},
Batch: nil,
},
},
expResponse: createResponse(
testPrevBlockID,
eth.BlockID{
Number: 6,
Hash: common.HexToHash("0x66"),
}, nil,
),
},
}
// TestBundleBuilderPruneLastNonEmpty asserts that pruning the BundleBuilder
// always removes the last non-empty block, if one exists, and any subsequent
// empty blocks.
func TestBundleBuilderPruneLastNonEmpty(t *testing.T) {
for _, test := range pruneLastNonEmptyTests {
t.Run(test.name, test.run)
}
}
type pruneLastNonEmptyTestCase struct {
name string
candidates []node.BundleCandidate
expResponse *node.BatchBundleResponse
}
func (tc *pruneLastNonEmptyTestCase) run(t *testing.T) {
builder := node.NewBundleBuilder(testPrevBlockID)
for _, candidate := range tc.candidates {
builder.AddCandidate(candidate)
}
builder.PruneLastNonEmpty()
require.Equal(t, tc.expResponse, builder.Response(nil))
}
......@@ -10,9 +10,9 @@ import (
"github.com/ethereum/go-ethereum/rpc"
)
type L2EndpointsSetup interface {
type L2EndpointSetup interface {
// Setup a RPC client to a L2 execution engine to process rollup blocks with.
Setup(ctx context.Context, log log.Logger) (cl []*rpc.Client, err error)
Setup(ctx context.Context, log log.Logger) (cl *rpc.Client, err error)
Check() error
}
......@@ -21,62 +21,53 @@ type L1EndpointSetup interface {
Setup(ctx context.Context, log log.Logger) (cl *rpc.Client, trust bool, err error)
}
type L2EndpointsConfig struct {
L2EngineAddrs []string // Addresses of L2 Engine JSON-RPC endpoints to use (engine and eth namespace required)
type L2EndpointConfig struct {
L2EngineAddr string // Address of L2 Engine JSON-RPC endpoint to use (engine and eth namespace required)
// JWT secrets for L2 Engine API authentication during HTTP or initial Websocket communication, one per L2 engine.
// JWT secrets for L2 Engine API authentication during HTTP or initial Websocket communication.
// Any value for an IPC connection.
L2EngineJWTSecrets [][32]byte
L2EngineJWTSecret [32]byte
}
var _ L2EndpointsSetup = (*L2EndpointsConfig)(nil)
var _ L2EndpointSetup = (*L2EndpointConfig)(nil)
func (cfg *L2EndpointsConfig) Check() error {
if len(cfg.L2EngineAddrs) == 0 {
return errors.New("need at least one L2 engine to connect to")
}
if len(cfg.L2EngineAddrs) != len(cfg.L2EngineJWTSecrets) {
return fmt.Errorf("have %d L2 engines, but %d authentication secrets", len(cfg.L2EngineAddrs), len(cfg.L2EngineJWTSecrets))
func (cfg *L2EndpointConfig) Check() error {
if cfg.L2EngineAddr == "" {
return errors.New("empty L2 Engine Address")
}
return nil
}
func (cfg *L2EndpointsConfig) Setup(ctx context.Context, log log.Logger) ([]*rpc.Client, error) {
func (cfg *L2EndpointConfig) Setup(ctx context.Context, log log.Logger) (*rpc.Client, error) {
if err := cfg.Check(); err != nil {
return nil, err
}
var out []*rpc.Client
for i, addr := range cfg.L2EngineAddrs {
auth := rpc.NewJWTAuthProvider(cfg.L2EngineJWTSecrets[i])
l2Node, err := dialRPCClientWithBackoff(ctx, log, addr, auth)
if err != nil {
// close clients again if we cannot complete the full setup
for _, cl := range out {
cl.Close()
}
return out, err
}
out = append(out, l2Node)
auth := rpc.NewJWTAuthProvider(cfg.L2EngineJWTSecret)
l2Node, err := dialRPCClientWithBackoff(ctx, log, cfg.L2EngineAddr, auth)
if err != nil {
return nil, err
}
return out, nil
return l2Node, nil
}
// PreparedL2Endpoints enables testing with in-process pre-setup RPC connections to L2 engines
type PreparedL2Endpoints struct {
Clients []*rpc.Client
Client *rpc.Client
}
func (p *PreparedL2Endpoints) Check() error {
if len(p.Clients) == 0 {
return errors.New("need at least one L2 engine to connect to")
if p.Client == nil {
return errors.New("client cannot be nil")
}
return nil
}
var _ L2EndpointsSetup = (*PreparedL2Endpoints)(nil)
var _ L2EndpointSetup = (*PreparedL2Endpoints)(nil)
func (p *PreparedL2Endpoints) Setup(ctx context.Context, log log.Logger) ([]*rpc.Client, error) {
return p.Clients, nil
func (p *PreparedL2Endpoints) Setup(ctx context.Context, log log.Logger) (*rpc.Client, error) {
return p.Client, nil
}
type L1EndpointConfig struct {
......
......@@ -9,8 +9,8 @@ import (
)
type Config struct {
L1 L1EndpointSetup
L2s L2EndpointsSetup
L1 L1EndpointSetup
L2 L2EndpointSetup
Rollup rollup.Config
......@@ -36,7 +36,7 @@ type RPCConfig struct {
// Check verifies that the given configuration makes sense
func (cfg *Config) Check() error {
if err := cfg.L2s.Check(); err != nil {
if err := cfg.L2.Check(); err != nil {
return fmt.Errorf("l2 endpoint config error: %v", err)
}
if err := cfg.Rollup.Check(); err != nil {
......
......@@ -2,9 +2,7 @@ package node
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/libp2p/go-libp2p-core/peer"
......@@ -29,9 +27,8 @@ type OpNode struct {
appVersion string
l1HeadsSub ethereum.Subscription // Subscription to get L1 heads (automatically re-subscribes on error)
l1Source *l1.Source // Source to fetch data from (also implements the Downloader interface)
l2Lock sync.Mutex // Mutex to safely add and use different L2 resources in parallel
l2Engines []*driver.Driver // engines to keep synced
l2Nodes []*rpc.Client // L2 Execution Engines to close at shutdown
l2Engine *driver.Driver // L2 Engine to Sync
l2Node *rpc.Client // L2 Execution Engine RPC connections to close at shutdown
server *rpcServer // RPC server hosting the rollup-node API
p2pNode *p2p.NodeP2P // P2P node functionality
p2pSigner p2p.Signer // p2p gogssip application messages will be signed with this signer
......@@ -76,7 +73,7 @@ func (n *OpNode) init(ctx context.Context, cfg *Config, snapshotLog log.Logger)
if err := n.initL1(ctx, cfg); err != nil {
return err
}
if err := n.initL2s(ctx, cfg, snapshotLog); err != nil {
if err := n.initL2(ctx, cfg, snapshotLog); err != nil {
return err
}
if err := n.initP2PSigner(ctx, cfg); err != nil {
......@@ -129,48 +126,26 @@ func (n *OpNode) initL1(ctx context.Context, cfg *Config) error {
return nil
}
// AttachEngine attaches an engine to the rollup node.
func (n *OpNode) AttachEngine(ctx context.Context, cfg *Config, tag string, cl *rpc.Client, snapshotLog log.Logger) error {
n.l2Lock.Lock()
defer n.l2Lock.Unlock()
engLog := n.log.New("engine", tag)
client, err := l2.NewSource(cl, &cfg.Rollup.Genesis, engLog)
func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger) error {
rpcClient, err := cfg.L2.Setup(ctx, n.log)
if err != nil {
return fmt.Errorf("failed to setup L2 execution-engine RPC client: %w", err)
}
n.l2Node = rpcClient
client, err := l2.NewSource(rpcClient, &cfg.Rollup.Genesis, n.log)
if err != nil {
cl.Close()
return err
}
snap := snapshotLog.New("engine_addr", tag)
engine := driver.NewDriver(cfg.Rollup, client, n.l1Source, n, engLog, snap, cfg.Sequencer)
n.l2Nodes = append(n.l2Nodes, cl)
n.l2Engines = append(n.l2Engines, engine)
return nil
}
snap := snapshotLog.New()
n.l2Engine = driver.NewDriver(cfg.Rollup, client, n.l1Source, n, n.log, snap, cfg.Sequencer)
func (n *OpNode) initL2s(ctx context.Context, cfg *Config, snapshotLog log.Logger) error {
clients, err := cfg.L2s.Setup(ctx, n.log)
if err != nil {
return fmt.Errorf("failed to setup L2 execution-engine RPC client(s): %v", err)
}
for i, cl := range clients {
if err := n.AttachEngine(ctx, cfg, fmt.Sprintf("eng_%d", i), cl, snapshotLog); err != nil {
return fmt.Errorf("failed to attach configured engine %d: %v", i, err)
}
}
return nil
}
func (n *OpNode) initRPCServer(ctx context.Context, cfg *Config) error {
if len(n.l2Nodes) == 0 {
return errors.New("need at least one L2 node to serve rollup RPC")
}
l2Node := n.l2Nodes[0]
// TODO: attach the p2p node ID to the snapshot logger
client, err := l2.NewReadOnlySource(l2Node, &cfg.Rollup.Genesis, n.log)
client, err := l2.NewReadOnlySource(n.l2Node, &cfg.Rollup.Genesis, n.log)
if err != nil {
return err
}
......@@ -214,38 +189,30 @@ func (n *OpNode) initP2PSigner(ctx context.Context, cfg *Config) error {
}
func (n *OpNode) Start(ctx context.Context) error {
n.log.Info("Starting execution engine driver(s)")
for _, eng := range n.l2Engines {
// Request initial head update, default to genesis otherwise
reqCtx, reqCancel := context.WithTimeout(ctx, time.Second*10)
// start driving engine: sync blocks by deriving them from L1 and driving them into the engine
err := eng.Start(reqCtx)
reqCancel()
if err != nil {
n.log.Error("Could not start a rollup node", "err", err)
return err
}
n.log.Info("Starting execution engine driver")
// Request initial head update, default to genesis otherwise
reqCtx, reqCancel := context.WithTimeout(ctx, time.Second*10)
// start driving engine: sync blocks by deriving them from L1 and driving them into the engine
err := n.l2Engine.Start(reqCtx)
reqCancel()
if err != nil {
n.log.Error("Could not start a rollup node", "err", err)
return err
}
return nil
}
func (n *OpNode) OnNewL1Head(ctx context.Context, sig eth.L1BlockRef) {
n.l2Lock.Lock()
defer n.l2Lock.Unlock()
n.tracer.OnNewL1Head(ctx, sig)
// fan-out to all engine drivers
for _, eng := range n.l2Engines {
go func(eng *driver.Driver) {
ctx, cancel := context.WithTimeout(ctx, time.Second*10)
defer cancel()
if err := eng.OnL1Head(ctx, sig); err != nil {
n.log.Warn("failed to notify engine driver of L1 head change", "err", err)
}
}(eng)
// Pass on the event to the L2 Engine
ctx, cancel := context.WithTimeout(ctx, time.Second*10)
defer cancel()
if err := n.l2Engine.OnL1Head(ctx, sig); err != nil {
n.log.Warn("failed to notify engine driver of L1 head change", "err", err)
}
}
func (n *OpNode) PublishL2Payload(ctx context.Context, payload *l2.ExecutionPayload) error {
......@@ -264,9 +231,6 @@ func (n *OpNode) PublishL2Payload(ctx context.Context, payload *l2.ExecutionPayl
}
func (n *OpNode) OnUnsafeL2Payload(ctx context.Context, from peer.ID, payload *l2.ExecutionPayload) error {
n.l2Lock.Lock()
defer n.l2Lock.Unlock()
// ignore if it's from ourselves
if n.p2pNode != nil && from == n.p2pNode.Host().ID() {
return nil
......@@ -276,16 +240,13 @@ func (n *OpNode) OnUnsafeL2Payload(ctx context.Context, from peer.ID, payload *l
n.log.Info("Received signed execution payload from p2p", "id", payload.ID(), "peer", from)
// fan-out to all engine drivers
for _, eng := range n.l2Engines {
go func(eng *driver.Driver) {
ctx, cancel := context.WithTimeout(ctx, time.Second*30)
defer cancel()
if err := eng.OnUnsafeL2Payload(ctx, payload); err != nil {
n.log.Warn("failed to notify engine driver of new L2 payload", "err", err, "id", payload.ID())
}
}(eng)
// Pass on the event to the L2 Engine
ctx, cancel := context.WithTimeout(ctx, time.Second*30)
defer cancel()
if err := n.l2Engine.OnUnsafeL2Payload(ctx, payload); err != nil {
n.log.Warn("failed to notify engine driver of new L2 payload", "err", err, "id", payload.ID())
}
return nil
}
......@@ -302,12 +263,12 @@ func (n *OpNode) Close() error {
}
if n.p2pNode != nil {
if err := n.p2pNode.Close(); err != nil {
result = multierror.Append(result, fmt.Errorf("failed to close p2p node: %v", err))
result = multierror.Append(result, fmt.Errorf("failed to close p2p node: %w", err))
}
}
if n.p2pSigner != nil {
if err := n.p2pSigner.Close(); err != nil {
result = multierror.Append(result, fmt.Errorf("failed to close p2p signer: %v", err))
result = multierror.Append(result, fmt.Errorf("failed to close p2p signer: %w", err))
}
}
......@@ -320,16 +281,18 @@ func (n *OpNode) Close() error {
n.l1HeadsSub.Unsubscribe()
}
// close L2 engines
for _, eng := range n.l2Engines {
if err := eng.Close(); err != nil {
result = multierror.Append(result, fmt.Errorf("failed to close L2 engine driver cleanly: %v", err))
// close L2 engine
if n.l2Engine != nil {
if err := n.l2Engine.Close(); err != nil {
result = multierror.Append(result, fmt.Errorf("failed to close L2 engine driver cleanly: %w", err))
}
}
// close L2 nodes
for _, n := range n.l2Nodes {
n.Close()
// close L2 node
if n.l2Node != nil {
n.l2Node.Close()
}
// close L1 data source
if n.l1Source != nil {
n.l1Source.Close()
......
......@@ -12,7 +12,8 @@ import (
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/predeploy"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/stretchr/testify/mock"
......@@ -83,7 +84,7 @@ func TestOutputAtBlock(t *testing.T) {
l2Client := &mockL2Client{}
l2Client.mock.On("GetBlockHeader", "latest").Return(&header)
l2Client.mock.On("GetProof", predeploy.WithdrawalContractAddress, "latest").Return(&result)
l2Client.mock.On("GetProof", common.HexToAddress(predeploys.L2ToL1MessagePasser), "latest").Return(&result)
server, err := newRPCServer(context.Background(), rpcCfg, rollupCfg, l2Client, log, "0.0")
assert.NoError(t, err)
......
package predeploy
import "github.com/ethereum/go-ethereum/common"
var WithdrawalContractAddress = common.HexToAddress("0x4200000000000000000000000000000000000016")
......@@ -6,6 +6,7 @@ import (
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
......@@ -21,8 +22,8 @@ var (
DepositEventABIHash = crypto.Keccak256Hash([]byte(DepositEventABI))
L1InfoFuncSignature = "setL1BlockValues(uint64,uint64,uint256,bytes32,uint64)"
L1InfoFuncBytes4 = crypto.Keccak256([]byte(L1InfoFuncSignature))[:4]
L1InfoPredeployAddr = common.HexToAddress("0x4200000000000000000000000000000000000015")
L1InfoDepositerAddress = common.HexToAddress("0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001")
L1BlockAddress = common.HexToAddress(predeploys.L1Block)
)
type UserDepositSource struct {
......@@ -195,7 +196,7 @@ func L1InfoDeposit(seqNumber uint64, block L1Info) (*types.DepositTx, error) {
return &types.DepositTx{
SourceHash: source.SourceHash(),
From: L1InfoDepositerAddress,
To: &L1InfoPredeployAddr,
To: &L1BlockAddress,
Mint: nil,
Value: big.NewInt(0),
Gas: 150_000, // TODO: temporary work around. Block 1 seems to require more gas than specced.
......
......@@ -44,14 +44,14 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
return nil, fmt.Errorf("failed to load l1 endpoint info: %v", err)
}
l2Endpoints, err := NewL2EndpointsConfig(ctx, log)
l2Endpoint, err := NewL2EndpointConfig(ctx, log)
if err != nil {
return nil, fmt.Errorf("failed to load l2 endpoints info: %v", err)
}
cfg := &node.Config{
L1: l1Endpoint,
L2s: l2Endpoints,
L2: l2Endpoint,
Rollup: *rollupConfig,
Sequencer: enableSequencing,
RPC: node.RPCConfig{
......@@ -74,38 +74,33 @@ func NewL1EndpointConfig(ctx *cli.Context) (*node.L1EndpointConfig, error) {
}, nil
}
func NewL2EndpointsConfig(ctx *cli.Context, log log.Logger) (*node.L2EndpointsConfig, error) {
l2Addrs := ctx.GlobalStringSlice(flags.L2EngineAddrs.Name)
engineJWTSecrets := ctx.GlobalStringSlice(flags.L2EngineJWTSecret.Name)
var secrets [][32]byte
for i, fileName := range engineJWTSecrets {
fileName = strings.TrimSpace(fileName)
if fileName == "" {
return nil, fmt.Errorf("file-name of jwt secret %d is empty", i)
func NewL2EndpointConfig(ctx *cli.Context, log log.Logger) (*node.L2EndpointConfig, error) {
l2Addr := ctx.GlobalString(flags.L2EngineAddr.Name)
fileName := ctx.GlobalString(flags.L2EngineJWTSecret.Name)
var secret [32]byte
fileName = strings.TrimSpace(fileName)
if fileName == "" {
return nil, fmt.Errorf("file-name of jwt secret is empty")
}
if data, err := os.ReadFile(fileName); err == nil {
jwtSecret := common.FromHex(strings.TrimSpace(string(data)))
if len(jwtSecret) != 32 {
return nil, fmt.Errorf("invalid jwt secret in path %s, not 32 hex-formatted bytes", fileName)
}
copy(secret[:], jwtSecret)
} else {
log.Warn("Failed to read JWT secret from file, generating a new one now. Configure L2 geth with --authrpc.jwt-secret=" + fmt.Sprintf("%q", fileName))
if _, err := io.ReadFull(rand.Reader, secret[:]); err != nil {
return nil, fmt.Errorf("failed to generate jwt secret: %v", err)
}
if data, err := os.ReadFile(fileName); err == nil {
jwtSecret := common.FromHex(strings.TrimSpace(string(data)))
if len(jwtSecret) != 32 {
return nil, fmt.Errorf("invalid jwt secret in path %s, not 32 hex-formatted bytes", fileName)
}
var secret [32]byte
copy(secret[:], jwtSecret)
secrets = append(secrets, secret)
} else {
log.Warn("Failed to read JWT secret from file, generating a new one now. Configure L2 geth with --authrpc.jwt-secret=" + fmt.Sprintf("%q", fileName))
var secret [32]byte
if _, err := io.ReadFull(rand.Reader, secret[:]); err != nil {
return nil, fmt.Errorf("failed to generate jwt secret: %v", err)
}
secrets = append(secrets, secret)
if err := os.WriteFile(fileName, []byte(hexutil.Encode(secret[:])), 0600); err != nil {
return nil, err
}
if err := os.WriteFile(fileName, []byte(hexutil.Encode(secret[:])), 0600); err != nil {
return nil, err
}
}
return &node.L2EndpointsConfig{
L2EngineAddrs: l2Addrs,
L2EngineJWTSecrets: secrets,
return &node.L2EndpointConfig{
L2EngineAddr: l2Addr,
L2EngineJWTSecret: secret,
}, nil
}
......
......@@ -8,7 +8,7 @@ import (
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-node/predeploy"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
......@@ -164,7 +164,7 @@ func FinalizeWithdrawalParameters(ctx context.Context, l2client ProofClient, txH
return FinalizedWithdrawalParameters{}, err
}
slot := StorageSlotOfWithdrawalHash(withdrawalHash)
p, err := l2client.GetProof(ctx, predeploy.WithdrawalContractAddress, []string{slot.String()}, header.Number)
p, err := l2client.GetProof(ctx, common.HexToAddress(predeploys.L2ToL1MessagePasser), []string{slot.String()}, header.Number)
if err != nil {
return FinalizedWithdrawalParameters{}, err
}
......
......@@ -22,6 +22,7 @@ COPY packages/common-ts/package.json ./packages/common-ts/package.json
COPY packages/contracts/package.json ./packages/contracts/package.json
COPY packages/contracts-bedrock/package.json ./packages/contracts-bedrock/package.json
COPY packages/contracts-periphery/package.json ./packages/contracts-periphery/package.json
COPY packages/contracts-governance/package.json ./packages/contracts-governance/package.json
COPY packages/data-transport-layer/package.json ./packages/data-transport-layer/package.json
COPY packages/message-relayer/package.json ./packages/message-relayer/package.json
COPY packages/fault-detector/package.json ./packages/fault-detector/package.json
......
......@@ -11,11 +11,18 @@ import prometheus, { Registry } from 'prom-client'
import { Logger } from '../common/logger'
import { Metric } from './metrics'
import { validators } from './validators'
/**
 * Arbitrary bag of user-defined service options. Keys are option names; values are
 * intentionally loose here — each option is constrained at runtime by the validator
 * declared for it in the service's OptionsSpec.
 */
export type Options = {
  [key: string]: any
}
/**
 * Options that every service accepts. These are merged into the options spec
 * automatically by the base service constructor (which marks them as not
 * user-settable), so individual services do not declare them.
 */
export type StandardOptions = {
  // Interval between loop iterations, in milliseconds.
  loopIntervalMs?: number
  // Port the Prometheus metrics server listens on.
  metricsServerPort?: number
  // Hostname the Prometheus metrics server binds to.
  metricsServerHostname?: string
}
export type OptionsSpec<TOptions extends Options> = {
[P in keyof Required<TOptions>]: {
validator: (spec?: Spec<TOptions[P]>) => ValidatorSpec<TOptions[P]>
......@@ -77,7 +84,7 @@ export abstract class BaseServiceV2<
/**
* Service options.
*/
protected readonly options: TOptions
protected readonly options: TOptions & StandardOptions
/**
* Metrics.
......@@ -129,10 +136,30 @@ export abstract class BaseServiceV2<
metricsServerHostname?: string
}) {
this.loop = params.loop !== undefined ? params.loop : true
this.loopIntervalMs =
params.loopIntervalMs !== undefined ? params.loopIntervalMs : 0
this.state = {} as TServiceState
// Add default options to options spec.
;(params.optionsSpec as any) = {
...(params.optionsSpec || {}),
// Users cannot set these options.
loopIntervalMs: {
validator: validators.num,
desc: 'Loop interval in milliseconds',
default: params.loopIntervalMs || 0,
},
metricsServerPort: {
validator: validators.num,
desc: 'Port for the metrics server',
default: params.metricsServerPort || 7300,
},
metricsServerHostname: {
validator: validators.str,
desc: 'Hostname for the metrics server',
default: params.metricsServerHostname || '0.0.0.0',
},
}
/**
* Special snake_case function which accounts for the common strings "L1" and "L2" which would
* normally be split into "L_1" and "L_2" by the snake_case function.
......@@ -241,9 +268,11 @@ export abstract class BaseServiceV2<
// Create the metrics server.
this.metricsRegistry = prometheus.register
this.metricsServerPort = params.metricsServerPort || 7300
this.metricsServerHostname = params.metricsServerHostname || '0.0.0.0'
this.metricsServerPort = this.options.metricsServerPort
this.metricsServerHostname = this.options.metricsServerHostname
// Set up everything else.
this.loopIntervalMs = this.options.loopIntervalMs
this.logger = new Logger({ name: params.name })
// Gracefully handle stop signals.
......
CrossDomainHashing_Test:test_l2TransactionHash() (gas: 78639)
CrossDomainHashing_Test:test_l2TransactionHash() (gas: 103799)
DeployerWhitelist_Test:test_owner() (gas: 7647)
DeployerWhitelist_Test:test_storageSlots() (gas: 33483)
GasPriceOracle_Test:test_baseFee() (gas: 8395)
......@@ -9,8 +9,8 @@ GasPriceOracle_Test:test_onlyOwnerSetOverhead() (gas: 10599)
GasPriceOracle_Test:test_onlyOwnerSetScalar() (gas: 10640)
GasPriceOracle_Test:test_owner() (gas: 9762)
GasPriceOracle_Test:test_setDecimals() (gas: 36798)
GasPriceOracle_Test:test_setGasPriceReverts() (gas: 11659)
GasPriceOracle_Test:test_setL1BaseFeeReverts() (gas: 11658)
GasPriceOracle_Test:test_setGasPriceReverts() (gas: 11718)
GasPriceOracle_Test:test_setL1BaseFeeReverts() (gas: 11717)
GasPriceOracle_Test:test_setOverhead() (gas: 36767)
GasPriceOracle_Test:test_setScalar() (gas: 36840)
GasPriceOracle_Test:test_storageLayout() (gas: 86683)
......@@ -20,9 +20,9 @@ L1BlockTest:test_number() (gas: 7651)
L1BlockTest:test_sequenceNumber() (gas: 7585)
L1BlockTest:test_timestamp() (gas: 7683)
L1BlockTest:test_updateValues() (gas: 28215)
L1BlockNumberTest:test_fallback() (gas: 10755)
L1BlockNumberTest:test_fallback() (gas: 18773)
L1BlockNumberTest:test_getL1BlockNumber() (gas: 10589)
L1BlockNumberTest:test_receive() (gas: 17418)
L1BlockNumberTest:test_receive() (gas: 25436)
L1CrossDomainMessenger_Test:testCannot_L1MessengerPause() (gas: 10909)
L1CrossDomainMessenger_Test:test_L1MessengerMessageVersion() (gas: 8366)
L1CrossDomainMessenger_Test:test_L1MessengerPause() (gas: 31882)
......
......@@ -4,4 +4,5 @@ cache
typechain
coverage.out
.deps
deployments
\ No newline at end of file
deployments
broadcast
......@@ -6,28 +6,22 @@ import { OptimismPortal } from "./OptimismPortal.sol";
import { CrossDomainMessenger } from "../universal/CrossDomainMessenger.sol";
/**
* @custom:proxied
* @title L1CrossDomainMessenger
* @dev The L1 Cross Domain Messenger contract sends messages from L1 to L2, and relays messages
* from L2 onto L1.
* This contract should be deployed behind an upgradable proxy
* @notice The L1CrossDomainMessenger is a message passing interface between L1 and L2 responsible
* for sending and receiving data on the L1 side. Users are encouraged to use this
* interface instead of interacting with lower-level contracts directly.
*/
contract L1CrossDomainMessenger is CrossDomainMessenger {
/*************
* Variables *
*************/
/**
* @notice Address of the OptimismPortal.
*/
OptimismPortal public portal;
/********************
* Public Functions *
********************/
/**
* @notice Initialize the L1CrossDomainMessenger
* @param _portal The OptimismPortal
* @notice Initializes the L1CrossDomainMessenger.
*
* @param _portal Address of the OptimismPortal to send and receive messages through.
*/
function initialize(OptimismPortal _portal) external {
portal = _portal;
......@@ -38,21 +32,22 @@ contract L1CrossDomainMessenger is CrossDomainMessenger {
_initialize(Lib_PredeployAddresses.L2_CROSS_DOMAIN_MESSENGER, blockedSystemAddresses);
}
/**********************
* Internal Functions *
**********************/
/**
* @notice Ensure that the L1CrossDomainMessenger can only be called
* by the OptimismPortal and the L2 sender is the L2CrossDomainMessenger.
* @notice Checks whether the message being sent from the other messenger.
*
* @return True if the message was sent from the messenger, false otherwise.
*/
function _isSystemMessageSender() internal view override returns (bool) {
return msg.sender == address(portal) && portal.l2Sender() == otherMessenger;
}
/**
* @notice Sending a message in the L1CrossDomainMessenger involves
* depositing through the OptimismPortal.
* @notice Sends a message via the OptimismPortal contract.
*
* @param _to Address of the recipient on L2.
* @param _gasLimit Minimum gas limit that the message can be executed with.
* @param _value ETH value to attach to the message and send to the recipient.
* @param _data Data to attach to the message and call the recipient with.
*/
function _sendMessage(
address _to,
......
......@@ -8,13 +8,24 @@ import { ExcessivelySafeCall } from "../libraries/ExcessivelySafeCall.sol";
import { ResourceMetering } from "./ResourceMetering.sol";
/**
* @custom:proxied
* @title OptimismPortal
* This contract should be deployed behind an upgradable proxy.
* @notice The OptimismPortal is a low-level contract responsible for passing messages between L1
* and L2. Messages sent directly to the OptimismPortal have no form of replayability.
* Users are encouraged to use the L1CrossDomainMessenger for a higher-level interface.
*/
contract OptimismPortal is ResourceMetering {
/**
* Emitted when a Transaction is deposited from L1 to L2. The parameters of this
* event are read by the rollup node and used to derive deposit transactions on L2.
* @notice Emitted when a transaction is deposited from L1 to L2. The parameters of this event
* are read by the rollup node and used to derive deposit transactions on L2.
*
* @param from Address that triggered the deposit transaction.
* @param to Address that the deposit transaction is directed to.
* @param mint Amount of ETH to mint to the sender on L2.
* @param value Amount of ETH to send to the recipient.
* @param gasLimit Minimum gas limit that the message can be executed with.
* @param isCreation Whether the message is a contract creation.
* @param data Data to attach to the message and call the recipient with.
*/
event TransactionDeposited(
address indexed from,
......@@ -27,40 +38,42 @@ contract OptimismPortal is ResourceMetering {
);
/**
* Emitted when a withdrawal is finalized
* @notice Emitted when a withdrawal transaction is finalized.
*
* @param withdrawalHash Hash of the withdrawal transaction.
* @param success Whether the withdrawal transaction was successful.
*/
event WithdrawalFinalized(bytes32 indexed, bool success);
event WithdrawalFinalized(bytes32 indexed withdrawalHash, bool success);
/**
* Value used to reset the l2Sender, this is more efficient than setting it to zero.
* @notice Value used to reset the l2Sender, this is more efficient than setting it to zero.
*/
address internal constant DEFAULT_L2_SENDER = 0x000000000000000000000000000000000000dEaD;
/**
* Minimum time that must elapse before a withdrawal can be finalized.
* @notice Minimum time (in seconds) that must elapse before a withdrawal can be finalized.
*/
uint256 public immutable FINALIZATION_PERIOD_SECONDS;
/**
* Address of the L2OutputOracle.
* @notice Address of the L2OutputOracle.
*/
L2OutputOracle public immutable L2_ORACLE;
/**
* Public variable which can be used to read the address of the L2 account which initiated the
* withdrawal. Can also be used to determine whether or not execution is occuring downstream of
* a call to finalizeWithdrawalTransaction().
* @notice Address of the L2 account which initiated a withdrawal in this transaction. If the
* of this variable is the default L2 sender address, then we are NOT inside of a call
* to finalizeWithdrawalTransaction.
*/
address public l2Sender = DEFAULT_L2_SENDER;
/**
* A list of withdrawal hashes which have been successfully finalized.
* Used for replay protection.
* @notice A list of withdrawal hashes which have been successfully finalized.
*/
mapping(bytes32 => bool) public finalizedWithdrawals;
/**
* @param _l2Oracle Address of the L2OutputOracle.
* @param _l2Oracle Address of the L2OutputOracle.
* @param _finalizationPeriodSeconds Finalization time in seconds.
*/
constructor(L2OutputOracle _l2Oracle, uint256 _finalizationPeriodSeconds) {
......@@ -69,9 +82,9 @@ contract OptimismPortal is ResourceMetering {
}
/**
* Accepts value so that users can send ETH directly to this contract and have the funds be
* deposited to their address on L2. This is intended as a convenience function for EOAs.
* Contracts should call the depositTransaction() function directly.
* @notice Accepts value so that users can send ETH directly to this contract and have the
* funds be deposited to their address on L2. This is intended as a convenience
* function for EOAs. Contracts should call the depositTransaction() function directly.
*/
receive() external payable {
depositTransaction(msg.sender, msg.value, 100000, false, bytes(""));
......@@ -79,16 +92,15 @@ contract OptimismPortal is ResourceMetering {
/**
* @notice Accepts deposits of ETH and data, and emits a TransactionDeposited event for use in
* deriving deposit transactions. Note that if a deposit is made by a contract, its address will
* be aliased when retrieved using `tx.origin` or `msg.sender`. This can lead to loss of funds
* in some cases which the depositing contract may not have accounted for. Consider using the
* Bridge or CrossDomainMessenger contracts which provide additional safety assurances.
* deriving deposit transactions. Note that if a deposit is made by a contract, its
* address will be aliased when retrieved using `tx.origin` or `msg.sender`. Consider
* using the CrossDomainMessenger contracts for a simpler developer experience.
*
* @param _to The L2 destination address.
* @param _value The ETH value to send in the deposit transaction.
* @param _gasLimit The L2 gasLimit.
* @param _isCreation Whether or not the transaction should be contract creation.
* @param _data The input data.
* @param _to Target address on L2.
* @param _value ETH value to send to the recipient.
* @param _gasLimit Minimum L2 gas limit (can be greater than or equal to this value).
* @param _isCreation Whether or not the transaction is a contract creation.
* @param _data Data to trigger the recipient with.
*/
function depositTransaction(
address _to,
......@@ -119,15 +131,15 @@ contract OptimismPortal is ResourceMetering {
}
/**
* Finalizes a withdrawal transaction.
* @notice Finalizes a withdrawal transaction.
*
* @param _nonce Nonce for the provided message.
* @param _sender Message sender address on L2.
* @param _target Target address on L1.
* @param _value ETH to send to the target.
* @param _gasLimit Gas to be forwarded to the target.
* @param _data Data to send to the target.
* @param _l2Timestamp L2 timestamp of the outputRoot.
* @param _nonce Nonce for the provided message.
* @param _sender Message sender address on L2.
* @param _target Target address on L1.
* @param _value ETH to send to the target.
* @param _gasLimit Minumum gas to be forwarded to the target.
* @param _data Data to send to the target.
* @param _l2Timestamp L2 timestamp of the outputRoot.
* @param _outputRootProof Inclusion proof of the withdrawer contracts storage root.
* @param _withdrawalProof Inclusion proof for the given withdrawal in the withdrawer contract.
*/
......
......@@ -9,11 +9,12 @@ import { Burn } from "../libraries/Burn.sol";
/**
* @title ResourceMetering
* @notice ResourceMetering implements an EIP-1559 style resource metering system where pricing
* updates automatically based on current demand.
* updates automatically based on current demand.
*/
contract ResourceMetering {
/**
* Struct representing current resource parameters.
* @notice Represents the various parameters that control the way in which resources are
* metered. Corresponds to the EIP-1559 resource metering system.
*/
struct ResourceParams {
uint128 prevBaseFee;
......@@ -22,42 +23,42 @@ contract ResourceMetering {
}
/**
* Along with the resource limit, determines the target resource limit.
* @notice Maximum amount of the resource that can be used within this block.
*/
int256 public constant ELASTICITY_MULTIPLIER = 4;
int256 public constant MAX_RESOURCE_LIMIT = 8_000_000;
/**
* Denominator that determines max change on fee per block.
* @notice Along with the resource limit, determines the target resource limit.
*/
int256 public constant BASE_FEE_MAX_CHANGE_DENOMINATOR = 8;
int256 public constant ELASTICITY_MULTIPLIER = 4;
/**
* Maximum amount of deposit gas that can be used within this block.
* @notice Target amount of the resource that should be used within this block.
*/
int256 public constant MAX_RESOURCE_LIMIT = 8_000_000;
int256 public constant TARGET_RESOURCE_LIMIT = MAX_RESOURCE_LIMIT / ELASTICITY_MULTIPLIER;
/**
* Target amount of deposit gas that should be used within this block.
* @notice Denominator that determines max change on fee per block.
*/
int256 public constant TARGET_RESOURCE_LIMIT = MAX_RESOURCE_LIMIT / ELASTICITY_MULTIPLIER;
int256 public constant BASE_FEE_MAX_CHANGE_DENOMINATOR = 8;
/**
* Minimum base fee value, cannot go lower than this.
* @notice Minimum base fee value, cannot go lower than this.
*/
int256 public constant MINIMUM_BASE_FEE = 10_000;
/**
* Initial base fee value.
* @notice Initial base fee value.
*/
uint128 public constant INITIAL_BASE_FEE = 1_000_000_000;
/**
* EIP-1559 style gas parameters.
* @notice EIP-1559 style gas parameters.
*/
ResourceParams public params;
/**
* Sets the initial resource values.
* @notice Sets initial resource parameter values.
*/
constructor() {
params = ResourceParams({
......@@ -68,7 +69,7 @@ contract ResourceMetering {
}
/**
* Meters access to a function based an amount of a requested resource.
* @notice Meters access to a function based an amount of a requested resource.
*
* @param _amount Amount of the resource requested.
*/
......
......@@ -36,7 +36,7 @@
"hardhat": "^2.9.6",
"@rari-capital/solmate": "https://github.com/rari-capital/solmate.git#8f9b23f8838670afda0fd8983f2c41e8037ae6bc",
"ds-test": "https://github.com/dapphub/ds-test.git#9310e879db8ba3ea6d5c6489a579118fd264a3f5",
"forge-std": "https://github.com/foundry-rs/forge-std.git#1680d7fb3e00b7b197a7336e7c88e838c7e6a3ec",
"forge-std": "https://github.com/foundry-rs/forge-std.git#564510058ab3db01577b772c275e081e678373f2",
"merkle-patricia-tree": "^4.2.4",
"rlp": "^2.2.7"
},
......
ETHERSCAN_API_KEY=ABC123ABC123ABC123ABC123ABC123ABC1
PRIVATE_KEY=0x...
PRIVATE_KEY_DEPLOYER=
PRIVATE_KEY_TOKEN_DEPLOYER=0x...
L1_PROVIDER_URL=http://localhost:9545
L2_PROVIDER_URL=http://localhost:8545
PRIVATE_KEY_DISTRIBUTOR_DEPLOYER=ABC123
node_modules
artifacts
cache
coverage
// Inherit the shared ESLint configuration from the repository root
// (two directories up); no package-specific overrides.
module.exports = {
  extends: '../../.eslintrc.js',
}
hardhat.config.ts
scripts
test
node_modules
artifacts
cache
coverage*
gasReporterOutput.json
// Re-export the shared Prettier configuration from the repository root
// (two directories up); no package-specific overrides.
module.exports = {
  ...require('../../.prettierrc.js'),
}
{
"extends": "solhint:recommended",
"rules": {
"compiler-version": ["error", "^0.8.0"],
"func-visibility": ["warn", { "ignoreConstructors": true }]
}
}
<div align="center">
<a href="https://community.optimism.io"><img alt="Optimism" src="https://user-images.githubusercontent.com/14298799/122151157-0b197500-ce2d-11eb-89d8-6240e3ebe130.png" width=280></a>
<br />
<h1> Optimism Governance Contracts</h1>
</div>
## TL;DR
The token and governance smart contracts for the Optimism DAO. Built using [OpenZeppelin libraries](https://docs.openzeppelin.com/contracts/4.x/) with some customisations. The token is an [ERC20](https://docs.openzeppelin.com/contracts/4.x/api/token/erc20) that is [permittable](https://docs.openzeppelin.com/contracts/4.x/api/token/erc20#ERC20Permit) and allows for [delegate voting](https://docs.openzeppelin.com/contracts/4.x/api/token/erc20#ERC20Votes). The token is also [burnable](https://docs.openzeppelin.com/contracts/4.x/api/token/erc20#ERC20Burnable). See more in the [Specification section](#specification).
Governance will initially be handled by [Snapshot](https://snapshot.org/#/) before moving to an on-chain governance system like [OpenZeppelin's Governance contracts](https://docs.openzeppelin.com/contracts/4.x/api/governance).
## Getting set up
### Requirements
You will need the following dependencies installed:
```
nvm
node
yarn
npx
```
Install the required packages by running:
```
nvm use
yarn
```
#### Compile
To compile the smart contracts run:
```
yarn build
```
#### Test
To run the tests run:
```
yarn test
```
#### Lint
To run the linter run:
```
yarn lint
```
#### Coverage
For coverage run:
```
yarn test:coverage
```
#### Deploying
To deploy the contracts you will first need to set up the environment variables.
Duplicate the [`.env.example`](./.env.example) file. Rename the duplicate to `.env`.
Fill in the missing environment variables, take care with the specified required formatting of secrets.
Then run the command for your desired network:
```
# To deploy on Optimism Kovan
yarn deploy-op-kovan
# To deploy on Optimism
yarn deploy-op-main
```
---
## Specification
Below we will cover the specifications for the various elements of this repository.
### Governance Token
The [`GovernanceToken.sol`](./contracts/GovernanceToken.sol) contract is a basic ERC20 token, with the following modifications:
* **Non-upgradable**
* This token is not upgradable.
* **Ownable**
* This token has an owner role to allow for permissioned minting functionality.
* **Mintable**
* The `OP` token is an inflationary token. We allow for up to 2% annual inflation supply to be minted by the token `MintManager`.
* **Burnable**
* The token allows for tokens to be burnt, as well as allowing approved spenders to burn tokens from users.
* **Permittable**
* This token is permittable as defined by [EIP2612](https://eips.ethereum.org/EIPS/eip-2612). This allows users to approve a spender without submitting an onchain transaction through the use of signed messages.
* **Delegate voting**
* This token inherits OpenZeppelin's ERC20Votes.sol to allow users to delegate voting power. This requires the token be permittable.
### Mint Manager
The [`MintManager.sol`](./contracts/MintManager.sol) contract is set as the `owner` of the OP token and is responsible for the token inflation schedule. It acts as the token "mint manager" with permission to the `mint` function only.
The current implementation allows minting once per year of up to 2% of the total token supply.
The contract is also upgradable to allow changes in the inflation schedule.
### Snapshot Voting Strategy
(WIP)
### Governance (DAO) Contracts
(WIP)
\ No newline at end of file
// SPDX-License-Identifier: MIT
pragma solidity 0.8.12;
import "@openzeppelin/contracts/token/ERC20/ERC20.sol";
import "@openzeppelin/contracts/token/ERC20/extensions/ERC20Burnable.sol";
import "@openzeppelin/contracts/token/ERC20/extensions/ERC20Votes.sol";
import "@openzeppelin/contracts/access/Ownable.sol";
/**
 * @title GovernanceToken
 * @notice The Optimism token used in governance, supporting voting and delegation.
 *         Implements EIP-2612 allowing signed approvals. The contract is "owned" by a
 *         `MintManager` instance with permission to the `mint` function only, for the
 *         purpose of enforcing the token inflation schedule.
 */
contract GovernanceToken is ERC20Burnable, ERC20Votes, Ownable {
    /**
     * @notice Deploys the token with name "Optimism" and symbol "OP". The EIP-712
     *         domain used for EIP-2612 permits is also named "Optimism".
     */
    constructor() ERC20("Optimism", "OP") ERC20Permit("Optimism") {}

    /**
     * @notice Mints `_amount` tokens to `_account`. Only callable by the owner
     *         (intended to be the MintManager, which enforces the inflation schedule).
     *
     * @param _account Address to receive the newly minted tokens.
     * @param _amount  Amount of tokens to mint.
     */
    function mint(address _account, uint256 _amount) public onlyOwner {
        _mint(_account, _amount);
    }

    // The following functions are overrides required by Solidity to resolve the
    // multiple inheritance between ERC20 and ERC20Votes (which must update voting
    // checkpoints on every supply/balance change).

    function _afterTokenTransfer(
        address from,
        address to,
        uint256 amount
    ) internal override(ERC20, ERC20Votes) {
        super._afterTokenTransfer(from, to, amount);
    }

    function _mint(address to, uint256 amount) internal override(ERC20, ERC20Votes) {
        super._mint(to, amount);
    }

    function _burn(address account, uint256 amount) internal override(ERC20, ERC20Votes) {
        super._burn(account, amount);
    }
}
// SPDX-License-Identifier: UNLICENSED
pragma solidity =0.8.12;
import "@openzeppelin/contracts/token/ERC20/IERC20.sol";
import "@openzeppelin/contracts/utils/cryptography/MerkleProof.sol";
import "./interfaces/IMerkleDistributor.sol";
contract MerkleDistributor is IMerkleDistributor {
    // Token being distributed and the Merkle root of the claim set; both are
    // fixed at deployment.
    address public immutable override token;
    bytes32 public immutable override merkleRoot;
    uint256 public constant ONE_YEAR_IN_SECONDS = 31_536_000;
    // Deployment timestamp; claims remain open for one year from this point.
    uint256 public immutable activationTimestamp;
    // Destination for unclaimed tokens when the airdrop is finalised.
    address public immutable airdropTreasury;
    // False once clawBack() has executed.
    bool public isActive;
    // This is a packed array of booleans.
    mapping(uint256 => uint256) private claimedBitMap;
    event Finalised(address indexed calledBy, uint256 timestamp, uint256 unclaimedAmount);
    /**
     * @param token_ Address of the ERC20 token to distribute.
     * @param merkleRoot_ Merkle root over (index, account, amount) leaves.
     * @param _treasury Address that receives unclaimed tokens on finalisation.
     */
    constructor(
        address token_,
        bytes32 merkleRoot_,
        address _treasury
    ) {
        token = token_;
        merkleRoot = merkleRoot_;
        activationTimestamp = block.timestamp;
        isActive = true;
        airdropTreasury = _treasury;
    }
    // Returns true if claim `index` has already been executed.
    function isClaimed(uint256 index) public view override returns (bool) {
        uint256 claimedWordIndex = index / 256;
        uint256 claimedBitIndex = index % 256;
        uint256 claimedWord = claimedBitMap[claimedWordIndex];
        uint256 mask = (1 << claimedBitIndex);
        return claimedWord & mask == mask;
    }
    // Marks claim `index` as executed in the packed bitmap.
    function _setClaimed(uint256 index) private {
        uint256 claimedWordIndex = index / 256;
        uint256 claimedBitIndex = index % 256;
        claimedBitMap[claimedWordIndex] = claimedBitMap[claimedWordIndex] | (1 << claimedBitIndex);
    }
    /**
     * @dev Transfers `amount` tokens to `account` when the Merkle proof over
     *      (index, account, amount) is valid and the claim is unused.
     * NOTE(review): there is no `isActive` check here; after clawBack(),
     * outstanding claims fail only because the contract balance has been
     * swept — confirm this is the intended behaviour.
     */
    function claim(
        uint256 index,
        address account,
        uint256 amount,
        bytes32[] calldata merkleProof
    ) external override {
        require(!isClaimed(index), "MerkleDistributor: Drop already claimed.");
        // Verify the merkle proof.
        bytes32 node = keccak256(abi.encodePacked(index, account, amount));
        require(
            MerkleProof.verify(merkleProof, merkleRoot, node),
            "MerkleDistributor: Invalid proof."
        );
        // Mark it claimed and send the token.
        _setClaimed(index);
        require(IERC20(token).transfer(account, amount), "MerkleDistributor: Transfer failed.");
        emit Claimed(index, account, amount);
    }
    /**
     * @dev Finalises the airdrop and sweeps unclaimed tokens into the Optimism multisig
     */
    function clawBack() external {
        // Airdrop can only be finalised once
        require(isActive, "Airdrop: Already finalised");
        // Airdrop will remain open for one year
        require(
            block.timestamp >= activationTimestamp + ONE_YEAR_IN_SECONDS,
            "Airdrop: Drop should remain open for one year"
        );
        // Deactivate airdrop
        isActive = false;
        // Sweep unclaimed tokens
        uint256 amount = IERC20(token).balanceOf(address(this));
        require(
            IERC20(token).transfer(airdropTreasury, amount),
            "Airdrop: Finalise transfer failed"
        );
        emit Finalised(msg.sender, block.timestamp, amount);
    }
}
// SPDX-License-Identifier: MIT
pragma solidity 0.8.12;
import "@openzeppelin/contracts/access/Ownable.sol";
import "./GovernanceToken.sol";
/**
* @dev Set as `owner` of the OP token and responsible for the token inflation schedule.
* Contract acts as the token "mint manager" with permission to the `mint` function only.
* Currently permitted to mint once per year of up to 2% of the total token supply.
* Upgradable to allow changes in the inflation schedule.
*/
contract MintManager is Ownable {
    GovernanceToken public governanceToken;
    // Cap expressed in tenths of a percent: 200 / 1000 = 2% of total supply.
    uint256 public constant MINT_CAP = 200; // 2%
    uint256 public constant MINT_PERIOD = 365 days;
    // Timestamp after which the next mint is permitted; zero until the first
    // mint has ever happened.
    uint256 public mintPermittedAfter;
    /**
     * @param _upgrader Account given ownership of this contract.
     * @param _governanceToken Address of the GovernanceToken to manage.
     */
    constructor(address _upgrader, address _governanceToken) {
        transferOwnership(_upgrader);
        governanceToken = GovernanceToken(_governanceToken);
    }
    /**
     * @param _account Address to mint new tokens to.
     * @param _amount Amount of tokens to be minted.
     * @notice Only the token owner is allowed to mint.
     */
    function mint(address _account, uint256 _amount) public onlyOwner {
        // The very first mint (mintPermittedAfter == 0) skips both the rate
        // limit and the 2% cap; the initial-supply deployment script depends
        // on this by requiring mintPermittedAfter to still be zero.
        if (mintPermittedAfter > 0) {
            require(mintPermittedAfter <= block.timestamp, "OP: minting not permitted yet");
            require(
                _amount <= (governanceToken.totalSupply() * MINT_CAP) / 1000,
                "OP: mint amount exceeds cap"
            );
        }
        governanceToken.mint(_account, _amount);
        mintPermittedAfter = block.timestamp + MINT_PERIOD;
    }
    /**
     * @dev Hands token ownership to a replacement mint manager, permanently
     *      disabling this instance's ability to mint.
     * @param _newMintManager Address of the replacement MintManager.
     */
    function upgrade(address _newMintManager) public onlyOwner {
        require(_newMintManager != address(0), "OP: Mint manager cannot be empty");
        governanceToken.transferOwnership(_newMintManager);
    }
}
// SPDX-License-Identifier: UNLICENSED
pragma solidity >=0.8.12;
// Allows anyone to claim a token if they exist in a merkle root.
interface IMerkleDistributor {
    // Returns the address of the token distributed by this contract.
    function token() external view returns (address);
    // Returns the merkle root of the merkle tree containing account balances available to claim.
    function merkleRoot() external view returns (bytes32);
    // Returns true if the index has been marked claimed.
    function isClaimed(uint256 index) external view returns (bool);
    // Claim the given amount of the token to the given address. Reverts if the inputs are invalid.
    function claim(
        uint256 index,
        address account,
        uint256 amount,
        bytes32[] calldata merkleProof
    ) external;
    // This event is triggered whenever a call to #claim succeeds.
    // Note: no parameters are indexed, so filtering by account or index must
    // be done client-side.
    event Claimed(uint256 index, address account, uint256 amount);
}
// SPDX-License-Identifier: UNLICENSED
pragma solidity =0.8.12;
import "@openzeppelin/contracts/token/ERC20/ERC20.sol";
contract TestERC20 is ERC20 {
    /**
     * @param name_ Token name.
     * @param symbol_ Token symbol.
     * @param amountToMint Initial balance minted to the deployer.
     */
    constructor(
        string memory name_,
        string memory symbol_,
        uint256 amountToMint
    ) ERC20(name_, symbol_) {
        setBalance(msg.sender, amountToMint);
    }
    // sets the balance of the address
    // this mints/burns the amount depending on the current balance
    // NOTE: intentionally unauthenticated — this contract is a test helper
    // and must never be deployed to production.
    function setBalance(address to, uint256 amount) public {
        uint256 old = balanceOf(to);
        if (old < amount) {
            _mint(to, amount - old);
        } else if (old > amount) {
            _burn(to, old - amount);
        }
    }
}
import dotenv from 'dotenv'
import '@nomiclabs/hardhat-ethers'
import '@nomiclabs/hardhat-etherscan'
import '@nomiclabs/hardhat-waffle'
import 'hardhat-gas-reporter'
import 'solidity-coverage'
import { task, types } from 'hardhat/config'
import { providers, utils, Wallet } from 'ethers'
import { CrossChainMessenger } from '@eth-optimism/sdk'
import './scripts/deploy-token'
import './scripts/multi-send'
import './scripts/mint-initial-supply'
import './scripts/generate-merkle-root'
import './scripts/create-airdrop-json'
import './scripts/deploy-distributor'
import './scripts/test-claims'
import './scripts/create-distributor-json'
dotenv.config()
// Lists the address of every signer account configured for the network.
task('accounts', 'Prints the list of accounts').setAction(async (args, hre) => {
  const signers = await hre.ethers.getSigners()
  signers.forEach((signer) => {
    console.log(signer.address)
  })
})
// Deposits ETH from L1 to an L2 recipient through the standard bridge, then
// additionally funds the recipient directly on L2 from the same key.
task('deposit', 'Deposits funds onto Optimism.')
  .addParam('to', 'Recipient address.', null, types.string)
  .addParam('amountEth', 'Amount in ETH to send.', null, types.string)
  .addParam('l1ProviderUrl', '', process.env.L1_PROVIDER_URL, types.string)
  .addParam('l2ProviderUrl', '', process.env.L2_PROVIDER_URL, types.string)
  .addParam('privateKey', '', process.env.PRIVATE_KEY, types.string)
  .setAction(async (args) => {
    const { to, amountEth, l1ProviderUrl, l2ProviderUrl, privateKey } = args
    if (!l1ProviderUrl || !l2ProviderUrl || !privateKey) {
      throw new Error(
        'You must define --l1-provider-url, --l2-provider-url, --private-key in your environment.'
      )
    }
    const l1Provider = new providers.JsonRpcProvider(l1ProviderUrl)
    const l1Wallet = new Wallet(privateKey, l1Provider)
    // NOTE(review): l2SignerOrProvider receives the raw URL string rather
    // than a provider instance — confirm the SDK accepts a URL here.
    const messenger = new CrossChainMessenger({
      l1SignerOrProvider: l1Wallet,
      l2SignerOrProvider: l2ProviderUrl,
      l1ChainId: (await l1Provider.getNetwork()).chainId,
    })
    const amountWei = utils.parseEther(amountEth)
    console.log(`Depositing ${amountEth} ETH to ${to}...`)
    const tx = await messenger.depositETH(amountWei, {
      recipient: to,
    })
    console.log(`Got TX hash ${tx.hash}. Waiting...`)
    await tx.wait()
    // NOTE(review): the same amount is ALSO sent directly on L2 below, so the
    // recipient receives the funds twice (bridge deposit + direct transfer).
    // Presumably intentional for local dev funding — confirm.
    const l2Provider = new providers.JsonRpcProvider(l2ProviderUrl)
    const l1WalletOnL2 = new Wallet(privateKey, l2Provider)
    await l1WalletOnL2.sendTransaction({
      to,
      value: utils.parseEther(amountEth),
    })
    const balance = await l2Provider.getBalance(to)
    console.log('Funded account balance', balance.toString())
    console.log('Done.')
  })
// Throwaway fallback key (0x11 × 32) so the config loads when PRIVATE_KEY is
// unset; real deployments must supply PRIVATE_KEY via the environment.
const privKey = process.env.PRIVATE_KEY || '0x' + '11'.repeat(32)
/**
 * @type import("hardhat/config").HardhatUserConfig
 */
module.exports = {
  solidity: '0.8.12',
  networks: {
    // Local development node; no accounts entry, so the node's own unlocked
    // signers are used.
    optimism: {
      chainId: 17,
      url: 'http://localhost:8545',
      saveDeployments: false,
    },
    'optimism-kovan': {
      chainId: 69,
      url: 'https://kovan.optimism.io',
      accounts: [privKey],
    },
    'optimism-nightly': {
      chainId: 421,
      url: 'https://goerli-nightly-us-central1-a-sequencer.optimism.io',
      saveDeployments: true,
      accounts: [privKey],
    },
    'optimism-mainnet': {
      chainId: 10,
      url: 'https://mainnet.optimism.io',
      accounts: [privKey],
    },
    'hardhat-node': {
      url: 'http://localhost:9545',
      saveDeployments: false,
    },
  },
  // Gas reporting is opt-in via the REPORT_GAS env var.
  gasReporter: {
    enabled: process.env.REPORT_GAS !== undefined,
    currency: 'USD',
  },
  etherscan: {
    apiKey: process.env.ETHERSCAN_API_KEY,
  },
}
{
"name": "@eth-optimism/contracts-governance",
"version": "0.1.0",
"author": "Optimism PBC",
"license": "MIT",
"main": "dist/index",
"types": "dist/index",
"files": [
"dist/**/*.js",
"dist/**/*.d.ts",
"dist/types/*.ts",
"artifacts/**/*.json",
"deployments/**/*.json"
],
"scripts": {
"build": "npx hardhat compile",
"test": "npx hardhat test",
"test:coverage": "IS_COVERAGE=true npx hardhat coverage",
"lint:js:check": "eslint . --max-warnings=0",
"lint:contracts:check": "yarn solhint -f table 'contracts/**/*.sol'",
"lint:check": "yarn lint:contracts:check && yarn lint:js:check",
"lint:js:fix": "eslint --fix .",
"lint:contracts:fix": "yarn prettier --write 'contracts/**/*.sol'",
"lint:fix": "yarn lint:contracts:fix && yarn lint:js:fix",
"lint": "yarn lint:fix && yarn lint:check",
"deploy:test": "hardhat deploy-token --network optimism",
"deploy:kovan": "hardhat deploy-token --network 'optimism-kovan'",
"deploy:mainnet": "hardhat deploy-token --network 'optimism-mainnet'"
},
"dependencies": {
"@eth-optimism/sdk": "^1.0.1",
"@ethersproject/hardware-wallets": "^5.6.1",
"@nomiclabs/hardhat-ethers": "^2.0.2",
"@nomiclabs/hardhat-etherscan": "^3.0.1",
"@nomiclabs/hardhat-waffle": "^2.0.2",
"@nomiclabs/hardhat-web3": "^2.0.0",
"@openzeppelin/contracts": "4.5.0",
"commander": "^9.3.0",
"csv-parse": "^5.0.4",
"ethereumjs-util": "^7.1.4",
"eth-sig-util": "^3.0.1",
"ethers": "^5.6.8",
"hardhat": "^2.9.6"
},
"devDependencies": {
"@types/mocha": "^9.1.1",
"chai": "^4.3.6",
"dotenv": "^16.0.0",
"eslint": "^8.9.0",
"ethereum-waffle": "^3.4.0",
"hardhat-gas-reporter": "^1.0.7",
"prettier": "^2.3.1",
"prettier-plugin-solidity": "^1.0.0-beta.18",
"solhint": "^3.3.6",
"solidity-coverage": "^0.7.19",
"ts-node": "^10.0.0",
"typescript": "^4.6.2"
}
}
import fs from 'fs'
import { task } from 'hardhat/config'
import { parse } from 'csv-parse'
import { BigNumber } from 'ethers'
// Converts an airdrop CSV (header row; address in column 0, amount in the last
// column) into a JSON map of address -> amount, then re-reads the written file
// to verify every entry and the running total survived the round trip.
task('create-airdrop-json')
  .addParam('inFile', 'Location of the airdrop CSV')
  .addParam('outFile', 'Where to write the outputted JSON')
  .setAction(async (args, hre) => {
    // Amounts are stored as the raw decimal strings read from the CSV: they
    // round-trip through JSON.stringify unchanged, whereas BigNumber values
    // would serialize as objects. (The previous `BigNumber` annotation never
    // matched the string values actually assigned below.)
    const out: { [k: string]: string } = {}
    let total = BigNumber.from(0)
    console.log('Reading...')
    const parser = fs.createReadStream(args.inFile).pipe(parse())
    let isHeader = true
    for await (const record of parser) {
      // Skip the CSV header row.
      if (isHeader) {
        isHeader = false
        continue
      }
      const addr = record[0]
      const amount = record[record.length - 1]
      total = total.add(amount)
      out[addr] = amount
    }
    console.log('Writing...')
    fs.writeFileSync(args.outFile, JSON.stringify(out, null, ' '))
    console.log(
      `Total airdrop tokens: ${hre.ethers.utils.formatEther(
        total.toString()
      )} (${total.toString()})`
    )
    console.log(`Total airdrop addrs: ${Object.keys(out).length}`)
    console.log('Verifying...')
    let verTotal = BigNumber.from(0)
    const data = JSON.parse(fs.readFileSync(args.outFile).toString('utf-8'))
    for (const [addr, amount] of Object.entries(data)) {
      // Values were written as strings, so strict comparison is exact.
      if (out[addr] !== amount) {
        throw new Error('Value mismatch!')
      }
      verTotal = verTotal.add(amount as string)
    }
    if (!total.eq(verTotal)) {
      throw new Error('Total mismatch!')
    }
    console.log('OK')
  })
import fs from 'fs'
import { task } from 'hardhat/config'
import { parse } from 'csv-parse'
import { BigNumber } from 'ethers'
// Builds the distributor JSON from a CSV: trims and parses each row, sorts by
// name, derives deterministic fallback addresses from a mnemonic for rows
// without an address, and checks the grand total of whole-token amounts.
task('create-distributor-json')
  .addParam('inFile', 'CSV to read')
  .addParam('outFile', 'JSON to create')
  .addOptionalParam(
    'mnemonic',
    'Mnemonic',
    process.env.DISTRIBUTOR_FALLBACK_MNEMONIC
  )
  .setAction(async (args, hre) => {
    const parser = fs.createReadStream(args.inFile).pipe(parse())
    const records = []
    let total = BigNumber.from(0)
    for await (const record of parser) {
      // NOTE(review): columns are addressed relative to the END of the row
      // (amount = 4th-from-last, address = 3rd-from-last); thousands
      // separators are stripped from amounts. Confirm against the actual CSV
      // layout before reuse.
      const name = record[0].trim()
      const amt = record[record.length - 4].trim().replace(/,/gi, '')
      const address = record[record.length - 3].trim()
      records.push({
        name,
        amountHuman: amt,
        amount: hre.ethers.utils.parseEther(amt).toString(),
        address,
        fallbackIndex: -1,
      })
      // The running total is in whole tokens, not 18-decimal base units.
      total = total.add(amt)
    }
    records.sort((a, b) => {
      if (a.name > b.name) {
        return 1
      }
      if (a.name < b.name) {
        return -1
      }
      return 0
    })
    for (let i = 0; i < records.length; i++) {
      const record = records[i]
      // Rows without a 0x address get a deterministic fallback wallet derived
      // from the mnemonic at account index i (so re-runs are reproducible).
      if (record.address.slice(0, 2) !== '0x') {
        console.log(
          `Generating fallback address for ${record.name}. Account index: ${i}`
        )
        const wallet = hre.ethers.Wallet.fromMnemonic(
          args.mnemonic,
          `m/44'/60'/0'/0/${i}`
        )
        record.address = wallet.address
        record.fallbackIndex = i
      }
    }
    fs.writeFileSync(args.outFile, JSON.stringify(records, null, ' '))
    console.log(`Total: ${total.toString()}`)
    // Hard-coded expected total (in whole OP) for this one distribution.
    if (total.eq(1_434_262_041)) {
      console.log('AMOUNTS VERIFIED')
    } else {
      throw new Error('AMOUNTS INVALID')
    }
  })
import fs from 'fs'
import { task } from 'hardhat/config'
import dotenv from 'dotenv'
import { BigNumber } from 'ethers'
import { MerkleDistributorInfo } from '../src/parse-balance-map'
import { prompt } from '../src/prompt'
dotenv.config()
// Deploys the MerkleDistributor for the OP airdrop after an interactive
// confirmation of every constructor parameter.
task('deploy-distributor')
  .addParam(
    'pkDeployer',
    'Private key of the minter',
    process.env.PRIVATE_KEY_DISTRIBUTOR_DEPLOYER
  )
  .addParam(
    'treasuryAddr',
    'Address airdrops can be swept to if left unclaimed for a year. Defaults to the OP multisig',
    '0x2e128664036fa6AAdFEA521fd2Ce192309c25242'
  )
  .addParam('inFile', 'Location of the Merkle roots JSON file')
  .setAction(async (args, hre) => {
    const file = fs.readFileSync(args.inFile).toString()
    const data = JSON.parse(file) as MerkleDistributorInfo
    const deployer = new hre.ethers.Wallet(args.pkDeployer).connect(
      hre.ethers.provider
    )
    // Echo all deployment parameters for manual review before proceeding.
    console.log(
      `About to deploy the MerkleDistributor with the following parameters:`
    )
    console.log(`Network: ${hre.network.name}`)
    console.log('Token addr: 0x4200000000000000000000000000000000000042')
    console.log(`Merkle root: ${data.merkleRoot}`)
    console.log(`Treasury addr: ${args.treasuryAddr}`)
    console.log(`Deployer addr: ${deployer.address}`)
    console.log(
      `Deployer balance: ${hre.ethers.utils.formatEther(
        await deployer.getBalance()
      )}`
    )
    await prompt('Is this OK?')
    const factory = await hre.ethers.getContractFactory('MerkleDistributor')
    // The token address is the governance token's predeploy system address.
    const contract = await factory
      .connect(deployer)
      .deploy(
        '0x4200000000000000000000000000000000000042',
        data.merkleRoot,
        args.treasuryAddr,
        {
          gasLimit: 3000000,
        }
      )
    console.log(
      `Deploying distributor in ${contract.deployTransaction.hash}...`
    )
    await contract.deployed()
    // The distributor holds no tokens after deployment; claims only succeed
    // once it has been funded with the full claim total.
    console.log(
      `Deployed distributor at ${
        contract.address
      }. Please fund the contract with ${BigNumber.from(
        data.tokenTotal
      ).toString()} OP.`
    )
  })
import { task, types } from 'hardhat/config'
import { ethers } from 'ethers'
import { LedgerSigner } from '@ethersproject/hardware-wallets'
import dotenv from 'dotenv'
import { prompt } from '../src/prompt'
dotenv.config()
// Hardcode the expected addresse
const addresses = {
governanceToken: '0x4200000000000000000000000000000000000042',
}
// Deploys the GovernanceToken to its expected system address (via a fresh
// deployer account at nonce 0), deploys the MintManager, irreversibly hands
// token ownership to the MintManager, and validates the resulting wiring.
task('deploy-token', 'Deploy governance token and its mint manager contracts')
  .addParam('mintManagerOwner', 'Owner of the mint manager')
  .addOptionalParam('useLedger', 'User ledger hardware wallet as signer')
  .addOptionalParam(
    'ledgerTokenDeployerPath',
    'Ledger key derivation path for the token deployer account',
    ethers.utils.defaultPath,
    types.string
  )
  .addParam(
    'pkDeployer',
    'Private key for main deployer account',
    process.env.PRIVATE_KEY_DEPLOYER
  )
  .addOptionalParam(
    'pkTokenDeployer',
    'Private key for the token deployer account',
    process.env.PRIVATE_KEY_TOKEN_DEPLOYER
  )
  .setAction(async (args, hre) => {
    console.log('Deploying token to', hre.network.name, 'network')
    // There cannot be two ledgers at the same time
    let tokenDeployer
    // Deploy the token
    if (args.useLedger) {
      // Token is deployed to a system address at `0x4200000000000000000000000000000000000042`
      // For that a dedicated deployer account is used
      tokenDeployer = new LedgerSigner(
        hre.ethers.provider,
        'default',
        args.ledgerTokenDeployerPath
      )
    } else {
      tokenDeployer = new hre.ethers.Wallet(args.pkTokenDeployer).connect(
        hre.ethers.provider
      )
    }
    // Create the MintManager Deployer
    const deployer = new hre.ethers.Wallet(args.pkDeployer).connect(
      hre.ethers.provider
    )
    // Get the sizes of the bytecode to check if the contracts
    // have already been deployed. Useful for an error partway through
    // the script
    const governanceTokenCode = await hre.ethers.provider.getCode(
      addresses.governanceToken
    )
    const addrTokenDeployer = await tokenDeployer.getAddress()
    console.log(`Using token deployer: ${addrTokenDeployer}`)
    const tokenDeployerBalance = await tokenDeployer.getBalance()
    if (tokenDeployerBalance.eq(0)) {
      throw new Error(`Token deployer has no balance`)
    }
    console.log(`Token deployer balance: ${tokenDeployerBalance.toString()}`)
    const nonceTokenDeployer = await tokenDeployer.getTransactionCount()
    console.log(`Token deployer nonce: ${nonceTokenDeployer}`)
    const GovernanceToken = await hre.ethers.getContractFactory(
      'GovernanceToken'
    )
    let governanceToken = GovernanceToken.attach(
      addresses.governanceToken
    ).connect(tokenDeployer)
    // Only deploy when the token deployer account is fresh (nonce 0, so the
    // CREATE-derived address is predictable) and the target address is empty.
    if (nonceTokenDeployer === 0 && governanceTokenCode === '0x') {
      await prompt('Ready to deploy. Does everything look OK?')
      // Deploy the GovernanceToken
      governanceToken = await GovernanceToken.connect(tokenDeployer).deploy()
      const tokenReceipt = await governanceToken.deployTransaction.wait()
      console.log('GovernanceToken deployed to:', tokenReceipt.contractAddress)
      if (tokenReceipt.contractAddress !== addresses.governanceToken) {
        console.log(
          `Expected governance token address ${addresses.governanceToken}`
        )
        console.log(`Got ${tokenReceipt.contractAddress}`)
        throw new Error(`Fatal error! Mismatch of governance token address`)
      }
    } else {
      console.log(
        `GovernanceToken already deployed at ${addresses.governanceToken}, skipping`
      )
      console.log(`Deployer nonce: ${nonceTokenDeployer}`)
      console.log(`Code size: ${governanceTokenCode.length}`)
    }
    const { mintManagerOwner } = args
    // Do the deployer things
    console.log('Deploying MintManager')
    const addr = await deployer.getAddress()
    console.log(`Using MintManager deployer: ${addr}`)
    const deployerBalance = await deployer.getBalance()
    if (deployerBalance.eq(0)) {
      throw new Error('Deployer has no balance')
    }
    console.log(`Deployer balance: ${deployerBalance.toString()}`)
    const deployerNonce = await deployer.getTransactionCount()
    console.log(`Deployer nonce: ${deployerNonce}`)
    await prompt('Does this look OK?')
    const MintManager = await hre.ethers.getContractFactory('MintManager')
    // Deploy the MintManager
    console.log(
      `Deploying MintManager with (${mintManagerOwner}, ${addresses.governanceToken})`
    )
    const mintManager = await MintManager.connect(deployer).deploy(
      mintManagerOwner,
      addresses.governanceToken
    )
    const receipt = await mintManager.deployTransaction.wait()
    console.log(`Deployed mint manager to ${receipt.contractAddress}`)
    let mmOwner = await mintManager.owner()
    const currTokenOwner = await governanceToken
      .attach(addresses.governanceToken)
      .owner()
    console.log(
      'About to transfer ownership of the token to the mint manager! This is irreversible.'
    )
    console.log(`Current token owner: ${currTokenOwner}`)
    console.log(`Mint manager address: ${mintManager.address}`)
    console.log(`Mint manager owner: ${mmOwner}`)
    await prompt('Is this OK?')
    console.log('Transferring ownership...')
    // Transfer ownership of the token to the MintManager instance
    const tx = await governanceToken
      .attach(addresses.governanceToken)
      .transferOwnership(mintManager.address)
    await tx.wait()
    console.log(
      `Transferred ownership of governance token to ${mintManager.address}`
    )
    console.log('MintManager deployed to:', receipt.contractAddress)
    console.log('MintManager owner set to:', mintManagerOwner)
    console.log(
      'MintManager governanceToken set to:',
      addresses.governanceToken
    )
    console.log('### Token deployment complete ###')
    // Post-deployment sanity checks: token owner, manager wiring, manager owner.
    const tokOwner = await governanceToken
      .attach(addresses.governanceToken)
      .owner()
    if (tokOwner !== mintManager.address) {
      throw new Error(`GovernanceToken owner not set correctly`)
    }
    // Check that the deployment went as expected
    const govToken = await mintManager.governanceToken()
    if (govToken !== addresses.governanceToken) {
      throw new Error(`MintManager governance token not set correctly`)
    }
    mmOwner = await mintManager.owner()
    if (mmOwner !== mintManagerOwner) {
      throw new Error(`MintManager owner not set correctly`)
    }
    console.log('Validated MintManager config')
  })
import fs from 'fs'
import { task } from 'hardhat/config'
import { parseBalanceMap } from '../src/parse-balance-map'
// Reads an { address: balance } map, builds the airdrop Merkle tree, and
// writes the full claim data (root, token total, per-account proofs) to disk.
task('generate-merkle-root')
  .addParam(
    'inFile',
    'Input JSON file location containing a map of account addresses to string balances'
  )
  .addParam('outFile', 'Output JSON file location for the Merkle data.')
  .setAction(async (args, hre) => {
    console.log('Reading balances map...')
    const raw = fs.readFileSync(args.inFile, { encoding: 'utf8' })
    const balances = JSON.parse(raw)
    if (typeof balances !== 'object') {
      throw new Error('Invalid JSON')
    }
    console.log('Parsing balances map...')
    const merkleData = parseBalanceMap(balances)
    console.log('Writing claims...')
    fs.writeFileSync(args.outFile, JSON.stringify(merkleData, null, ' '))
    console.log(`Merkle root: ${merkleData.merkleRoot}`)
    console.log(
      `Token total: ${hre.ethers.utils.formatEther(merkleData.tokenTotal)}`
    )
    console.log(`Num claims: ${Object.keys(merkleData.claims).length}`)
  })
import { task } from 'hardhat/config'
import { ethers } from 'ethers'
import dotenv from 'dotenv'
import { prompt } from '../src/prompt'
dotenv.config()
// Performs the one-time initial mint of the OP supply through the MintManager.
// Refuses to run if the manager has ever minted before, or if the ownership
// chain (minter -> MintManager -> GovernanceToken) is not wired as expected.
task('mint-initial-supply', 'Mints the initial token supply')
  .addParam('mintManagerAddr', 'Address of the mint manager')
  .addParam('amount', 'Amount to mint (IN WHOLE OP)', '4294967296')
  .addParam(
    'pkMinter',
    'Private key of the minter',
    process.env.PRIVATE_KEY_INITIAL_MINTER
  )
  .setAction(async (args, hre) => {
    const minter = new hre.ethers.Wallet(args.pkMinter).connect(
      hre.ethers.provider
    )
    const amount = args.amount
    const amountBase = ethers.utils.parseEther(amount)
    console.log('Please verify initial mint amount and recipient.')
    console.log('!!! THIS IS A ONE-WAY ACTION !!!')
    console.log('')
    console.log(`Amount: ${args.amount}`)
    console.log(`Amount (base units): ${amountBase.toString()}`)
    console.log(`Recipient: ${minter.address}`)
    console.log('')
    const govToken = await hre.ethers.getContractAt(
      'GovernanceToken',
      '0x4200000000000000000000000000000000000042'
    )
    const mintManager = (
      await hre.ethers.getContractAt('MintManager', args.mintManagerAddr)
    ).connect(minter)
    // mintPermittedAfter is zero only before the first-ever mint, so this
    // guarantees the (uncapped) initial mint cannot be repeated.
    const permittedAfter = await mintManager.mintPermittedAfter()
    if (!permittedAfter.eq(0)) {
      throw new Error('Mint manager has already executed.')
    }
    const owner = await mintManager.owner()
    if (minter.address !== owner) {
      throw new Error(
        `Mint manager is owned by ${owner}, not ${minter.address}`
      )
    }
    const tokOwner = await govToken.owner()
    if (mintManager.address !== tokOwner) {
      throw new Error(
        `Gov token is owned by ${tokOwner}, not ${mintManager.address}`
      )
    }
    await prompt('Is this OK?')
    const tx = await mintManager.mint(minter.address, amountBase, {
      gasLimit: 3_000_000,
    })
    console.log(`Sent transaction ${tx.hash}`)
    await tx.wait()
    // Verify on-chain totals; mismatches are reported but do not throw.
    console.log('Successfully minted. Verifying...')
    const supply = await govToken.totalSupply()
    if (supply.eq(amountBase)) {
      console.log('Total supply verified.')
    } else {
      console.log(
        `Total supply invalid! Have: ${supply.toString()}, want: ${amountBase.toString()}.`
      )
    }
    const bal = await govToken.balanceOf(minter.address)
    if (bal.eq(amountBase)) {
      console.log('Balance verified.')
    } else {
      console.log(
        `Minter balance invalid! Have: ${bal.toString()}, want: ${amountBase.toString()}.`
      )
    }
  })
import fs from 'fs'
import { task } from 'hardhat/config'
import dotenv from 'dotenv'
import { prompt } from '../src/prompt'
dotenv.config()
// Sends governance tokens from a single funder account to every address in a
// { address: whole-token-amount } JSON distribution file.
task('multi-send', 'Send tokens to multiple addresses')
  .addOptionalParam(
    'privateKey',
    'Private Key for deployer account',
    process.env.PRIVATE_KEY_MULTI_SEND
  )
  .addParam('inFile', 'Distribution file')
  .setAction(async (args, hre) => {
    console.log(`Starting multi send on ${hre.network.name} network`)
    // Load the distribution setup
    const distributionJson = fs.readFileSync(args.inFile).toString()
    const distribution = JSON.parse(distributionJson)
    const sender = new hre.ethers.Wallet(args.privateKey).connect(
      hre.ethers.provider
    )
    const addr = await sender.getAddress()
    console.log(`Using deployer: ${addr}`)
    console.log('Performing multi send to the following addresses:')
    // Preview every transfer (amounts shown in whole tokens and in 18-decimal
    // base units) before asking for confirmation.
    for (const [address, amount] of Object.entries(distribution)) {
      console.log(
        `${address}: ${amount} (${hre.ethers.utils.parseEther(
          amount as string
        )})`
      )
    }
    await prompt('Is this OK?')
    const governanceToken = (
      await hre.ethers.getContractAt(
        'GovernanceToken',
        '0x4200000000000000000000000000000000000042'
      )
    ).connect(sender)
    // Transfers are strictly sequential: each is mined before the next is sent.
    for (const [address, amount] of Object.entries(distribution)) {
      const amountBase = hre.ethers.utils.parseEther(amount as string)
      console.log(`Transferring ${amountBase} tokens to ${address}...`)
      const transferTx = await governanceToken.transfer(address, amountBase)
      console.log(`Waiting for tx ${transferTx.hash}`)
      await transferTx.wait()
    }
    console.log('Done.')
  })
import fs from 'fs'
import { task } from 'hardhat/config'
import { MerkleDistributorInfo } from '../src/parse-balance-map'
// Smoke-tests a deployed MerkleDistributor by statically executing 100
// randomly chosen claims from the claims file (no state is modified).
task('test-claims')
  .addParam('inFile', 'Input claims file')
  .addParam('distributorAddress', 'Address of the distributor')
  .setAction(async (args, hre) => {
    const contract = await hre.ethers.getContractAt(
      'MerkleDistributor',
      args.distributorAddress
    )
    const distributor = contract.connect(hre.ethers.provider)
    console.log('Reading claims...')
    const claimsFile = fs.readFileSync(args.inFile, { encoding: 'utf8' })
    const info = JSON.parse(claimsFile) as MerkleDistributorInfo
    console.log('Smoke testing 100 random claims.')
    const accounts = Object.keys(info.claims)
    for (let attempt = 0; attempt < 100; attempt++) {
      const pick = accounts[Math.floor(accounts.length * Math.random())]
      const claimData = info.claims[pick]
      process.stdout.write(
        `Attempting claim for ${pick} [${attempt + 1}/100]... `
      )
      // callStatic simulates the claim without sending a transaction.
      await distributor.callStatic.claim(
        claimData.index,
        pick,
        claimData.amount,
        claimData.proof
      )
      process.stdout.write('OK\n')
    }
    console.log('Smoke test passed.')
  })
import fs from 'fs'
import { program } from 'commander'
import { BigNumber, utils } from 'ethers'
// CLI setup: a single required --input option pointing at the proofs JSON.
program
  .version('0.0.0')
  .requiredOption(
    '-i, --input <path>',
    'input JSON file location containing the merkle proofs for each account and the merkle root'
  )
program.parse(process.argv)
// Commander v7+ no longer stores option values as properties on the program
// object (storeOptionsAsProperties defaults to false), so `program.input`
// would be undefined with the commander ^9 declared in package.json; the
// value must be read via program.opts().
const json = JSON.parse(
  fs.readFileSync(program.opts().input, { encoding: 'utf8' })
)
// Hashes two sibling nodes in lexicographically sorted order; when one side
// is missing (odd-length layer), the present node is promoted unchanged.
const combinedHash = (first: Buffer, second: Buffer): Buffer => {
  if (!first) {
    return second
  }
  if (!second) {
    return first
  }
  const sorted = [first, second].sort(Buffer.compare)
  const hashHex = utils.solidityKeccak256(['bytes32', 'bytes32'], sorted)
  return Buffer.from(hashHex.slice(2), 'hex')
}
// Leaf hash: keccak256(abi.encodePacked(index, account, amount)), matching
// the node computed by the MerkleDistributor contract.
const toNode = (
  index: number | BigNumber,
  account: string,
  amount: BigNumber
): Buffer => {
  const encodedHex = utils.solidityKeccak256(
    ['uint256', 'address', 'uint256'],
    [index, account, amount]
  )
  // Drop the '0x' prefix before decoding.
  return Buffer.from(encodedHex.slice(2), 'hex')
}
// Recomputes the root by folding each proof sibling into the leaf hash and
// compares the result against the expected root.
const verifyProof = (
  index: number | BigNumber,
  account: string,
  amount: BigNumber,
  proof: Buffer[],
  expected: Buffer
): boolean => {
  const computed = proof.reduce(
    (node, sibling) => combinedHash(node, sibling),
    toNode(index, account, amount)
  )
  return computed.equals(expected)
}
// Builds the parent layer by hashing adjacent pairs; a lone trailing node is
// carried up unchanged (combinedHash returns it when its pair is undefined).
const getNextLayer = (elements: Buffer[]): Buffer[] => {
  const parents: Buffer[] = []
  for (let i = 0; i < elements.length; i += 2) {
    parents.push(combinedHash(elements[i], elements[i + 1]))
  }
  return parents
}
// Reconstructs the Merkle root from a list of balances: hash each entry into
// a leaf, sort, deduplicate, then collapse layers pairwise up to the root.
const getRoot = (
  _balances: { account: string; amount: BigNumber; index: number }[]
): Buffer => {
  const sortedLeaves = _balances
    .map(({ account, amount, index }) => toNode(index, account, amount))
    // sort by lexicographical order
    .sort(Buffer.compare)
  // Deduplicate any elements that are byte-identical to their predecessor.
  const leaves = sortedLeaves.filter(
    (el, idx) => idx === 0 || !sortedLeaves[idx - 1].equals(el)
  )
  // Collapse one layer at a time until a single node remains.
  let layer = leaves
  while (layer.length > 1) {
    layer = getNextLayer(layer)
  }
  return layer[0]
}
// Verification pass: check every claim's proof against the root read from the
// file, then independently rebuild the tree and compare roots.
if (typeof json !== 'object') {
  throw new Error('Invalid JSON')
}
const merkleRootHex = json.merkleRoot
const merkleRoot = Buffer.from(merkleRootHex.slice(2), 'hex')
const balances: { index: number; account: string; amount: BigNumber }[] = []
let valid = true
// Collect balances while verifying, so the root can be reconstructed below.
Object.keys(json.claims).forEach((address) => {
  const claim = json.claims[address]
  const proof = claim.proof.map((p: string) => Buffer.from(p.slice(2), 'hex'))
  balances.push({
    index: claim.index,
    account: address,
    amount: BigNumber.from(claim.amount),
  })
  if (verifyProof(claim.index, address, claim.amount, proof, merkleRoot)) {
    console.log('Verified proof for', claim.index, address)
  } else {
    console.log('Verification for', address, 'failed')
    valid = false
  }
})
if (!valid) {
  console.error('Failed validation for 1 or more proofs')
  process.exit(1)
}
console.log('Done!')
// Root
// Rebuild the root from the collected balances and compare to the file's.
const root = getRoot(balances).toString('hex')
console.log('Reconstructed merkle root', root)
console.log(
  'Root matches the one read from the JSON?',
  root === merkleRootHex.slice(2)
)
import { BigNumber, utils } from 'ethers'
import MerkleTree from './merkle-tree'
/**
 * Merkle tree over (index, account, amount) leaves, matching the node format
 * consumed by the MerkleDistributor contract.
 */
export default class BalanceTree {
  private readonly tree: MerkleTree

  /**
   * @param balances Ordered list of claims; each entry's array position
   *                 becomes the claim index baked into its leaf.
   */
  constructor(balances: { account: string; amount: BigNumber }[]) {
    this.tree = new MerkleTree(
      balances.map(({ account, amount }, index) => {
        return BalanceTree.toNode(index, account, amount)
      })
    )
  }

  /**
   * Verifies a proof by folding each sibling hash into the leaf and comparing
   * the result against the expected root.
   */
  public static verifyProof(
    index: number | BigNumber,
    account: string,
    amount: BigNumber,
    proof: Buffer[],
    root: Buffer
  ): boolean {
    let pair = BalanceTree.toNode(index, account, amount)
    for (const item of proof) {
      pair = MerkleTree.combinedHash(pair, item)
    }
    return pair.equals(root)
  }

  // keccak256(abi.encode(index, account, amount))
  public static toNode(
    index: number | BigNumber,
    account: string,
    amount: BigNumber
  ): Buffer {
    // slice(2) drops the '0x' prefix; substr() is deprecated (Annex B).
    return Buffer.from(
      utils
        .solidityKeccak256(
          ['uint256', 'address', 'uint256'],
          [index, account, amount]
        )
        .slice(2),
      'hex'
    )
  }

  // Hex-encoded root of the underlying tree.
  public getHexRoot(): string {
    return this.tree.getHexRoot()
  }

  // returns the hex bytes32 values of the proof
  public getProof(
    index: number | BigNumber,
    account: string,
    amount: BigNumber
  ): string[] {
    return this.tree.getHexProof(BalanceTree.toNode(index, account, amount))
  }
}
import { bufferToHex, keccak256 } from 'ethereumjs-util'
/**
 * Sorted, deduplicated Merkle tree over raw 32-byte leaves. Pair hashes are
 * keccak256 over the lexicographically sorted concatenation of the two nodes,
 * and an unpaired node at the end of a layer is promoted unchanged.
 */
export default class MerkleTree {
  private readonly elements: Buffer[]
  private readonly bufferElementPositionIndex: { [hexElement: string]: number }
  private readonly layers: Buffer[][]

  constructor(elements: Buffer[]) {
    this.elements = [...elements]
    // Sort elements
    this.elements.sort(Buffer.compare)
    // Deduplicate elements
    this.elements = MerkleTree.bufDedup(this.elements)
    // Index each leaf by its hex form for O(1) lookup in getProof().
    this.bufferElementPositionIndex = this.elements.reduce<{
      [hexElement: string]: number
    }>((memo, el, index) => {
      memo[bufferToHex(el)] = index
      return memo
    }, {})
    // Create layers
    this.layers = this.getLayers(this.elements)
  }

  // Builds every layer from the leaves up to the single-element root layer.
  getLayers(elements: Buffer[]): Buffer[][] {
    if (elements.length === 0) {
      throw new Error('empty tree')
    }
    const layers: Buffer[][] = []
    layers.push(elements)
    // Get next layer until we reach the root
    while (layers[layers.length - 1].length > 1) {
      layers.push(this.getNextLayer(layers[layers.length - 1]))
    }
    return layers
  }

  getNextLayer(elements: Buffer[]): Buffer[] {
    return elements.reduce<Buffer[]>((layer, el, idx, arr) => {
      if (idx % 2 === 0) {
        // Hash the current element with its pair element
        layer.push(MerkleTree.combinedHash(el, arr[idx + 1]))
      }
      return layer
    }, [])
  }

  // Keccak of the sorted pair; a missing side promotes the other unchanged.
  static combinedHash(first: Buffer, second: Buffer): Buffer {
    if (!first) {
      return second
    }
    if (!second) {
      return first
    }
    return keccak256(MerkleTree.sortAndConcat(first, second))
  }

  getRoot(): Buffer {
    return this.layers[this.layers.length - 1][0]
  }

  getHexRoot(): string {
    return bufferToHex(this.getRoot())
  }

  // Collects the element's sibling at every layer, from leaf toward the root.
  getProof(el: Buffer): Buffer[] {
    let idx = this.bufferElementPositionIndex[bufferToHex(el)]
    if (typeof idx !== 'number') {
      throw new Error('Element does not exist in Merkle tree')
    }
    // Explicit Buffer[] accumulator (matching getNextLayer): a bare [] would
    // be inferred as never[] under strict mode and reject the push below.
    return this.layers.reduce<Buffer[]>((proof, layer) => {
      const pairElement = MerkleTree.getPairElement(idx, layer)
      if (pairElement) {
        proof.push(pairElement)
      }
      idx = Math.floor(idx / 2)
      return proof
    }, [])
  }

  getHexProof(el: Buffer): string[] {
    const proof = this.getProof(el)
    return MerkleTree.bufArrToHexArr(proof)
  }

  private static getPairElement(idx: number, layer: Buffer[]): Buffer | null {
    const pairIdx = idx % 2 === 0 ? idx + 1 : idx - 1
    if (pairIdx < layer.length) {
      return layer[pairIdx]
    } else {
      return null
    }
  }

  private static bufDedup(elements: Buffer[]): Buffer[] {
    return elements.filter((el, idx) => {
      return idx === 0 || !elements[idx - 1].equals(el)
    })
  }

  private static bufArrToHexArr(arr: Buffer[]): string[] {
    if (arr.some((el) => !Buffer.isBuffer(el))) {
      throw new Error('Array is not an array of buffers')
    }
    return arr.map((el) => '0x' + el.toString('hex'))
  }

  private static sortAndConcat(...args: Buffer[]): Buffer {
    return Buffer.concat([...args].sort(Buffer.compare))
  }
}
import { BigNumber, utils } from 'ethers'
import BalanceTree from './balance-tree'
const { isAddress, getAddress } = utils
// This is the blob that gets distributed and pinned to IPFS.
// It is completely sufficient for recreating the entire merkle tree.
// Anyone can verify that all air drops are included in the tree,
// and the tree has no additional distributions.
export interface MerkleDistributorInfo {
  // Hex-encoded merkle root of the full distribution.
  merkleRoot: string
  // Hex-encoded sum of all distributed amounts.
  tokenTotal: string
  // Per-account claim data, keyed by checksummed address.
  claims: {
    [account: string]: {
      // Position of the claim's leaf within the sorted tree.
      index: number
      // Hex-encoded claim amount.
      amount: string
      // Merkle proof (hex bytes32 values) for this claim.
      proof: string[]
      // Optional metadata derived from the `reasons` string, when provided.
      flags?: {
        [flag: string]: boolean
      }
    }
  }
}
// Legacy input shape: account -> amount (number or numeric/hex string).
type OldFormat = { [account: string]: number | string }
// Current input shape: one record per account.
type NewFormat = { address: string; earnings: string; reasons: string }
/**
 * Builds the full merkle distribution from a balance mapping.
 *
 * Accepts either the legacy `{ account: amount }` mapping or an array of
 * `{ address, earnings, reasons }` records, validates every entry, and
 * produces the merkle root plus a per-account claim (index, amount, proof).
 *
 * Throws if any address is invalid or duplicated, or if any amount is <= 0.
 */
export const parseBalanceMap = (
  balances: OldFormat | NewFormat[]
): MerkleDistributorInfo => {
  // Normalize the legacy mapping shape into the array-of-records shape.
  let balancesInNewFormat: NewFormat[]
  if (Array.isArray(balances)) {
    balancesInNewFormat = balances
  } else {
    balancesInNewFormat = Object.keys(balances).map((account): NewFormat => {
      const value = balances[account]
      // Numbers are converted directly to hex; strings go through BigNumber
      // so both decimal and hex string inputs are accepted.
      const earnings =
        typeof value === 'number'
          ? `0x${value.toString(16)}`
          : BigNumber.from(value).toHexString()
      return {
        address: account,
        earnings,
        reasons: '',
      }
    })
  }

  // Validate each record and index it by its checksummed address.
  const dataByAddress: {
    [address: string]: {
      amount: BigNumber
      flags?: { [flag: string]: boolean }
    }
  } = {}
  for (const { address: account, earnings, reasons } of balancesInNewFormat) {
    if (!isAddress(account)) {
      throw new Error(`Found invalid address: ${account}`)
    }
    const parsed = getAddress(account)
    if (dataByAddress[parsed]) {
      throw new Error(`Duplicate address: ${parsed}`)
    }
    const parsedNum = BigNumber.from(earnings)
    if (parsedNum.lte(0)) {
      throw new Error(`Invalid amount for account: ${account}`)
    }

    const flags = {
      isSOCKS: reasons.includes('socks'),
      isLP: reasons.includes('lp'),
      isUser: reasons.includes('user'),
    }
    // Flags are only attached when a reasons string was provided.
    dataByAddress[parsed] = {
      amount: parsedNum,
      ...(reasons === '' ? {} : { flags }),
    }
  }

  const sortedAddresses = Object.keys(dataByAddress).sort()

  // Construct the merkle tree over (index, address, amount) leaves.
  const tree = new BalanceTree(
    sortedAddresses.map((address) => ({
      account: address,
      amount: dataByAddress[address].amount,
    }))
  )

  // Generate one claim per address; index is the sorted position.
  const claims: MerkleDistributorInfo['claims'] = {}
  sortedAddresses.forEach((address, index) => {
    const { amount, flags } = dataByAddress[address]
    claims[address] = {
      index,
      amount: amount.toHexString(),
      proof: tree.getProof(index, address, amount),
      ...(flags ? { flags } : {}),
    }
  })

  // Sum every amount to report the total token distribution.
  let tokenTotal = BigNumber.from(0)
  for (const address of sortedAddresses) {
    tokenTotal = tokenTotal.add(dataByAddress[address].amount)
  }

  return {
    merkleRoot: tree.getHexRoot(),
    tokenTotal: tokenTotal.toHexString(),
    claims,
  }
}
import readline from 'readline'
const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout,
})

/**
 * Asks the user a yes/no question on stdin and resolves only on 'y'.
 *
 * @param msg Question to display (rendered as "<msg> [y/n]: ").
 * @returns Promise that resolves on an answer of 'y' and rejects with the
 *          string 'Aborted!' on any other answer.
 */
export const prompt = (msg: string) =>
  new Promise<void>((resolve, reject) =>
    rl.question(`${msg} [y/n]: `, (confirmation) => {
      if (confirmation !== 'y') {
        // Keep the original string rejection value for existing callers.
        reject('Aborted!')
      } else {
        // Only resolve on an explicit 'y'. Previously resolve() also ran
        // unconditionally after reject(); inert at runtime, but misleading.
        resolve()
      }
    })
  )
This diff is collapsed.
This diff is collapsed.
import { ethers } from 'hardhat'
import ethSigUtil from 'eth-sig-util'
// Maximum uint256 value as a decimal string; used as the default
// deadline/expiry for the typed-data builders below.
export const MAX_UINT256 = ethers.constants.MaxUint256.toString()
// EIP-712 type definition shared by every typed-data payload below.
export const EIP712Domain = [
  { name: 'name', type: 'string' },
  { name: 'version', type: 'string' },
  { name: 'chainId', type: 'uint256' },
  { name: 'verifyingContract', type: 'address' },
]

// EIP-2612 Permit struct type definition.
export const Permit = [
  { name: 'owner', type: 'address' },
  { name: 'spender', type: 'address' },
  { name: 'value', type: 'uint256' },
  { name: 'nonce', type: 'uint256' },
  { name: 'deadline', type: 'uint256' },
]

// Vote-delegation struct type definition.
export const Delegation = [
  { name: 'delegatee', type: 'address' },
  { name: 'nonce', type: 'uint256' },
  { name: 'expiry', type: 'uint256' },
]

/**
 * Builds the EIP-712 typed-data payload for an EIP-2612 permit signature.
 * The deadline defaults to the maximum uint256 (i.e. no expiry).
 */
export const buildDataPermit = (
  chainId: any,
  verifyingContract: any,
  owner: any,
  spender: any,
  value: any,
  nonce: any,
  deadline = MAX_UINT256
) => {
  const domain = { name: 'Optimism', version: '1', chainId, verifyingContract }
  const message = { owner, spender, value, nonce, deadline }
  return {
    primaryType: 'Permit',
    types: { EIP712Domain, Permit },
    domain,
    message,
  }
}

/**
 * Builds the EIP-712 typed-data payload for a delegation signature.
 * The expiry defaults to the maximum uint256 (i.e. no expiry).
 */
export const buildDataDelegation = (
  chainId: any,
  verifyingContract: any,
  delegatee: any,
  nonce: any,
  expiry = MAX_UINT256
) => {
  const domain = { name: 'Optimism', version: '1', chainId, verifyingContract }
  const message = { delegatee, nonce, expiry }
  return {
    types: { EIP712Domain, Delegation },
    domain,
    primaryType: 'Delegation',
    message,
  }
}
/**
 * Computes the EIP-712 domain separator for the given domain parameters.
 *
 * @returns 0x-prefixed hex hash of the EIP712Domain struct.
 */
export const domainSeparator = (
  name: any,
  version: any,
  chainId: any,
  verifyingContract: any
) => {
  const structHash = ethSigUtil.TypedDataUtils.hashStruct(
    'EIP712Domain',
    { name, version, chainId, verifyingContract },
    { EIP712Domain }
  )
  return `0x${structHash.toString('hex')}`
}
import { ethers } from 'hardhat'
// Number of seconds in one day.
export const SECONDS_IN_1_DAY = 24 * 60 * 60
// Number of seconds in a (non-leap) 365-day year.
export const SECONDS_IN_365_DAYS = 365 * 24 * 60 * 60
/**
 * Fetches the timestamp of the block at the given height.
 *
 * @param blockNumber Block height to query.
 * @returns Unix timestamp of that block.
 */
export const getBlockTimestamp = async (blockNumber: number) => {
  const { timestamp } = await ethers.provider.getBlock(blockNumber)
  return timestamp
}
/**
 * Advances the local chain's clock by the given number of days and mines a
 * block at the new timestamp.
 *
 * @param numberOfDays Number of days to fast-forward.
 */
export const fastForwardDays = async (numberOfDays: number) => {
  const { timestamp } = await ethers.provider.getBlock('latest')
  const target = timestamp + numberOfDays * SECONDS_IN_1_DAY
  await ethers.provider.send('evm_setNextBlockTimestamp', [target])
  await ethers.provider.send('evm_mine', [])
}
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"rootDir": "./src",
"outDir": "./dist"
},
"include": [
"src/**/*"
]
}
import { DeployConfig } from '../../src'
// Deploy configuration for this network: addresses consumed by the
// contracts-periphery deploy scripts.
const config: DeployConfig = {
  // Intended owner of the RetroReceiver deployment.
  retroReceiverOwner: '0xc37f6a6c4AB335E20d10F034B90386E2fb70bbF5',
  // Intended owner of the Drippie deployment.
  drippieOwner: '0xc37f6a6c4AB335E20d10F034B90386E2fb70bbF5',
  // ddd account; the deploy scripts pass this as the constructor owner arg.
  ddd: '0x9C6373dE60c2D3297b18A8f964618ac46E011B58',
}
export default config
import { DeployConfig } from '../../src'
// Deploy configuration for this network: addresses consumed by the
// contracts-periphery deploy scripts.
const config: DeployConfig = {
  // Intended owner of the RetroReceiver deployment.
  retroReceiverOwner: '0xc37f6a6c4AB335E20d10F034B90386E2fb70bbF5',
  // Intended owner of the Drippie deployment.
  drippieOwner: '0xc37f6a6c4AB335E20d10F034B90386E2fb70bbF5',
  // ddd account; the deploy scripts pass this as the constructor owner arg.
  ddd: '0x9C6373dE60c2D3297b18A8f964618ac46E011B58',
}
export default config
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.9;
/**
 * @title MockTeleportr
 * @notice Minimal test double for the Teleportr contract: sends this
 *         contract's entire ETH balance to whoever calls withdrawBalance.
 *         Uses transfer(), which forwards only the 2300 gas stipend.
 */
contract MockTeleportr {
    function withdrawBalance() external {
        payable(msg.sender).transfer(address(this).balance);
    }
}
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.9;
import { AssetReceiver } from "./AssetReceiver.sol";
/**
 * @notice Stub interface for Teleportr, exposing only the withdrawal entry
 *         point this contract needs.
 */
interface Teleportr {
    function withdrawBalance() external;
}

/**
 * @title TeleportrWithdrawer
 * @notice The TeleportrWithdrawer is a simple contract capable of withdrawing funds from the
 *         TeleportrContract and sending them to some recipient address.
 */
contract TeleportrWithdrawer is AssetReceiver {
    /**
     * @notice Address of the Teleportr contract.
     */
    address public teleportr;

    /**
     * @notice Address that will receive Teleportr withdrawals.
     */
    address public recipient;

    /**
     * @notice Data to be sent to the recipient address along with withdrawn funds.
     */
    bytes public data;

    /**
     * @param _owner Initial owner of the contract.
     */
    constructor(address _owner) AssetReceiver(_owner) {}

    /**
     * @notice Allows the owner to update the recipient address.
     *
     * @param _recipient New recipient address.
     */
    function setRecipient(address _recipient) external onlyOwner {
        recipient = _recipient;
    }

    /**
     * @notice Allows the owner to update the Teleportr contract address.
     *
     * @param _teleportr New Teleportr contract address.
     */
    function setTeleportr(address _teleportr) external onlyOwner {
        teleportr = _teleportr;
    }

    /**
     * @notice Allows the owner to update the data to be sent to the recipient address.
     *
     * @param _data New data to be sent to the recipient address.
     */
    function setData(bytes memory _data) external onlyOwner {
        data = _data;
    }

    /**
     * @notice Withdraws the full balance of the Teleportr contract to the recipient address.
     *         Anyone is allowed to trigger this function since the recipient address cannot be
     *         controlled by the msg.sender. Note that the call forwards this contract's ENTIRE
     *         balance (including any ETH held before the withdrawal), along with `data`, to
     *         `recipient`, and reverts if that call fails.
     */
    function withdrawFromTeleportr() external {
        Teleportr(teleportr).withdrawBalance();
        (bool success, ) = recipient.call{ value: address(this).balance }(data);
        require(success, "TeleportrWithdrawer: send failed");
    }
}
......@@ -52,22 +52,39 @@ contract Drippie is AssetReceiver {
DripStatus status;
DripConfig config;
uint256 last;
uint256 count;
}
/**
* Emitted when a new drip is created.
*/
event DripCreated(string indexed name, DripConfig config);
event DripCreated(
// Emit name twice because indexed version is hashed.
string indexed nameref,
string name,
DripConfig config
);
/**
* Emitted when a drip status is updated.
*/
event DripStatusUpdated(string indexed name, DripStatus status);
event DripStatusUpdated(
// Emit name twice because indexed version is hashed.
string indexed nameref,
string name,
DripStatus status
);
/**
* Emitted when a drip is executed.
*/
event DripExecuted(string indexed name, address indexed executor, uint256 timestamp);
event DripExecuted(
// Emit name twice because indexed version is hashed.
string indexed nameref,
string name,
address executor,
uint256 timestamp
);
/**
* Maps from drip names to drip states.
......@@ -109,7 +126,7 @@ contract Drippie is AssetReceiver {
}
// Tell the world!
emit DripCreated(_name, _config);
emit DripCreated(_name, _name, _config);
}
/**
......@@ -163,20 +180,16 @@ contract Drippie is AssetReceiver {
// If we made it here then we can safely update the status.
drips[_name].status = _status;
emit DripStatusUpdated(_name, drips[_name].status);
emit DripStatusUpdated(_name, _name, drips[_name].status);
}
/**
* Triggers a drip. This function is deliberately left as a public function because the
* assumption being made here is that setting the drip to ACTIVE is an affirmative signal that
* the drip should be executable according to the drip parameters, drip check, and drip
* interval. Note that drip parameters are read entirely from the state and are not supplied as
* user input, so there should not be any way for a non-authorized user to influence the
* behavior of the drip.
* Checks if a given drip is executable.
*
* @param _name Name of the drip to trigger.
* @param _name Drip to check.
* @return True if the drip is executable, false otherwise.
*/
function drip(string memory _name) external {
function executable(string memory _name) public view returns (bool) {
DripState storage state = drips[_name];
// Only allow active drips to be executed, an obvious security measure.
......@@ -201,6 +214,29 @@ contract Drippie is AssetReceiver {
"Drippie: dripcheck failed so drip is not yet ready to be triggered"
);
// Alright, we're good to execute.
return true;
}
/**
* Triggers a drip. This function is deliberately left as a public function because the
* assumption being made here is that setting the drip to ACTIVE is an affirmative signal that
* the drip should be executable according to the drip parameters, drip check, and drip
* interval. Note that drip parameters are read entirely from the state and are not supplied as
* user input, so there should not be any way for a non-authorized user to influence the
* behavior of the drip.
*
* @param _name Name of the drip to trigger.
*/
function drip(string memory _name) external {
DripState storage state = drips[_name];
// Make sure the drip can be executed.
require(
executable(_name) == true,
"Drippie: drip cannot be executed at this time, try again later"
);
// Update the last execution time for this drip before the call. Note that it's entirely
// possible for a drip to be executed multiple times per block or even multiple times
// within the same transaction (via re-entrancy) if the drip interval is set to zero. Users
......@@ -240,6 +276,7 @@ contract Drippie is AssetReceiver {
);
}
emit DripExecuted(_name, msg.sender, block.timestamp);
state.count++;
emit DripExecuted(_name, _name, msg.sender, block.timestamp);
}
}
......@@ -11,7 +11,7 @@ const deployFn: DeployFunction = async (hre) => {
const { deploy } = await hre.deployments.deterministic('AssetReceiver', {
salt: hre.ethers.utils.solidityKeccak256(['string'], ['RetroReceiver']),
from: deployer,
args: [config.retroReceiverOwner],
args: [config.ddd],
log: true,
})
......@@ -19,6 +19,5 @@ const deployFn: DeployFunction = async (hre) => {
}
deployFn.tags = ['RetroReceiver']
deployFn.dependencies = ['OptimismAuthority']
export default deployFn
/* Imports: External */
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { getDeployConfig } from '../src'
/**
 * Deterministically deploys the TeleportrWithdrawer contract. Ownership is
 * assigned to the `ddd` address from the network's deploy config.
 */
const deployFn: DeployFunction = async (hre) => {
  const { deployer } = await hre.getNamedAccounts()
  const config = getDeployConfig(hre.network.name)

  // Fixed salt keeps the deployment address deterministic.
  const salt = hre.ethers.utils.solidityKeccak256(
    ['string'],
    ['TeleportrWithdrawer']
  )

  const { deploy } = await hre.deployments.deterministic(
    'TeleportrWithdrawer',
    {
      salt,
      from: deployer,
      args: [config.ddd],
      log: true,
    }
  )

  await deploy()
}

deployFn.tags = ['TeleportrWithdrawer']

export default deployFn
......@@ -11,7 +11,7 @@ const deployFn: DeployFunction = async (hre) => {
const { deploy } = await hre.deployments.deterministic('Drippie', {
salt: hre.ethers.utils.solidityKeccak256(['string'], ['Drippie']),
from: deployer,
args: [config.drippieOwner],
args: [config.ddd],
log: true,
})
......@@ -19,6 +19,5 @@ const deployFn: DeployFunction = async (hre) => {
}
deployFn.tags = ['Drippie']
deployFn.dependencies = ['OptimismAuthority']
export default deployFn
......@@ -27,7 +27,7 @@ const config: HardhatUserConfig = {
},
},
},
opkovan: {
'optimism-kovan': {
chainId: 69,
url: 'https://kovan.optimism.io',
verify: {
......@@ -97,7 +97,10 @@ const config: HardhatUserConfig = {
},
},
namedAccounts: {
deployer: `ledger://${getenv('LEDGER_ADDRESS')}`,
deployer: {
default: `ledger://${getenv('LEDGER_ADDRESS')}`,
hardhat: 0,
},
},
}
......
......@@ -5,7 +5,7 @@ import { Contract } from 'ethers'
import { expect } from '../../setup'
import { decodeSolidityRevert, deploy } from '../../helpers'
describe('AssetReceiver', () => {
describe('Transactor', () => {
let signer1: SignerWithAddress
let signer2: SignerWithAddress
before('signer setup', async () => {
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment