Commit 606873df authored by Mark Tyneway's avatar Mark Tyneway Committed by GitHub

Merge pull request #2025 from ethereum-optimism/develop

Develop -> Master
parents 696f5503 3174f034
---
'@eth-optimism/integration-tests': patch
---
Updates to support nightly actor tests
---
'@eth-optimism/proxyd': minor
---
Add request/response payload size metrics to proxyd
---
'@eth-optimism/data-transport-layer': patch
---
Use a smaller filter query when searching for the L1 start height. The start height should be configured explicitly so that the search does not need to run at all, because with a smaller filter the search can take a long time.
---
'@eth-optimism/proxyd': minor
---
cache immutable RPC responses in proxyd
---
'@eth-optimism/op-exporter': patch
---
Fixes panic caused by version initialized to nil
---
'@eth-optimism/proxyd': minor
---
Add X-Forwarded-For header when proxying RPCs on proxyd
---
'@eth-optimism/gas-oracle': patch
'@eth-optimism/contracts': patch
'@eth-optimism/data-transport-layer': patch
---
String update to change the system name from OE to Optimism
---
'@eth-optimism/op-exporter': patch
---
Added version metrics
---
'@eth-optimism/l2geth': patch
---
Implement updated timestamp logic
---
'@eth-optimism/l2geth': patch
---
changed the default address to be address(0) in `call`
---
'@eth-optimism/message-relayer': patch
---
Fix docker build
---
'@eth-optimism/batch-submitter': patch
---
Properly clear state root batch txs on startup
---
'@eth-optimism/contracts': patch
---
Update hardhat task for managing the gas oracle
---
'@eth-optimism/contracts': patch
---
Remove legacy bin/deploy.ts script
---
'@eth-optimism/integration-tests': patch
---
Update timestamp assertion for new logic
version: 2.1 version: 2.1
orbs: orbs:
gcp-gke: circleci/gcp-gke@1.3.0 gcp-gke: circleci/gcp-gke@1.3.0
slack: circleci/slack@4.5.1
slack-fail-post-step: &slack-fail-post-step
post-steps:
- slack/notify:
channel: $SLACK_DEFAULT_CHANNEL
event: fail
custom: |
{
"text": "",
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "🔴 Nightly build failed!"
}
},
{
"type": "actions",
"elements": [
{
"type": "button",
"text": {
"type": "plain_text",
"text": "View Job"
},
"url": "${CIRCLE_BUILD_URL}"
}
]
}
]
}
commands: commands:
build-dockerfile: build-dockerfile:
parameters: parameters:
...@@ -78,6 +110,13 @@ jobs: ...@@ -78,6 +110,13 @@ jobs:
image-name: integration-tests image-name: integration-tests
target: integration-tests target: integration-tests
dockerfile: ./ops/docker/Dockerfile.packages dockerfile: ./ops/docker/Dockerfile.packages
build-proxyd:
docker:
- image: cimg/base:2021.04
steps:
- build-dockerfile:
image-name: proxyd
dockerfile: ./go/proxyd/Dockerfile
deploy-nightly: deploy-nightly:
docker: docker:
- image: cimg/base:2021.04 - image: cimg/base:2021.04
...@@ -101,6 +140,15 @@ jobs: ...@@ -101,6 +140,15 @@ jobs:
kubectl rollout restart statefulset nightly-go-batch-submitter --namespace nightly kubectl rollout restart statefulset nightly-go-batch-submitter --namespace nightly
kubectl rollout restart statefulset nightly-dtl --namespace nightly kubectl rollout restart statefulset nightly-dtl --namespace nightly
kubectl rollout restart deployment nightly-gas-oracle --namespace nightly kubectl rollout restart deployment nightly-gas-oracle --namespace nightly
kubectl rollout restart deployment edge-proxyd --namespace nightly
notify:
docker:
- image: cimg/base:2021.04
steps:
- run:
name: Success
command: |
echo "Dummy job."
workflows: workflows:
...@@ -114,21 +162,50 @@ workflows: ...@@ -114,21 +162,50 @@ workflows:
- develop - develop
jobs: jobs:
- build-dtl: - build-dtl:
context: optimism context:
- optimism
- slack
<<: *slack-fail-post-step
- build-batch-submitter: - build-batch-submitter:
context: optimism context:
- optimism
- slack
<<: *slack-fail-post-step
- build-deployer: - build-deployer:
context: optimism context:
- optimism
- slack
<<: *slack-fail-post-step
- build-l2geth: - build-l2geth:
context: optimism context:
- optimism
- slack
<<: *slack-fail-post-step
- build-gas-oracle: - build-gas-oracle:
context: optimism context:
- optimism
- slack
<<: *slack-fail-post-step
- build-integration-tests: - build-integration-tests:
context: optimism context:
- optimism
- slack
<<: *slack-fail-post-step
- build-go-batch-submitter: - build-go-batch-submitter:
context: optimism context:
- optimism
- slack
<<: *slack-fail-post-step
- build-proxyd:
context:
- optimism
- slack
<<: *slack-fail-post-step
- deploy-nightly: - deploy-nightly:
context: optimism context:
- optimism
- slack
<<: *slack-fail-post-step
requires: requires:
- build-dtl - build-dtl
- build-batch-submitter - build-batch-submitter
...@@ -136,4 +213,38 @@ workflows: ...@@ -136,4 +213,38 @@ workflows:
- build-deployer - build-deployer
- build-l2geth - build-l2geth
- build-gas-oracle - build-gas-oracle
- build-integration-tests - build-integration-tests
\ No newline at end of file - build-proxyd
- notify:
context: slack
requires:
- deploy-nightly
post-steps:
- slack/notify:
custom: |
{
"text": "",
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "✅ Nightly successfully deployed."
}
},
{
"type": "actions",
"elements": [
{
"type": "button",
"text": {
"type": "plain_text",
"text": "View Job"
},
"url": "${CIRCLE_BUILD_URL}"
}
]
}
]
}
event: always
\ No newline at end of file
...@@ -104,7 +104,7 @@ jobs: ...@@ -104,7 +104,7 @@ jobs:
- uses: codecov/codecov-action@v1 - uses: codecov/codecov-action@v1
with: with:
files: ./packages/contracts/coverage.json files: ./packages/contracts/coverage.json
fail_ci_if_error: false fail_ci_if_error: true
verbose: true verbose: true
flags: contracts flags: contracts
- uses: codecov/codecov-action@v1 - uses: codecov/codecov-action@v1
......
...@@ -98,7 +98,7 @@ Use the above commands to recompile the packages. ...@@ -98,7 +98,7 @@ Use the above commands to recompile the packages.
### Building the rest of the system ### Building the rest of the system
If you want to run an Optimistic Ethereum node OR **if you want to run the integration tests**, you'll need to build the rest of the system. If you want to run an Optimism node OR **if you want to run the integration tests**, you'll need to build the rest of the system.
```bash ```bash
cd ops cd ops
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
## TL;DR ## TL;DR
This is the primary place where [Optimism](https://optimism.io) works on stuff related to [Optimistic Ethereum](https://optimistic.etherscan.io/). This is where [Optimism](https://optimism.io) gets built.
## Documentation ## Documentation
...@@ -31,16 +31,16 @@ Then check out our list of [good first issues](https://github.com/ethereum-optim ...@@ -31,16 +31,16 @@ Then check out our list of [good first issues](https://github.com/ethereum-optim
<pre> <pre>
root root
├── <a href="./packages">packages</a> ├── <a href="./packages">packages</a>
│ ├── <a href="./packages/contracts">contracts</a>: L1 and L2 smart contracts for Optimistic Ethereum │ ├── <a href="./packages/contracts">contracts</a>: L1 and L2 smart contracts for Optimism
│ ├── <a href="./packages/core-utils">core-utils</a>: Low-level utilities that make building Optimistic Ethereum easier │ ├── <a href="./packages/core-utils">core-utils</a>: Low-level utilities that make building Optimism easier
│ ├── <a href="./packages/common-ts">common-ts</a>: Common tools for building apps in TypeScript │ ├── <a href="./packages/common-ts">common-ts</a>: Common tools for building apps in TypeScript
│ ├── <a href="./packages/data-transport-layer">data-transport-layer</a>: Service for indexing Optimistic Ethereum-related L1 data │ ├── <a href="./packages/data-transport-layer">data-transport-layer</a>: Service for indexing Optimism-related L1 data
│ ├── <a href="./packages/batch-submitter">batch-submitter</a>: Service for submitting batches of transactions and results to L1 │ ├── <a href="./packages/batch-submitter">batch-submitter</a>: Service for submitting batches of transactions and results to L1
│ ├── <a href="./packages/message-relayer">message-relayer</a>: Tool for automatically relaying L1<>L2 messages in development │ ├── <a href="./packages/message-relayer">message-relayer</a>: Tool for automatically relaying L1<>L2 messages in development
│ └── <a href="./packages/replica-healthcheck">replica-healthcheck</a>: Service for monitoring the health of a replica node │ └── <a href="./packages/replica-healthcheck">replica-healthcheck</a>: Service for monitoring the health of a replica node
├── <a href="./l2geth">l2geth</a>: Optimistic Ethereum client software, a fork of <a href="https://github.com/ethereum/go-ethereum/tree/v1.9.10">geth v1.9.10</a> ├── <a href="./l2geth">l2geth</a>: Optimism client software, a fork of <a href="https://github.com/ethereum/go-ethereum/tree/v1.9.10">geth v1.9.10</a>
├── <a href="./integration-tests">integration-tests</a>: Various integration tests for an Optimistic Ethereum network ├── <a href="./integration-tests">integration-tests</a>: Various integration tests for the Optimism network
└── <a href="./ops">ops</a>: Tools for running Optimistic Ethereum nodes and networks └── <a href="./ops">ops</a>: Tools for running Optimism nodes and networks
</pre> </pre>
## Branching Model and Releases ## Branching Model and Releases
...@@ -64,7 +64,7 @@ Please read the linked post if you're planning to make frequent PRs into this re ...@@ -64,7 +64,7 @@ Please read the linked post if you're planning to make frequent PRs into this re
The `master` branch contains the code for our latest "stable" releases. The `master` branch contains the code for our latest "stable" releases.
Updates from `master` always come from the `develop` branch. Updates from `master` always come from the `develop` branch.
We only ever update the `master` branch when we intend to deploy code within the `develop` to the Optimistic Ethereum mainnet. We only ever update the `master` branch when we intend to deploy code within the `develop` to the Optimism mainnet.
Our update process takes the form of a PR merging the `develop` branch into the `master` branch. Our update process takes the form of a PR merging the `develop` branch into the `master` branch.
### The `develop` branch ### The `develop` branch
......
...@@ -4,7 +4,6 @@ import ( ...@@ -4,7 +4,6 @@ import (
"context" "context"
"crypto/ecdsa" "crypto/ecdsa"
"fmt" "fmt"
"math/big"
"net/http" "net/http"
"os" "os"
"strconv" "strconv"
...@@ -13,6 +12,7 @@ import ( ...@@ -13,6 +12,7 @@ import (
"github.com/ethereum-optimism/optimism/go/batch-submitter/drivers/proposer" "github.com/ethereum-optimism/optimism/go/batch-submitter/drivers/proposer"
"github.com/ethereum-optimism/optimism/go/batch-submitter/drivers/sequencer" "github.com/ethereum-optimism/optimism/go/batch-submitter/drivers/sequencer"
"github.com/ethereum-optimism/optimism/go/batch-submitter/txmgr" "github.com/ethereum-optimism/optimism/go/batch-submitter/txmgr"
"github.com/ethereum-optimism/optimism/go/batch-submitter/utils"
l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient" l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
...@@ -159,9 +159,9 @@ func NewBatchSubmitter(cfg Config, gitVersion string) (*BatchSubmitter, error) { ...@@ -159,9 +159,9 @@ func NewBatchSubmitter(cfg Config, gitVersion string) (*BatchSubmitter, error) {
} }
txManagerConfig := txmgr.Config{ txManagerConfig := txmgr.Config{
MinGasPrice: gasPriceFromGwei(1), MinGasPrice: utils.GasPriceFromGwei(1),
MaxGasPrice: gasPriceFromGwei(cfg.MaxGasPriceInGwei), MaxGasPrice: utils.GasPriceFromGwei(cfg.MaxGasPriceInGwei),
GasRetryIncrement: gasPriceFromGwei(cfg.GasRetryIncrement), GasRetryIncrement: utils.GasPriceFromGwei(cfg.GasRetryIncrement),
ResubmissionTimeout: cfg.ResubmissionTimeout, ResubmissionTimeout: cfg.ResubmissionTimeout,
ReceiptQueryInterval: time.Second, ReceiptQueryInterval: time.Second,
} }
...@@ -186,6 +186,7 @@ func NewBatchSubmitter(cfg Config, gitVersion string) (*BatchSubmitter, error) { ...@@ -186,6 +186,7 @@ func NewBatchSubmitter(cfg Config, gitVersion string) (*BatchSubmitter, error) {
Context: ctx, Context: ctx,
Driver: batchTxDriver, Driver: batchTxDriver,
PollInterval: cfg.PollInterval, PollInterval: cfg.PollInterval,
ClearPendingTx: cfg.ClearPendingTxs,
L1Client: l1Client, L1Client: l1Client,
TxManagerConfig: txManagerConfig, TxManagerConfig: txManagerConfig,
}) })
...@@ -212,6 +213,7 @@ func NewBatchSubmitter(cfg Config, gitVersion string) (*BatchSubmitter, error) { ...@@ -212,6 +213,7 @@ func NewBatchSubmitter(cfg Config, gitVersion string) (*BatchSubmitter, error) {
Context: ctx, Context: ctx,
Driver: batchStateDriver, Driver: batchStateDriver,
PollInterval: cfg.PollInterval, PollInterval: cfg.PollInterval,
ClearPendingTx: cfg.ClearPendingTxs,
L1Client: l1Client, L1Client: l1Client,
TxManagerConfig: txManagerConfig, TxManagerConfig: txManagerConfig,
}) })
...@@ -333,7 +335,3 @@ func traceRateToFloat64(rate time.Duration) float64 { ...@@ -333,7 +335,3 @@ func traceRateToFloat64(rate time.Duration) float64 {
} }
return rate64 return rate64
} }
func gasPriceFromGwei(gasPriceInGwei uint64) *big.Int {
return new(big.Int).SetUint64(gasPriceInGwei * 1e9)
}
package drivers
import (
"context"
"crypto/ecdsa"
"errors"
"math/big"
"strings"
"github.com/ethereum-optimism/optimism/go/batch-submitter/txmgr"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// ErrClearPendingRetry signals that a transaction from a previously running
// instance confirmed rather than our clearing transaction during startup. In
// this case the caller should retry ClearPendingTx, which will pick up the
// next unused nonce.
var ErrClearPendingRetry = errors.New("retry clear pending txn")
// ClearPendingTx publishes a NOOP transaction at the wallet's next unused
// nonce. This is used on restarts in order to clear the mempool of any prior
// publications and ensure the batch submitter starts submitting from a clean
// slate.
//
// Returns nil once a clearing transaction confirms, ErrClearPendingRetry when
// a transaction from a previous run confirmed instead (caller should retry),
// or any other error encountered while querying/publishing.
func ClearPendingTx(
	name string,
	ctx context.Context,
	txMgr txmgr.TxManager,
	l1Client L1Client,
	walletAddr common.Address,
	privKey *ecdsa.PrivateKey,
	chainID *big.Int,
) error {
	// Query for the submitter's current nonce.
	nonce, err := l1Client.NonceAt(ctx, walletAddr, nil)
	if err != nil {
		log.Error(name+" unable to get current nonce",
			"err", err)
		return err
	}

	// Derive a cancelable context so the send closure below can abort the
	// surrounding txMgr.Send retry loop when a prior tx wins the nonce race.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Construct the clearing transaction submission closure that will attempt
	// to send a clearing transaction at the given nonce and gas price.
	sendTx := func(
		ctx context.Context,
		gasPrice *big.Int,
	) (*types.Transaction, error) {
		log.Info(name+" clearing pending tx", "nonce", nonce,
			"gasPrice", gasPrice)

		signedTx, err := SignClearingTx(
			ctx, walletAddr, nonce, gasPrice, l1Client, privKey, chainID,
		)
		if err != nil {
			log.Error(name+" unable to sign clearing tx", "nonce", nonce,
				"gasPrice", gasPrice, "err", err)
			return nil, err
		}
		txHash := signedTx.Hash()

		err = l1Client.SendTransaction(ctx, signedTx)
		switch {

		// Clearing transaction successfully confirmed.
		case err == nil:
			log.Info(name+" submitted clearing tx", "nonce", nonce,
				"gasPrice", gasPrice, "txHash", txHash)

			return signedTx, nil

		// Getting a nonce too low error implies that a previous transaction in
		// the mempool has confirmed and we should abort trying to publish at
		// this nonce.
		case strings.Contains(err.Error(), core.ErrNonceTooLow.Error()):
			log.Info(name + " transaction from previous restart confirmed, " +
				"aborting mempool clearing")
			cancel()
			return nil, context.Canceled

		// An unexpected error occurred. This also handles the case where the
		// clearing transaction has not yet bested the gas price a prior
		// transaction in the mempool at this nonce. In such a case we will
		// continue until our ratcheting strategy overtakes the old
		// transaction, or abort if the old one confirms.
		default:
			log.Error(name+" unable to submit clearing tx",
				"nonce", nonce, "gasPrice", gasPrice, "txHash", txHash,
				"err", err)
			return nil, err
		}
	}

	receipt, err := txMgr.Send(ctx, sendTx)
	switch {

	// If the current context is canceled, a prior transaction in the mempool
	// confirmed. The caller should retry, which will use the next nonce, before
	// proceeding.
	case err == context.Canceled:
		log.Info(name + " transaction from previous restart confirmed, " +
			"proceeding to startup")
		return ErrClearPendingRetry

	// Otherwise we were unable to confirm our transaction, this method should
	// be retried by the caller.
	case err != nil:
		log.Warn(name+" unable to send clearing tx", "nonce", nonce,
			"err", err)
		return err

	// We succeeded in confirming a clearing transaction. Proceed to startup as
	// normal.
	default:
		log.Info(name+" cleared pending tx", "nonce", nonce,
			"txHash", receipt.TxHash)
		return nil
	}
}
// SignClearingTx builds and signs a clearing transaction that sends 0 ETH from
// walletAddr back to itself at the supplied nonce and gas price. EstimateGas
// is consulted to obtain an appropriate gas limit. Returns the signed
// transaction, or an error from gas estimation or signing.
func SignClearingTx(
	ctx context.Context,
	walletAddr common.Address,
	nonce uint64,
	gasPrice *big.Int,
	l1Client L1Client,
	privKey *ecdsa.PrivateKey,
	chainID *big.Int,
) (*types.Transaction, error) {
	// Ask the backend for a gas limit appropriate for the 0-value self-send.
	callMsg := ethereum.CallMsg{
		To:       &walletAddr,
		GasPrice: gasPrice,
		Value:    nil,
		Data:     nil,
	}
	gasLimit, err := l1Client.EstimateGas(ctx, callMsg)
	if err != nil {
		return nil, err
	}

	unsignedTx := CraftClearingTx(walletAddr, nonce, gasPrice, gasLimit)
	signer := types.LatestSignerForChainID(chainID)
	return types.SignTx(unsignedTx, signer, privKey)
}
// CraftClearingTx builds an unsigned legacy transaction that transfers 0 ETH
// from the wallet back to itself, carrying no calldata. The caller supplies
// the nonce, gas price, and gas limit verbatim.
func CraftClearingTx(
	walletAddr common.Address,
	nonce uint64,
	gasPrice *big.Int,
	gasLimit uint64,
) *types.Transaction {
	inner := &types.LegacyTx{
		To:       &walletAddr,
		Nonce:    nonce,
		GasPrice: gasPrice,
		Gas:      gasLimit,
		Value:    nil,
		Data:     nil,
	}
	return types.NewTx(inner)
}
package drivers_test
import (
"context"
"crypto/ecdsa"
"errors"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/go/batch-submitter/drivers"
"github.com/ethereum-optimism/optimism/go/batch-submitter/mock"
"github.com/ethereum-optimism/optimism/go/batch-submitter/txmgr"
"github.com/ethereum-optimism/optimism/go/batch-submitter/utils"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require"
)
// init generates a fresh private key and derives the shared test fixtures
// (wallet address, chain ID, gas price) used throughout this file. The key is
// random on every test run, so tests must not depend on its exact value.
func init() {
	privKey, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}

	testPrivKey = privKey
	testWalletAddr = crypto.PubkeyToAddress(privKey.PublicKey)
	testChainID = new(big.Int).SetUint64(1)
	testGasPrice = new(big.Int).SetUint64(3)
}
// Shared fixtures for the tests in this file. testPrivKey, testWalletAddr,
// testChainID, and testGasPrice are populated in init; the rest are constants.
var (
	testPrivKey    *ecdsa.PrivateKey // set in init
	testWalletAddr common.Address    // derived from testPrivKey in init
	testChainID    *big.Int          // 1
	testNonce      = uint64(2)
	testGasPrice   *big.Int // 3
	testGasLimit   = uint64(4)
)
// TestCraftClearingTx asserts that CraftClearingTx produces the expected
// unsigned clearing transaction.
func TestCraftClearingTx(t *testing.T) {
	tx := drivers.CraftClearingTx(
		testWalletAddr, testNonce, testGasPrice, testGasLimit,
	)

	// The clearing tx must be a 0-value self-send with no calldata.
	require.Equal(t, &testWalletAddr, tx.To())
	require.Equal(t, new(big.Int), tx.Value())
	require.Nil(t, tx.Data())

	// Caller-supplied fields must be passed through unchanged.
	require.Equal(t, testNonce, tx.Nonce())
	require.Equal(t, testGasPrice, tx.GasPrice())
	require.Equal(t, testGasLimit, tx.Gas())
}
// TestSignClearingTxEstimateGasSuccess asserts that we will sign a properly
// formed clearing transaction when the call to EstimateGas succeeds.
func TestSignClearingTxEstimateGasSuccess(t *testing.T) {
	l1Client := mock.NewL1Client(mock.L1ClientConfig{
		// Return a fixed gas limit so it can be asserted on below.
		EstimateGas: func(_ context.Context, _ ethereum.CallMsg) (uint64, error) {
			return testGasLimit, nil
		},
	})

	tx, err := drivers.SignClearingTx(
		context.Background(), testWalletAddr, testNonce, testGasPrice, l1Client,
		testPrivKey, testChainID,
	)
	require.Nil(t, err)
	require.NotNil(t, tx)
	require.Equal(t, &testWalletAddr, tx.To())
	require.Equal(t, testNonce, tx.Nonce())
	require.Equal(t, testGasPrice, tx.GasPrice())
	require.Equal(t, testGasLimit, tx.Gas())
	require.Equal(t, new(big.Int), tx.Value())
	require.Nil(t, tx.Data())

	// Finally, ensure the sender is correct.
	sender, err := types.Sender(types.LatestSignerForChainID(testChainID), tx)
	require.Nil(t, err)
	require.Equal(t, testWalletAddr, sender)
}
// TestSignClearingTxEstimateGasFail asserts that signing a clearing
// transaction fails when the underlying call to EstimateGas errors, and that
// the estimation error is surfaced unchanged to the caller.
func TestSignClearingTxEstimateGasFail(t *testing.T) {
	expErr := errors.New("estimate gas")
	l1Client := mock.NewL1Client(mock.L1ClientConfig{
		// Force gas estimation to fail.
		EstimateGas: func(_ context.Context, _ ethereum.CallMsg) (uint64, error) {
			return 0, expErr
		},
	})

	tx, err := drivers.SignClearingTx(
		context.Background(), testWalletAddr, testNonce, testGasPrice, l1Client,
		testPrivKey, testChainID,
	)
	require.Nil(t, tx)
	require.Equal(t, expErr, err)
}
// clearPendingTxHarness bundles the mocked L1 client and the tx manager under
// test so each ClearPendingTx test shares the same setup path.
type clearPendingTxHarness struct {
	l1Client drivers.L1Client
	txMgr    txmgr.TxManager
}

// newClearPendingTxHarness constructs a harness around the given mock config.
// Defaults are installed for NonceAt and EstimateGas when the caller does not
// override them, so each test only specifies the behavior it cares about.
func newClearPendingTxHarness(l1ClientConfig mock.L1ClientConfig) *clearPendingTxHarness {
	if l1ClientConfig.NonceAt == nil {
		l1ClientConfig.NonceAt = func(_ context.Context, _ common.Address, _ *big.Int) (uint64, error) {
			return testNonce, nil
		}
	}
	if l1ClientConfig.EstimateGas == nil {
		l1ClientConfig.EstimateGas = func(_ context.Context, _ ethereum.CallMsg) (uint64, error) {
			return testGasLimit, nil
		}
	}

	l1Client := mock.NewL1Client(l1ClientConfig)
	// Short resubmission timeout and receipt polling keep tests fast.
	txMgr := txmgr.NewSimpleTxManager("test", txmgr.Config{
		MinGasPrice:          utils.GasPriceFromGwei(1),
		MaxGasPrice:          utils.GasPriceFromGwei(100),
		GasRetryIncrement:    utils.GasPriceFromGwei(5),
		ResubmissionTimeout:  time.Second,
		ReceiptQueryInterval: 50 * time.Millisecond,
	}, l1Client)

	return &clearPendingTxHarness{
		l1Client: l1Client,
		txMgr:    txMgr,
	}
}
// TestClearPendingTxClearingTxConfirms asserts the happy path where our
// clearing transaction confirms unobstructed.
func TestClearPendingTxClearingTxConfirms(t *testing.T) {
	h := newClearPendingTxHarness(mock.L1ClientConfig{
		// Accept the published clearing tx...
		SendTransaction: func(_ context.Context, _ *types.Transaction) error {
			return nil
		},
		// ...and immediately report it as mined.
		TransactionReceipt: func(_ context.Context, txHash common.Hash) (*types.Receipt, error) {
			return &types.Receipt{
				TxHash: txHash,
			}, nil
		},
	})

	err := drivers.ClearPendingTx(
		"test", context.Background(), h.txMgr, h.l1Client, testWalletAddr,
		testPrivKey, testChainID,
	)
	require.Nil(t, err)
}
// TestClearPendingTxPreviousTxConfirms asserts that if the mempool starts
// rejecting our transactions because the nonce is too low then ClearPendingTx
// will abort continuing to publish a clearing transaction.
func TestClearPendingTxPreviousTxConfirms(t *testing.T) {
	h := newClearPendingTxHarness(mock.L1ClientConfig{
		// Reject every publication as though an earlier tx already consumed
		// the nonce.
		SendTransaction: func(_ context.Context, _ *types.Transaction) error {
			return core.ErrNonceTooLow
		},
	})

	err := drivers.ClearPendingTx(
		"test", context.Background(), h.txMgr, h.l1Client, testWalletAddr,
		testPrivKey, testChainID,
	)
	require.Equal(t, drivers.ErrClearPendingRetry, err)
}
// TestClearPendingTxTimeout asserts that ClearPendingTx surfaces
// txmgr.ErrPublishTimeout when the clearing transaction never confirms and no
// prior transaction confirms either.
func TestClearPendingTxTimeout(t *testing.T) {
	h := newClearPendingTxHarness(mock.L1ClientConfig{
		// Publication always succeeds...
		SendTransaction: func(_ context.Context, _ *types.Transaction) error {
			return nil
		},
		// ...but no receipt ever materializes, so confirmation never happens.
		TransactionReceipt: func(_ context.Context, _ common.Hash) (*types.Receipt, error) {
			return nil, nil
		},
	})

	err := drivers.ClearPendingTx(
		"test", context.Background(), h.txMgr, h.l1Client, testWalletAddr,
		testPrivKey, testChainID,
	)
	require.Equal(t, txmgr.ErrPublishTimeout, err)
}
package drivers
import (
"context"
"math/big"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// L1Client is an abstraction over the L1 Ethereum client functionality
// required by the batch submitter. It allows production code to use a real
// client while tests substitute a mock.
type L1Client interface {
	// EstimateGas tries to estimate the gas needed to execute a specific
	// transaction based on the current pending state of the backend blockchain.
	// There is no guarantee that this is the true gas limit requirement as
	// other transactions may be added or removed by miners, but it should
	// provide a basis for setting a reasonable default.
	EstimateGas(context.Context, ethereum.CallMsg) (uint64, error)

	// NonceAt returns the account nonce of the given account. The block number
	// can be nil, in which case the nonce is taken from the latest known block.
	NonceAt(context.Context, common.Address, *big.Int) (uint64, error)

	// SendTransaction injects a signed transaction into the pending pool for
	// execution.
	//
	// If the transaction was a contract creation use the TransactionReceipt
	// method to get the contract address after the transaction has been mined.
	SendTransaction(context.Context, *types.Transaction) error

	// TransactionReceipt returns the receipt of a transaction by transaction
	// hash. Note that the receipt is not available for pending transactions.
	TransactionReceipt(context.Context, common.Hash) (*types.Receipt, error)
}
...@@ -5,13 +5,17 @@ import ( ...@@ -5,13 +5,17 @@ import (
"crypto/ecdsa" "crypto/ecdsa"
"fmt" "fmt"
"math/big" "math/big"
"time" "strings"
"github.com/ethereum-optimism/optimism/go/batch-submitter/bindings/ctc" "github.com/ethereum-optimism/optimism/go/batch-submitter/bindings/ctc"
"github.com/ethereum-optimism/optimism/go/batch-submitter/bindings/scc" "github.com/ethereum-optimism/optimism/go/batch-submitter/bindings/scc"
"github.com/ethereum-optimism/optimism/go/batch-submitter/drivers"
"github.com/ethereum-optimism/optimism/go/batch-submitter/metrics" "github.com/ethereum-optimism/optimism/go/batch-submitter/metrics"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types" "github.com/ethereum-optimism/optimism/go/batch-submitter/txmgr"
l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient" l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum-optimism/optimism/l2geth/log"
"github.com/ethereum-optimism/optimism/l2geth/params"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
...@@ -19,6 +23,9 @@ import ( ...@@ -19,6 +23,9 @@ import (
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
) )
// stateRootSize is the size in bytes of a state root.
const stateRootSize = 32
var bigOne = new(big.Int).SetUint64(1) //nolint:unused var bigOne = new(big.Int).SetUint64(1) //nolint:unused
type Config struct { type Config struct {
...@@ -34,11 +41,12 @@ type Config struct { ...@@ -34,11 +41,12 @@ type Config struct {
} }
type Driver struct { type Driver struct {
cfg Config cfg Config
sccContract *scc.StateCommitmentChain sccContract *scc.StateCommitmentChain
ctcContract *ctc.CanonicalTransactionChain rawSccContract *bind.BoundContract
walletAddr common.Address ctcContract *ctc.CanonicalTransactionChain
metrics *metrics.Metrics walletAddr common.Address
metrics *metrics.Metrics
} }
func NewDriver(cfg Config) (*Driver, error) { func NewDriver(cfg Config) (*Driver, error) {
...@@ -56,14 +64,26 @@ func NewDriver(cfg Config) (*Driver, error) { ...@@ -56,14 +64,26 @@ func NewDriver(cfg Config) (*Driver, error) {
return nil, err return nil, err
} }
parsed, err := abi.JSON(strings.NewReader(
scc.StateCommitmentChainABI,
))
if err != nil {
return nil, err
}
rawSccContract := bind.NewBoundContract(
cfg.SCCAddr, parsed, cfg.L1Client, cfg.L1Client, cfg.L1Client,
)
walletAddr := crypto.PubkeyToAddress(cfg.PrivKey.PublicKey) walletAddr := crypto.PubkeyToAddress(cfg.PrivKey.PublicKey)
return &Driver{ return &Driver{
cfg: cfg, cfg: cfg,
sccContract: sccContract, sccContract: sccContract,
ctcContract: ctcContract, rawSccContract: rawSccContract,
walletAddr: walletAddr, ctcContract: ctcContract,
metrics: metrics.NewMetrics(cfg.Name), walletAddr: walletAddr,
metrics: metrics.NewMetrics(cfg.Name),
}, nil }, nil
} }
...@@ -82,6 +102,21 @@ func (d *Driver) Metrics() *metrics.Metrics { ...@@ -82,6 +102,21 @@ func (d *Driver) Metrics() *metrics.Metrics {
return d.metrics return d.metrics
} }
// ClearPendingTx a publishes a transaction at the next available nonce in order
// to clear any transactions in the mempool left over from a prior running
// instance of the batch submitter.
func (d *Driver) ClearPendingTx(
ctx context.Context,
txMgr txmgr.TxManager,
l1Client *ethclient.Client,
) error {
return drivers.ClearPendingTx(
d.cfg.Name, ctx, txMgr, l1Client, d.walletAddr, d.cfg.PrivKey,
d.cfg.ChainID,
)
}
// GetBatchBlockRange returns the start and end L2 block heights that need to be // GetBatchBlockRange returns the start and end L2 block heights that need to be
// processed. Note that the end value is *exclusive*, therefore if the returned // processed. Note that the end value is *exclusive*, therefore if the returned
// values are identical nothing needs to be processed. // values are identical nothing needs to be processed.
...@@ -89,7 +124,6 @@ func (d *Driver) GetBatchBlockRange( ...@@ -89,7 +124,6 @@ func (d *Driver) GetBatchBlockRange(
ctx context.Context) (*big.Int, *big.Int, error) { ctx context.Context) (*big.Int, *big.Int, error) {
blockOffset := new(big.Int).SetUint64(d.cfg.BlockOffset) blockOffset := new(big.Int).SetUint64(d.cfg.BlockOffset)
maxBatchSize := new(big.Int).SetUint64(1)
start, err := d.sccContract.GetTotalElements(&bind.CallOpts{ start, err := d.sccContract.GetTotalElements(&bind.CallOpts{
Pending: false, Pending: false,
...@@ -100,20 +134,14 @@ func (d *Driver) GetBatchBlockRange( ...@@ -100,20 +134,14 @@ func (d *Driver) GetBatchBlockRange(
} }
start.Add(start, blockOffset) start.Add(start, blockOffset)
totalElements, err := d.ctcContract.GetTotalElements(&bind.CallOpts{ end, err := d.ctcContract.GetTotalElements(&bind.CallOpts{
Pending: false, Pending: false,
Context: ctx, Context: ctx,
}) })
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
totalElements.Add(totalElements, blockOffset) end.Add(end, blockOffset)
// Take min(start + blockOffset + maxBatchSize, totalElements).
end := new(big.Int).Add(start, maxBatchSize)
if totalElements.Cmp(end) < 0 {
end.Set(totalElements)
}
if start.Cmp(end) > 0 { if start.Cmp(end) > 0 {
return nil, nil, fmt.Errorf("invalid range, "+ return nil, nil, fmt.Errorf("invalid range, "+
...@@ -123,36 +151,43 @@ func (d *Driver) GetBatchBlockRange( ...@@ -123,36 +151,43 @@ func (d *Driver) GetBatchBlockRange(
return start, end, nil return start, end, nil
} }
// SubmitBatchTx transforms the L2 blocks between start and end into a batch // CraftBatchTx transforms the L2 blocks between start and end into a batch
// transaction using the given nonce and gasPrice. The final transaction is // transaction using the given nonce. A dummy gas price is used in the resulting
// published and returned to the call. // transaction to use for size estimation.
func (d *Driver) SubmitBatchTx( //
// NOTE: This method SHOULD NOT publish the resulting transaction.
func (d *Driver) CraftBatchTx(
ctx context.Context, ctx context.Context,
start, end, nonce, gasPrice *big.Int) (*types.Transaction, error) { start, end, nonce *big.Int,
) (*types.Transaction, error) {
batchTxBuildStart := time.Now() name := d.cfg.Name
var blocks []*l2types.Block log.Info(name+" crafting batch tx", "start", start, "end", end,
"nonce", nonce)
var (
stateRoots [][stateRootSize]byte
totalStateRootSize uint64
)
for i := new(big.Int).Set(start); i.Cmp(end) < 0; i.Add(i, bigOne) { for i := new(big.Int).Set(start); i.Cmp(end) < 0; i.Add(i, bigOne) {
// Consume state roots until reach our maximum tx size.
if totalStateRootSize+stateRootSize > d.cfg.MaxTxSize {
break
}
block, err := d.cfg.L2Client.BlockByNumber(ctx, i) block, err := d.cfg.L2Client.BlockByNumber(ctx, i)
if err != nil { if err != nil {
return nil, err return nil, err
} }
blocks = append(blocks, block) totalStateRootSize += stateRootSize
// TODO(conner): remove when moving to multiple blocks
break //nolint
}
var stateRoots = make([][32]byte, 0, len(blocks))
for _, block := range blocks {
stateRoots = append(stateRoots, block.Root()) stateRoots = append(stateRoots, block.Root())
} }
batchTxBuildTime := float64(time.Since(batchTxBuildStart) / time.Millisecond) d.metrics.NumElementsPerBatch.Observe(float64(len(stateRoots)))
d.metrics.BatchTxBuildTime.Set(batchTxBuildTime)
d.metrics.NumTxPerBatch.Observe(float64(len(blocks))) log.Info(name+" batch constructed", "num_state_roots", len(stateRoots))
opts, err := bind.NewKeyedTransactorWithChainID( opts, err := bind.NewKeyedTransactorWithChainID(
d.cfg.PrivKey, d.cfg.ChainID, d.cfg.PrivKey, d.cfg.ChainID,
...@@ -160,12 +195,35 @@ func (d *Driver) SubmitBatchTx( ...@@ -160,12 +195,35 @@ func (d *Driver) SubmitBatchTx(
if err != nil { if err != nil {
return nil, err return nil, err
} }
opts.Nonce = nonce
opts.Context = ctx opts.Context = ctx
opts.GasPrice = gasPrice opts.Nonce = nonce
opts.GasPrice = big.NewInt(params.GWei) // dummy
opts.NoSend = true
blockOffset := new(big.Int).SetUint64(d.cfg.BlockOffset) blockOffset := new(big.Int).SetUint64(d.cfg.BlockOffset)
offsetStartsAtIndex := new(big.Int).Sub(start, blockOffset) offsetStartsAtIndex := new(big.Int).Sub(start, blockOffset)
return d.sccContract.AppendStateBatch(opts, stateRoots, offsetStartsAtIndex) return d.sccContract.AppendStateBatch(opts, stateRoots, offsetStartsAtIndex)
} }
// SubmitBatchTx using the passed transaction as a template, signs and publishes
// an otherwise identical transaction after setting the provided gas price. The
// final transaction is returned to the caller.
func (d *Driver) SubmitBatchTx(
ctx context.Context,
tx *types.Transaction,
gasPrice *big.Int,
) (*types.Transaction, error) {
opts, err := bind.NewKeyedTransactorWithChainID(
d.cfg.PrivKey, d.cfg.ChainID,
)
if err != nil {
return nil, err
}
opts.Context = ctx
opts.Nonce = new(big.Int).SetUint64(tx.Nonce())
opts.GasPrice = gasPrice
return d.rawSccContract.RawTransact(opts, tx.Data())
}
...@@ -27,7 +27,7 @@ type BatchElement struct { ...@@ -27,7 +27,7 @@ type BatchElement struct {
// Tx is the optional transaction that was applied in this batch. // Tx is the optional transaction that was applied in this batch.
// //
// NOTE: This field will only be populated for sequencer txs. // NOTE: This field will only be populated for sequencer txs.
Tx *l2types.Transaction Tx *CachedTx
} }
// IsSequencerTx returns true if this batch contains a tx that needs to be // IsSequencerTx returns true if this batch contains a tx that needs to be
...@@ -54,14 +54,15 @@ func BatchElementFromBlock(block *l2types.Block) BatchElement { ...@@ -54,14 +54,15 @@ func BatchElementFromBlock(block *l2types.Block) BatchElement {
isSequencerTx := tx.QueueOrigin() == l2types.QueueOriginSequencer isSequencerTx := tx.QueueOrigin() == l2types.QueueOriginSequencer
// Only include sequencer txs in the returned BatchElement. // Only include sequencer txs in the returned BatchElement.
if !isSequencerTx { var cachedTx *CachedTx
tx = nil if isSequencerTx {
cachedTx = NewCachedTx(tx)
} }
return BatchElement{ return BatchElement{
Timestamp: block.Time(), Timestamp: block.Time(),
BlockNumber: l1BlockNumber, BlockNumber: l1BlockNumber,
Tx: tx, Tx: cachedTx,
} }
} }
...@@ -82,7 +83,7 @@ func GenSequencerBatchParams( ...@@ -82,7 +83,7 @@ func GenSequencerBatchParams(
var ( var (
contexts []BatchContext contexts []BatchContext
groupedBlocks []groupedBlock groupedBlocks []groupedBlock
txs []*l2types.Transaction txs []*CachedTx
lastBlockIsSequencerTx bool lastBlockIsSequencerTx bool
lastTimestamp uint64 lastTimestamp uint64
lastBlockNumber uint64 lastBlockNumber uint64
......
...@@ -31,7 +31,7 @@ func TestBatchElementFromBlock(t *testing.T) { ...@@ -31,7 +31,7 @@ func TestBatchElementFromBlock(t *testing.T) {
require.Equal(t, element.Timestamp, expTime) require.Equal(t, element.Timestamp, expTime)
require.Equal(t, element.BlockNumber, expBlockNumber) require.Equal(t, element.BlockNumber, expBlockNumber)
require.True(t, element.IsSequencerTx()) require.True(t, element.IsSequencerTx())
require.Equal(t, element.Tx, expTx) require.Equal(t, element.Tx.Tx(), expTx)
queueMeta := l2types.NewTransactionMeta( queueMeta := l2types.NewTransactionMeta(
new(big.Int).SetUint64(expBlockNumber), 0, nil, new(big.Int).SetUint64(expBlockNumber), 0, nil,
......
package sequencer
import (
"bytes"
"fmt"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types"
)
// CachedTx pairs an L2 transaction with its precomputed RLP serialization,
// so the encoding cost is paid once at construction rather than every time
// the size or raw bytes are needed (e.g. during batch size estimation).
type CachedTx struct {
	// tx is the wrapped L2 transaction.
	tx *l2types.Transaction
	// rawTx is the RLP encoding of tx, computed once in NewCachedTx.
	rawTx []byte
}
// NewCachedTx RLP-serializes tx and returns a CachedTx holding both the
// transaction and its raw encoding. It panics if serialization fails, since
// a sequencer transaction that cannot be RLP-encoded indicates an
// unrecoverable programming error rather than a runtime condition.
func NewCachedTx(tx *l2types.Transaction) *CachedTx {
	var buf bytes.Buffer
	err := tx.EncodeRLP(&buf)
	if err != nil {
		panic(fmt.Sprintf("Unable to encode tx: %v", err))
	}

	cached := &CachedTx{
		tx:    tx,
		rawTx: buf.Bytes(),
	}
	return cached
}
// Tx returns the wrapped L2 transaction.
func (t *CachedTx) Tx() *l2types.Transaction {
	return t.tx
}
// Size returns the length in bytes of the transaction's RLP serialization.
// This reads the cached encoding, so repeated calls do no re-encoding work.
func (t *CachedTx) Size() int {
	return len(t.rawTx)
}
// RawTx returns the cached RLP serialization of the transaction. The slice
// is returned without copying, so callers must treat it as read-only.
func (t *CachedTx) RawTx() []byte {
	return t.rawTx
}
...@@ -3,16 +3,16 @@ package sequencer ...@@ -3,16 +3,16 @@ package sequencer
import ( import (
"context" "context"
"crypto/ecdsa" "crypto/ecdsa"
"encoding/hex"
"fmt" "fmt"
"math/big" "math/big"
"strings" "strings"
"time"
"github.com/ethereum-optimism/optimism/go/batch-submitter/bindings/ctc" "github.com/ethereum-optimism/optimism/go/batch-submitter/bindings/ctc"
"github.com/ethereum-optimism/optimism/go/batch-submitter/drivers"
"github.com/ethereum-optimism/optimism/go/batch-submitter/metrics" "github.com/ethereum-optimism/optimism/go/batch-submitter/metrics"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types" "github.com/ethereum-optimism/optimism/go/batch-submitter/txmgr"
l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient" l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum-optimism/optimism/l2geth/params"
"github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -100,6 +100,21 @@ func (d *Driver) Metrics() *metrics.Metrics { ...@@ -100,6 +100,21 @@ func (d *Driver) Metrics() *metrics.Metrics {
return d.metrics return d.metrics
} }
// ClearPendingTx publishes a transaction at the next available nonce in order
// to clear any transactions in the mempool left over from a prior running
// instance of the batch submitter.
func (d *Driver) ClearPendingTx(
ctx context.Context,
txMgr txmgr.TxManager,
l1Client *ethclient.Client,
) error {
return drivers.ClearPendingTx(
d.cfg.Name, ctx, txMgr, l1Client, d.walletAddr, d.cfg.PrivKey,
d.cfg.ChainID,
)
}
// GetBatchBlockRange returns the start and end L2 block heights that need to be // GetBatchBlockRange returns the start and end L2 block heights that need to be
// processed. Note that the end value is *exclusive*, therefore if the returned // processed. Note that the end value is *exclusive*, therefore if the returned
// values are identical nothing needs to be processed. // values are identical nothing needs to be processed.
...@@ -133,66 +148,106 @@ func (d *Driver) GetBatchBlockRange( ...@@ -133,66 +148,106 @@ func (d *Driver) GetBatchBlockRange(
return start, end, nil return start, end, nil
} }
// SubmitBatchTx transforms the L2 blocks between start and end into a batch // CraftBatchTx transforms the L2 blocks between start and end into a batch
// transaction using the given nonce and gasPrice. The final transaction is // transaction using the given nonce. A dummy gas price is used in the resulting
// published and returned to the call. // transaction to use for size estimation.
func (d *Driver) SubmitBatchTx( //
// NOTE: This method SHOULD NOT publish the resulting transaction.
func (d *Driver) CraftBatchTx(
ctx context.Context, ctx context.Context,
start, end, nonce, gasPrice *big.Int) (*types.Transaction, error) { start, end, nonce *big.Int,
) (*types.Transaction, error) {
name := d.cfg.Name name := d.cfg.Name
log.Info(name+" submitting batch tx", "start", start, "end", end, log.Info(name+" crafting batch tx", "start", start, "end", end,
"gasPrice", gasPrice) "nonce", nonce)
batchTxBuildStart := time.Now()
var blocks []*l2types.Block var (
batchElements []BatchElement
totalTxSize uint64
)
for i := new(big.Int).Set(start); i.Cmp(end) < 0; i.Add(i, bigOne) { for i := new(big.Int).Set(start); i.Cmp(end) < 0; i.Add(i, bigOne) {
block, err := d.cfg.L2Client.BlockByNumber(ctx, i) block, err := d.cfg.L2Client.BlockByNumber(ctx, i)
if err != nil { if err != nil {
return nil, err return nil, err
} }
blocks = append(blocks, block) // For each sequencer transaction, update our running total with the
// size of the transaction.
// TODO(conner): remove when moving to multiple blocks batchElement := BatchElementFromBlock(block)
break //nolint if batchElement.IsSequencerTx() {
} // Abort once the total size estimate is greater than the maximum
// configured size. This is a conservative estimate, as the total
// calldata size will be greater when batch contexts are included.
// Below this set will be further whittled until the raw call data
// size also adheres to this constraint.
txLen := batchElement.Tx.Size()
if totalTxSize+uint64(TxLenSize+txLen) > d.cfg.MaxTxSize {
break
}
totalTxSize += uint64(TxLenSize + txLen)
}
var batchElements = make([]BatchElement, 0, len(blocks)) batchElements = append(batchElements, batchElement)
for _, block := range blocks {
batchElements = append(batchElements, BatchElementFromBlock(block))
} }
shouldStartAt := start.Uint64() shouldStartAt := start.Uint64()
batchParams, err := GenSequencerBatchParams( var pruneCount int
shouldStartAt, d.cfg.BlockOffset, batchElements, for {
) batchParams, err := GenSequencerBatchParams(
if err != nil { shouldStartAt, d.cfg.BlockOffset, batchElements,
return nil, err )
} if err != nil {
return nil, err
}
log.Info(name+" batch params", "params", fmt.Sprintf("%#v", batchParams)) batchArguments, err := batchParams.Serialize()
if err != nil {
return nil, err
}
batchArguments, err := batchParams.Serialize() appendSequencerBatchID := d.ctcABI.Methods[appendSequencerBatchMethodName].ID
if err != nil { batchCallData := append(appendSequencerBatchID, batchArguments...)
return nil, err
} // Continue pruning until calldata size is less than configured max.
if uint64(len(batchCallData)) > d.cfg.MaxTxSize {
oldLen := len(batchElements)
newBatchElementsLen := (oldLen * 9) / 10
batchElements = batchElements[:newBatchElementsLen]
log.Info(name+" pruned batch", "old_num_txs", oldLen, "new_num_txs", newBatchElementsLen)
pruneCount++
continue
}
appendSequencerBatchID := d.ctcABI.Methods[appendSequencerBatchMethodName].ID d.metrics.NumElementsPerBatch.Observe(float64(len(batchElements)))
batchCallData := append(appendSequencerBatchID, batchArguments...) d.metrics.BatchPruneCount.Set(float64(pruneCount))
if uint64(len(batchCallData)) > d.cfg.MaxTxSize { log.Info(name+" batch constructed", "num_txs", len(batchElements), "length", len(batchCallData))
panic("call data too large")
}
// Record the batch_tx_build_time. opts, err := bind.NewKeyedTransactorWithChainID(
batchTxBuildTime := float64(time.Since(batchTxBuildStart) / time.Millisecond) d.cfg.PrivKey, d.cfg.ChainID,
d.metrics.BatchTxBuildTime.Set(batchTxBuildTime) )
d.metrics.NumTxPerBatch.Observe(float64(len(blocks))) if err != nil {
return nil, err
}
opts.Context = ctx
opts.Nonce = nonce
opts.GasPrice = big.NewInt(params.GWei) // dummy
opts.NoSend = true
return d.rawCtcContract.RawTransact(opts, batchCallData)
}
}
log.Info(name+" batch call data", "data", hex.EncodeToString(batchCallData)) // SubmitBatchTx using the passed transaction as a template, signs and publishes
// an otherwise identical transaction after setting the provided gas price. The
// final transaction is returned to the caller.
func (d *Driver) SubmitBatchTx(
ctx context.Context,
tx *types.Transaction,
gasPrice *big.Int,
) (*types.Transaction, error) {
opts, err := bind.NewKeyedTransactorWithChainID( opts, err := bind.NewKeyedTransactorWithChainID(
d.cfg.PrivKey, d.cfg.ChainID, d.cfg.PrivKey, d.cfg.ChainID,
...@@ -200,9 +255,9 @@ func (d *Driver) SubmitBatchTx( ...@@ -200,9 +255,9 @@ func (d *Driver) SubmitBatchTx(
if err != nil { if err != nil {
return nil, err return nil, err
} }
opts.Nonce = nonce
opts.Context = ctx opts.Context = ctx
opts.Nonce = new(big.Int).SetUint64(tx.Nonce())
opts.GasPrice = gasPrice opts.GasPrice = gasPrice
return d.rawCtcContract.RawTransact(opts, batchCallData) return d.rawCtcContract.RawTransact(opts, tx.Data())
} }
...@@ -11,6 +11,12 @@ import ( ...@@ -11,6 +11,12 @@ import (
l2rlp "github.com/ethereum-optimism/optimism/l2geth/rlp" l2rlp "github.com/ethereum-optimism/optimism/l2geth/rlp"
) )
const (
// TxLenSize is the number of bytes used to represent the size of a
// serialized sequencer transaction.
TxLenSize = 3
)
var byteOrder = binary.BigEndian var byteOrder = binary.BigEndian
// BatchContext denotes a range of transactions that belong the same batch. It // BatchContext denotes a range of transactions that belong the same batch. It
...@@ -88,7 +94,7 @@ type AppendSequencerBatchParams struct { ...@@ -88,7 +94,7 @@ type AppendSequencerBatchParams struct {
// Txs contains all sequencer txs that will be recorded in the L1 CTC // Txs contains all sequencer txs that will be recorded in the L1 CTC
// contract. // contract.
Txs []*l2types.Transaction Txs []*CachedTx
} }
// Write encodes the AppendSequencerBatchParams using the following format: // Write encodes the AppendSequencerBatchParams using the following format:
...@@ -110,16 +116,9 @@ func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error { ...@@ -110,16 +116,9 @@ func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error {
} }
// Write each length-prefixed tx. // Write each length-prefixed tx.
var txBuf bytes.Buffer
for _, tx := range p.Txs { for _, tx := range p.Txs {
txBuf.Reset() writeUint64(w, uint64(tx.Size()), TxLenSize)
_, _ = w.Write(tx.RawTx()) // can't fail for bytes.Buffer
if err := tx.EncodeRLP(&txBuf); err != nil {
return err
}
writeUint64(w, uint64(txBuf.Len()), 3)
_, _ = w.Write(txBuf.Bytes()) // can't fail for bytes.Buffer
} }
return nil return nil
...@@ -173,7 +172,7 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error { ...@@ -173,7 +172,7 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
// from the encoding, loop until the stream is consumed. // from the encoding, loop until the stream is consumed.
for { for {
var txLen uint64 var txLen uint64
err := readUint64(r, &txLen, 3) err := readUint64(r, &txLen, TxLenSize)
// Getting an EOF when reading the txLen is expected for a cleanly // Getting an EOF when reading the txLen is expected for a cleanly
// encoded object. Silence the error and return success. // encoded object. Silence the error and return success.
if err == io.EOF { if err == io.EOF {
...@@ -187,7 +186,7 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error { ...@@ -187,7 +186,7 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
return err return err
} }
p.Txs = append(p.Txs, tx) p.Txs = append(p.Txs, NewCachedTx(tx))
} }
} }
......
...@@ -297,9 +297,9 @@ func testAppendSequencerBatchParamsEncodeDecode( ...@@ -297,9 +297,9 @@ func testAppendSequencerBatchParamsEncodeDecode(
// compareTxs compares a list of two transactions, testing each pair by tx hash. // compareTxs compares a list of two transactions, testing each pair by tx hash.
// This is used rather than require.Equal, since there `time` metadata on the // This is used rather than require.Equal, since there `time` metadata on the
// decoded tx and the expected tx will differ, and can't be modified/ignored. // decoded tx and the expected tx will differ, and can't be modified/ignored.
func compareTxs(t *testing.T, a, b []*l2types.Transaction) { func compareTxs(t *testing.T, a []*l2types.Transaction, b []*sequencer.CachedTx) {
require.Equal(t, len(a), len(b)) require.Equal(t, len(a), len(b))
for i, txA := range a { for i, txA := range a {
require.Equal(t, txA.Hash(), b[i].Hash()) require.Equal(t, txA.Hash(), b[i].Tx().Hash())
} }
} }
...@@ -12,12 +12,15 @@ type Metrics struct { ...@@ -12,12 +12,15 @@ type Metrics struct {
// BatchSizeInBytes tracks the size of batch submission transactions. // BatchSizeInBytes tracks the size of batch submission transactions.
BatchSizeInBytes prometheus.Histogram BatchSizeInBytes prometheus.Histogram
// NumTxPerBatch tracks the number of L2 transactions in each batch // NumElementsPerBatch tracks the number of L2 transactions in each batch
// submission. // submission.
NumTxPerBatch prometheus.Histogram NumElementsPerBatch prometheus.Histogram
// SubmissionTimestamp tracks the time at which each batch was confirmed.
SubmissionTimestamp prometheus.Gauge
// SubmissionGasUsed tracks the amount of gas used to submit each batch. // SubmissionGasUsed tracks the amount of gas used to submit each batch.
SubmissionGasUsed prometheus.Histogram SubmissionGasUsed prometheus.Gauge
// BatchsSubmitted tracks the total number of successful batch submissions. // BatchsSubmitted tracks the total number of successful batch submissions.
BatchesSubmitted prometheus.Counter BatchesSubmitted prometheus.Counter
...@@ -32,6 +35,12 @@ type Metrics struct { ...@@ -32,6 +35,12 @@ type Metrics struct {
// BatchConfirmationTime tracks the duration it takes to confirm a batch // BatchConfirmationTime tracks the duration it takes to confirm a batch
// transaction. // transaction.
BatchConfirmationTime prometheus.Gauge BatchConfirmationTime prometheus.Gauge
// BatchPruneCount tracks the number of times a batch of sequencer
// transactions is pruned in order to meet the desired size requirements.
//
// NOTE: This is currently only active in the sequencer driver.
BatchPruneCount prometheus.Gauge
} }
func NewMetrics(subsystem string) *Metrics { func NewMetrics(subsystem string) *Metrics {
...@@ -41,40 +50,65 @@ func NewMetrics(subsystem string) *Metrics { ...@@ -41,40 +50,65 @@ func NewMetrics(subsystem string) *Metrics {
Help: "ETH balance of the batch submitter", Help: "ETH balance of the batch submitter",
Subsystem: subsystem, Subsystem: subsystem,
}), }),
BatchSizeInBytes: promauto.NewHistogram(prometheus.HistogramOpts{ BatchSizeInBytes: promauto.NewSummary(prometheus.SummaryOpts{
Name: "batch_submitter_batch_size_in_bytes", Name: "batch_size_bytes",
Help: "Size of batches in bytes", Help: "Size of batches in bytes",
Subsystem: subsystem,
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}),
NumElementsPerBatch: promauto.NewHistogram(prometheus.HistogramOpts{
Name: "num_elements_per_batch",
Help: "Number of transaction in each batch",
Buckets: []float64{
250,
500,
750,
1000,
1250,
1500,
1750,
2000,
2250,
2500,
2750,
3000,
},
Subsystem: subsystem, Subsystem: subsystem,
}), }),
NumTxPerBatch: promauto.NewHistogram(prometheus.HistogramOpts{ SubmissionTimestamp: promauto.NewGauge(prometheus.GaugeOpts{
Name: "batch_submitter_num_txs_per_batch", Name: "submission_timestamp",
Help: "Number of transaction in each batch", Help: "Timestamp of last batch submitter submission",
Subsystem: subsystem, Subsystem: subsystem,
}), }),
SubmissionGasUsed: promauto.NewHistogram(prometheus.HistogramOpts{ SubmissionGasUsed: promauto.NewGauge(prometheus.GaugeOpts{
Name: "batch_submitter_submission_gas_used", Name: "submission_gas_used",
Help: "Gas used to submit each batch", Help: "Gas used to submit each batch",
Subsystem: subsystem, Subsystem: subsystem,
}), }),
BatchesSubmitted: promauto.NewCounter(prometheus.CounterOpts{ BatchesSubmitted: promauto.NewCounter(prometheus.CounterOpts{
Name: "batch_submitter_batches_submitted", Name: "batches_submitted",
Help: "Count of batches submitted", Help: "Count of batches submitted",
Subsystem: subsystem, Subsystem: subsystem,
}), }),
FailedSubmissions: promauto.NewCounter(prometheus.CounterOpts{ FailedSubmissions: promauto.NewCounter(prometheus.CounterOpts{
Name: "batch_submitter_failed_submissions", Name: "failed_submissions",
Help: "Count of failed batch submissions", Help: "Count of failed batch submissions",
Subsystem: subsystem, Subsystem: subsystem,
}), }),
BatchTxBuildTime: promauto.NewGauge(prometheus.GaugeOpts{ BatchTxBuildTime: promauto.NewGauge(prometheus.GaugeOpts{
Name: "batch_submitter_batch_tx_build_time", Name: "batch_tx_build_time_ms",
Help: "Time to construct batch transactions", Help: "Time to construct batch transactions",
Subsystem: subsystem, Subsystem: subsystem,
}), }),
BatchConfirmationTime: promauto.NewGauge(prometheus.GaugeOpts{ BatchConfirmationTime: promauto.NewGauge(prometheus.GaugeOpts{
Name: "batch_submitter_batch_confirmation_time", Name: "batch_submitter_batch_confirmation_time_ms",
Help: "Time to confirm batch transactions", Help: "Time to confirm batch transactions",
Subsystem: subsystem, Subsystem: subsystem,
}), }),
BatchPruneCount: promauto.NewGauge(prometheus.GaugeOpts{
Name: "batch_submitter_batch_prune_count",
Help: "Number of times a batch is pruned",
Subsystem: subsystem,
}),
} }
} }
package mock
import (
"context"
"math/big"
"sync"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// L1ClientConfig houses the internal methods that are executed by the mock
// L1Client. Any members left as nil will panic on execution. The signatures
// appear to mirror the subset of go-ethereum's ethclient API used by the
// batch submitter — NOTE(review): confirm against the consuming interface.
type L1ClientConfig struct {
	// EstimateGas tries to estimate the gas needed to execute a specific
	// transaction based on the current pending state of the backend blockchain.
	// There is no guarantee that this is the true gas limit requirement as
	// other transactions may be added or removed by miners, but it should
	// provide a basis for setting a reasonable default.
	EstimateGas func(context.Context, ethereum.CallMsg) (uint64, error)

	// NonceAt returns the account nonce of the given account. The block number
	// can be nil, in which case the nonce is taken from the latest known block.
	NonceAt func(context.Context, common.Address, *big.Int) (uint64, error)

	// SendTransaction injects a signed transaction into the pending pool for
	// execution.
	//
	// If the transaction was a contract creation use the TransactionReceipt
	// method to get the contract address after the transaction has been mined.
	SendTransaction func(context.Context, *types.Transaction) error

	// TransactionReceipt returns the receipt of a transaction by transaction
	// hash. Note that the receipt is not available for pending transactions.
	TransactionReceipt func(context.Context, common.Hash) (*types.Receipt, error)
}
// L1Client represents a mock L1Client. Its behavior is supplied entirely by
// the function members of its L1ClientConfig, which may be swapped at runtime
// via the Set*Func methods.
type L1Client struct {
	// cfg holds the currently-installed mock implementations. Guarded by mu.
	cfg L1ClientConfig

	// mu serializes access to cfg: mocked methods take the read lock while
	// executing, and the Set*Func mutators take the write lock to swap
	// handlers.
	mu sync.RWMutex
}
// NewL1Client returns a new L1Client whose behavior is backed by the mocked
// methods in the given L1ClientConfig.
func NewL1Client(cfg L1ClientConfig) *L1Client {
	client := &L1Client{cfg: cfg}
	return client
}
// EstimateGas executes the mock EstimateGas method. The read lock is held
// for the full duration of the mocked call, so a concurrent Set*Func update
// cannot swap the handler mid-execution. Panics if cfg.EstimateGas is nil.
func (c *L1Client) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.cfg.EstimateGas(ctx, call)
}
// NonceAt executes the mock NonceAt method under the read lock, so the
// handler cannot be swapped while it runs. Panics if cfg.NonceAt is nil.
func (c *L1Client) NonceAt(ctx context.Context, addr common.Address, blockNumber *big.Int) (uint64, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.cfg.NonceAt(ctx, addr, blockNumber)
}
// SendTransaction executes the mock SendTransaction method under the read
// lock, so the handler cannot be swapped while it runs. Panics if
// cfg.SendTransaction is nil.
func (c *L1Client) SendTransaction(ctx context.Context, tx *types.Transaction) error {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.cfg.SendTransaction(ctx, tx)
}
// TransactionReceipt executes the mock TransactionReceipt method under the
// read lock, so the handler cannot be swapped while it runs. Panics if
// cfg.TransactionReceipt is nil.
func (c *L1Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.cfg.TransactionReceipt(ctx, txHash)
}
// SetEstimateGasFunc overwrites the mock EstimateGas method. The write lock
// blocks until no mocked method is executing, so in-flight calls finish with
// the old handler.
func (c *L1Client) SetEstimateGasFunc(
	f func(context.Context, ethereum.CallMsg) (uint64, error)) {

	c.mu.Lock()
	defer c.mu.Unlock()
	c.cfg.EstimateGas = f
}
// SetNonceAtFunc overwrites the mock NonceAt method. The write lock blocks
// until no mocked method is executing, so in-flight calls finish with the
// old handler.
func (c *L1Client) SetNonceAtFunc(
	f func(context.Context, common.Address, *big.Int) (uint64, error)) {

	c.mu.Lock()
	defer c.mu.Unlock()
	c.cfg.NonceAt = f
}
// SetSendTransactionFunc overwrites the mock SendTransaction method. The
// write lock blocks until no mocked method is executing, so in-flight calls
// finish with the old handler.
func (c *L1Client) SetSendTransactionFunc(
	f func(context.Context, *types.Transaction) error) {

	c.mu.Lock()
	defer c.mu.Unlock()
	c.cfg.SendTransaction = f
}
// SetTransactionReceiptFunc overwrites the mock TransactionReceipt method.
// The write lock blocks until no mocked method is executing, so in-flight
// calls finish with the old handler.
func (c *L1Client) SetTransactionReceiptFunc(
	f func(context.Context, common.Hash) (*types.Receipt, error)) {

	c.mu.Lock()
	defer c.mu.Unlock()
	c.cfg.TransactionReceipt = f
}
package batchsubmitter package batchsubmitter
import ( import (
"bytes"
"context" "context"
"math/big" "math/big"
"sync" "sync"
...@@ -15,8 +16,8 @@ import ( ...@@ -15,8 +16,8 @@ import (
) )
var ( var (
// weiToGwei is the conversion rate from wei to gwei. // weiToEth is the conversion rate from wei to ether.
weiToGwei = new(big.Float).SetFloat64(1e-18) weiToEth = new(big.Float).SetFloat64(1e-18)
) )
// Driver is an interface for creating and submitting batch transactions for a // Driver is an interface for creating and submitting batch transactions for a
...@@ -32,18 +33,34 @@ type Driver interface { ...@@ -32,18 +33,34 @@ type Driver interface {
// Metrics returns the subservice telemetry object. // Metrics returns the subservice telemetry object.
Metrics() *metrics.Metrics Metrics() *metrics.Metrics
// ClearPendingTx publishes a transaction at the next available nonce in
// order to clear any transactions in the mempool left over from a prior
// running instance of the batch submitter.
ClearPendingTx(context.Context, txmgr.TxManager, *ethclient.Client) error
// GetBatchBlockRange returns the start and end L2 block heights that // GetBatchBlockRange returns the start and end L2 block heights that
// need to be processed. Note that the end value is *exclusive*, // need to be processed. Note that the end value is *exclusive*,
// therefore if the returned values are identical nothing needs to be // therefore if the returned values are identical nothing needs to be
// processed. // processed.
GetBatchBlockRange(ctx context.Context) (*big.Int, *big.Int, error) GetBatchBlockRange(ctx context.Context) (*big.Int, *big.Int, error)
// SubmitBatchTx transforms the L2 blocks between start and end into a // CraftBatchTx transforms the L2 blocks between start and end into a batch
// batch transaction using the given nonce and gasPrice. The final // transaction using the given nonce. A dummy gas price is used in the
// transaction is published and returned to the call. // resulting transaction to use for size estimation.
//
// NOTE: This method SHOULD NOT publish the resulting transaction.
CraftBatchTx(
ctx context.Context,
start, end, nonce *big.Int,
) (*types.Transaction, error)
// SubmitBatchTx using the passed transaction as a template, signs and
// publishes an otherwise identical transaction after setting the provided
// gas price. The final transaction is returned to the caller.
SubmitBatchTx( SubmitBatchTx(
ctx context.Context, ctx context.Context,
start, end, nonce, gasPrice *big.Int, tx *types.Transaction,
gasPrice *big.Int,
) (*types.Transaction, error) ) (*types.Transaction, error)
} }
...@@ -51,6 +68,7 @@ type ServiceConfig struct { ...@@ -51,6 +68,7 @@ type ServiceConfig struct {
Context context.Context Context context.Context
Driver Driver Driver Driver
PollInterval time.Duration PollInterval time.Duration
ClearPendingTx bool
L1Client *ethclient.Client L1Client *ethclient.Client
TxManagerConfig txmgr.Config TxManagerConfig txmgr.Config
} }
...@@ -99,6 +117,19 @@ func (s *Service) eventLoop() { ...@@ -99,6 +117,19 @@ func (s *Service) eventLoop() {
name := s.cfg.Driver.Name() name := s.cfg.Driver.Name()
if s.cfg.ClearPendingTx {
const maxClearRetries = 3
for i := 0; i < maxClearRetries; i++ {
err := s.cfg.Driver.ClearPendingTx(s.ctx, s.txMgr, s.cfg.L1Client)
if err == nil {
break
} else if i < maxClearRetries-1 {
continue
}
log.Crit("Unable to confirm a clearing transaction", "err", err)
}
}
for { for {
select { select {
case <-time.After(s.cfg.PollInterval): case <-time.After(s.cfg.PollInterval):
...@@ -112,7 +143,7 @@ func (s *Service) eventLoop() { ...@@ -112,7 +143,7 @@ func (s *Service) eventLoop() {
log.Error(name+" unable to get current balance", "err", err) log.Error(name+" unable to get current balance", "err", err)
continue continue
} }
s.metrics.ETHBalance.Set(weiToGwei64(balance)) s.metrics.ETHBalance.Set(weiToEth64(balance))
// Determine the range of L2 blocks that the batch submitter has not // Determine the range of L2 blocks that the batch submitter has not
// processed, and needs to take action on. // processed, and needs to take action on.
...@@ -141,6 +172,26 @@ func (s *Service) eventLoop() { ...@@ -141,6 +172,26 @@ func (s *Service) eventLoop() {
} }
nonce := new(big.Int).SetUint64(nonce64) nonce := new(big.Int).SetUint64(nonce64)
batchTxBuildStart := time.Now()
tx, err := s.cfg.Driver.CraftBatchTx(
s.ctx, start, end, nonce,
)
if err != nil {
log.Error(name+" unable to craft batch tx",
"err", err)
continue
}
batchTxBuildTime := time.Since(batchTxBuildStart) / time.Millisecond
s.metrics.BatchTxBuildTime.Set(float64(batchTxBuildTime))
// Record the size of the batch transaction.
var txBuf bytes.Buffer
if err := tx.EncodeRLP(&txBuf); err != nil {
log.Error(name+" unable to encode batch tx", "err", err)
continue
}
s.metrics.BatchSizeInBytes.Observe(float64(len(txBuf.Bytes())))
// Construct the transaction submission closure that will attempt // Construct the transaction submission closure that will attempt
// to send the next transaction at the given nonce and gas price. // to send the next transaction at the given nonce and gas price.
sendTx := func( sendTx := func(
...@@ -151,14 +202,19 @@ func (s *Service) eventLoop() { ...@@ -151,14 +202,19 @@ func (s *Service) eventLoop() {
"end", end, "nonce", nonce, "end", end, "nonce", nonce,
"gasPrice", gasPrice) "gasPrice", gasPrice)
tx, err := s.cfg.Driver.SubmitBatchTx( tx, err := s.cfg.Driver.SubmitBatchTx(ctx, tx, gasPrice)
ctx, start, end, nonce, gasPrice,
)
if err != nil { if err != nil {
return nil, err return nil, err
} }
s.metrics.BatchSizeInBytes.Observe(float64(tx.Size())) log.Info(
name+" submitted batch tx",
"start", start,
"end", end,
"nonce", nonce,
"tx_hash", tx.Hash(),
"gasPrice", gasPrice,
)
return tx, nil return tx, nil
} }
...@@ -181,7 +237,8 @@ func (s *Service) eventLoop() { ...@@ -181,7 +237,8 @@ func (s *Service) eventLoop() {
time.Millisecond time.Millisecond
s.metrics.BatchConfirmationTime.Set(float64(batchConfirmationTime)) s.metrics.BatchConfirmationTime.Set(float64(batchConfirmationTime))
s.metrics.BatchesSubmitted.Inc() s.metrics.BatchesSubmitted.Inc()
s.metrics.SubmissionGasUsed.Observe(float64(receipt.GasUsed)) s.metrics.SubmissionGasUsed.Set(float64(receipt.GasUsed))
s.metrics.SubmissionTimestamp.Set(float64(time.Now().UnixNano() / 1e6))
case err := <-s.ctx.Done(): case err := <-s.ctx.Done():
log.Error(name+" service shutting down", "err", err) log.Error(name+" service shutting down", "err", err)
...@@ -190,9 +247,9 @@ func (s *Service) eventLoop() { ...@@ -190,9 +247,9 @@ func (s *Service) eventLoop() {
} }
} }
func weiToGwei64(wei *big.Int) float64 { func weiToEth64(wei *big.Int) float64 {
gwei := new(big.Float).SetInt(wei) eth := new(big.Float).SetInt(wei)
gwei.Mul(gwei, weiToGwei) eth.Mul(eth, weiToEth)
gwei64, _ := gwei.Float64() eth64, _ := eth.Float64()
return gwei64 return eth64
} }
package utils
import (
"math/big"
"github.com/ethereum/go-ethereum/params"
)
// GasPriceFromGwei converts a uint64 gas price in gwei to a *big.Int in wei.
//
// The multiplication is performed in big.Int space so that large gwei
// values cannot overflow uint64 before the conversion to wei.
func GasPriceFromGwei(gasPriceInGwei uint64) *big.Int {
	gwei := new(big.Int).SetUint64(gasPriceInGwei)
	return gwei.Mul(gwei, big.NewInt(params.GWei))
}
package utils_test
import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/go/batch-submitter/utils"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
)
// TestGasPriceFromGwei asserts that the integer value is scaled properly by
// 10^9.
func TestGasPriceFromGwei(t *testing.T) {
	// require.Equal takes (t, expected, actual); the expected value goes
	// first so that failure messages report the two sides correctly.
	require.Equal(t, new(big.Int), utils.GasPriceFromGwei(0))
	require.Equal(t, big.NewInt(params.GWei), utils.GasPriceFromGwei(1))
	require.Equal(t, big.NewInt(100*params.GWei), utils.GasPriceFromGwei(100))
}
...@@ -40,7 +40,7 @@ options. ...@@ -40,7 +40,7 @@ options.
``` ```
NAME: NAME:
gas-oracle - Remotely Control the Optimistic Ethereum Gas Price gas-oracle - Remotely Control the Optimism Gas Price
USAGE: USAGE:
gas-oracle [global options] command [command options] [arguments...] gas-oracle [global options] command [command options] [arguments...]
...@@ -49,7 +49,7 @@ VERSION: ...@@ -49,7 +49,7 @@ VERSION:
0.0.0-1.10.4-stable 0.0.0-1.10.4-stable
DESCRIPTION: DESCRIPTION:
Configure with a private key and an Optimistic Ethereum HTTP endpoint to send transactions that update the L2 gas price. Configure with a private key and an Optimism HTTP endpoint to send transactions that update the L2 gas price.
COMMANDS: COMMANDS:
help, h Shows a list of commands or help for one command help, h Shows a list of commands or help for one command
......
...@@ -26,8 +26,8 @@ func main() { ...@@ -26,8 +26,8 @@ func main() {
app.Version = GitVersion + "-" + params.VersionWithCommit(GitCommit, GitDate) app.Version = GitVersion + "-" + params.VersionWithCommit(GitCommit, GitDate)
app.Name = "gas-oracle" app.Name = "gas-oracle"
app.Usage = "Remotely Control the Optimistic Ethereum Gas Price" app.Usage = "Remotely Control the Optimism Gas Price"
app.Description = "Configure with a private key and an Optimistic Ethereum HTTP endpoint " + app.Description = "Configure with a private key and an Optimism HTTP endpoint " +
"to send transactions that update the L2 gas price." "to send transactions that update the L2 gas price."
// Configure the logging // Configure the logging
......
SHELL := /bin/bash SHELL := /bin/bash
VERSION := `git describe --abbrev=0` ifndef VERSION
VERSION := `jq .version package.json `
endif
ifndef GITCOMMIT
GITCOMMIT := `git rev-parse HEAD` GITCOMMIT := `git rev-parse HEAD`
endif
ifndef BUILDDATE
BUILDDATE := `date +%Y-%m-%d` BUILDDATE := `date +%Y-%m-%d`
BUILDUSER := `whoami` endif
LDFLAGSSTRING :=-X github.com/ethereum-optimism/optimism/go/op_exporter/version.Version=$(VERSION) LDFLAGSSTRING :=-X github.com/ethereum-optimism/optimism/go/op_exporter/version.Version=$(VERSION)
LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/go/op_exporter/version.GitCommit=$(GITCOMMIT) LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/go/op_exporter/version.GitCommit=$(GITCOMMIT)
LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/go/op_exporter/version.BuildDate=$(BUILDDATE) LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/go/op_exporter/version.BuildDate=$(BUILDDATE)
LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/go/op_exporter/version.BuildUser=$(BUILDUSER)
LDFLAGS :=-ldflags "$(LDFLAGSSTRING)" LDFLAGS :=-ldflags "$(LDFLAGSSTRING)"
......
# op_exporter # op_exporter
A prometheus exporter to collect information from an Optimistic Ethereum node and serve metrics for collection A prometheus exporter to collect information from an Optimism node and serve metrics for collection
## Usage ## Usage
......
...@@ -24,6 +24,12 @@ var ( ...@@ -24,6 +24,12 @@ var (
Help: "Is the sequencer healthy?"}, Help: "Is the sequencer healthy?"},
[]string{"network"}, []string{"network"},
) )
opExporterVersion = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "op_exporter_version",
Help: "Verion of the op-exporter software"},
[]string{"version", "commit", "goVersion", "buildDate"},
)
) )
func init() { func init() {
...@@ -31,4 +37,6 @@ func init() { ...@@ -31,4 +37,6 @@ func init() {
prometheus.MustRegister(gasPrice) prometheus.MustRegister(gasPrice)
prometheus.MustRegister(blockNumber) prometheus.MustRegister(blockNumber)
prometheus.MustRegister(healthySequencer) prometheus.MustRegister(healthySequencer)
prometheus.MustRegister(opExporterVersion)
} }
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"os" "os"
"strings"
"sync" "sync"
"time" "time"
...@@ -20,6 +21,8 @@ import ( ...@@ -20,6 +21,8 @@ import (
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
) )
var UnknownStatus = "UNKNOWN"
var ( var (
listenAddress = kingpin.Flag( listenAddress = kingpin.Flag(
"web.listen-address", "web.listen-address",
...@@ -48,7 +51,7 @@ var ( ...@@ -48,7 +51,7 @@ var (
enableK8sQuery = kingpin.Flag( enableK8sQuery = kingpin.Flag(
"k8s.enable", "k8s.enable",
"Enable kubernetes info lookup.", "Enable kubernetes info lookup.",
).Default("true").Bool() ).Default("false").Bool()
) )
type healthCheck struct { type healthCheck struct {
...@@ -79,14 +82,15 @@ func main() { ...@@ -79,14 +82,15 @@ func main() {
log.Infoln("exporter config", *listenAddress, *rpcProvider, *networkLabel) log.Infoln("exporter config", *listenAddress, *rpcProvider, *networkLabel)
log.Infoln("Starting op_exporter", version.Info()) log.Infoln("Starting op_exporter", version.Info())
log.Infoln("Build context", version.BuildContext()) log.Infoln("Build context", version.BuildContext())
opExporterVersion.WithLabelValues(
strings.Trim(version.Version, "\""), version.GitCommit, version.GoVersion, version.BuildDate).Inc()
health := healthCheck{ health := healthCheck{
mu: new(sync.RWMutex), mu: new(sync.RWMutex),
height: 0, height: 0,
healthy: false, healthy: false,
updateTime: time.Now(), updateTime: time.Now(),
allowedMethods: nil, allowedMethods: nil,
version: nil, version: &UnknownStatus,
} }
http.Handle("/metrics", promhttp.Handler()) http.Handle("/metrics", promhttp.Handler())
http.Handle("/health", healthHandler(&health)) http.Handle("/health", healthHandler(&health))
...@@ -130,8 +134,7 @@ func getSequencerVersion(health *healthCheck, client *kubernetes.Clientset) { ...@@ -130,8 +134,7 @@ func getSequencerVersion(health *healthCheck, client *kubernetes.Clientset) {
} }
sequencerStatefulSet, err := client.AppsV1().StatefulSets(string(ns)).Get(context.TODO(), "sequencer", getOpts) sequencerStatefulSet, err := client.AppsV1().StatefulSets(string(ns)).Get(context.TODO(), "sequencer", getOpts)
if err != nil { if err != nil {
unknownStatus := "UNKNOWN" health.version = &UnknownStatus
health.version = &unknownStatus
log.Errorf("Unable to retrieve a sequencer StatefulSet: %s", err) log.Errorf("Unable to retrieve a sequencer StatefulSet: %s", err)
continue continue
} }
......
...@@ -8,7 +8,6 @@ import ( ...@@ -8,7 +8,6 @@ import (
var ( var (
Version string Version string
GitCommit string GitCommit string
BuildUser string
BuildDate string BuildDate string
GoVersion = runtime.Version() GoVersion = runtime.Version()
) )
...@@ -18,5 +17,5 @@ func Info() string { ...@@ -18,5 +17,5 @@ func Info() string {
} }
func BuildContext() string { func BuildContext() string {
return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate) return fmt.Sprintf("(go=%s, date=%s)", GoVersion, BuildDate)
} }
...@@ -4,7 +4,7 @@ ARG GITCOMMIT=docker ...@@ -4,7 +4,7 @@ ARG GITCOMMIT=docker
ARG GITDATE=docker ARG GITDATE=docker
ARG GITVERSION=docker ARG GITVERSION=docker
RUN apk add make jq git RUN apk add make jq git gcc musl-dev linux-headers
WORKDIR /app WORKDIR /app
COPY ./go/proxyd /app COPY ./go/proxyd /app
......
...@@ -7,16 +7,18 @@ import ( ...@@ -7,16 +7,18 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"github.com/ethereum/go-ethereum/log"
"github.com/gorilla/websocket"
"github.com/prometheus/client_golang/prometheus"
"io" "io"
"io/ioutil" "io/ioutil"
"math" "math"
"math/rand" "math/rand"
"net/http" "net/http"
"strconv" "strconv"
"strings"
"time" "time"
"github.com/ethereum/go-ethereum/log"
"github.com/gorilla/websocket"
"github.com/prometheus/client_golang/prometheus"
) )
const ( const (
...@@ -84,6 +86,8 @@ type Backend struct { ...@@ -84,6 +86,8 @@ type Backend struct {
maxRPS int maxRPS int
maxWSConns int maxWSConns int
outOfServiceInterval time.Duration outOfServiceInterval time.Duration
stripTrailingXFF bool
proxydIP string
} }
type BackendOpt func(b *Backend) type BackendOpt func(b *Backend)
...@@ -140,6 +144,18 @@ func WithTLSConfig(tlsConfig *tls.Config) BackendOpt { ...@@ -140,6 +144,18 @@ func WithTLSConfig(tlsConfig *tls.Config) BackendOpt {
} }
} }
func WithStrippedTrailingXFF() BackendOpt {
return func(b *Backend) {
b.stripTrailingXFF = true
}
}
func WithProxydIP(ip string) BackendOpt {
return func(b *Backend) {
b.proxydIP = ip
}
}
func NewBackend( func NewBackend(
name string, name string,
rpcURL string, rpcURL string,
...@@ -163,6 +179,10 @@ func NewBackend( ...@@ -163,6 +179,10 @@ func NewBackend(
opt(backend) opt(backend)
} }
if !backend.stripTrailingXFF && backend.proxydIP == "" {
log.Warn("proxied requests' XFF header will not contain the proxyd ip address")
}
return backend return backend
} }
...@@ -316,7 +336,18 @@ func (b *Backend) doForward(ctx context.Context, rpcReq *RPCReq) (*RPCRes, error ...@@ -316,7 +336,18 @@ func (b *Backend) doForward(ctx context.Context, rpcReq *RPCReq) (*RPCRes, error
httpReq.SetBasicAuth(b.authUsername, b.authPassword) httpReq.SetBasicAuth(b.authUsername, b.authPassword)
} }
xForwardedFor := GetXForwardedFor(ctx)
if b.stripTrailingXFF {
ipList := strings.Split(xForwardedFor, ", ")
if len(ipList) > 0 {
xForwardedFor = ipList[0]
}
} else if b.proxydIP != "" {
xForwardedFor = fmt.Sprintf("%s, %s", xForwardedFor, b.proxydIP)
}
httpReq.Header.Set("content-type", "application/json") httpReq.Header.Set("content-type", "application/json")
httpReq.Header.Set("X-Forwarded-For", xForwardedFor)
httpRes, err := b.client.Do(httpReq) httpRes, err := b.client.Do(httpReq)
if err != nil { if err != nil {
......
package proxyd
import (
"context"
"encoding/json"
"github.com/go-redis/redis/v8"
"github.com/golang/snappy"
lru "github.com/hashicorp/golang-lru"
)
// Cache is a minimal string key/value store. Implementations below include
// an in-process LRU and a redis-backed variant.
type Cache interface {
	Get(ctx context.Context, key string) (string, error)
	Put(ctx context.Context, key string, value string) error
}
// assuming an average RPCRes size of 3 KB
const (
	// memoryCacheLimit caps the in-memory LRU entry count (~12 MB at the
	// assumed average response size above).
	memoryCacheLimit = 4096
	// numBlockConfirmations is how far behind the chain head a block must
	// be before its response is treated as reorg-safe to cache.
	numBlockConfirmations = 50
)
type cache struct {
lru *lru.Cache
}
// newMemoryCache builds an in-process cache backed by an LRU bounded at
// memoryCacheLimit entries.
func newMemoryCache() *cache {
	// lru.New only fails for a non-positive size; memoryCacheLimit is a
	// positive constant, so the error can be safely discarded.
	backing, _ := lru.New(memoryCacheLimit)
	return &cache{lru: backing}
}
// Get returns the cached value for key, or the empty string on a miss.
// The error is always nil for the in-memory implementation.
func (c *cache) Get(ctx context.Context, key string) (string, error) {
	val, ok := c.lru.Get(key)
	if !ok {
		return "", nil
	}
	return val.(string), nil
}
// Put stores value under key, evicting the least-recently-used entry when
// the cache is full. The error is always nil for this implementation.
func (c *cache) Put(ctx context.Context, key string, value string) error {
	c.lru.Add(key, value)
	return nil
}
type redisCache struct {
rdb *redis.Client
}
// newRedisCache dials the redis instance at url and verifies connectivity
// with a PING before returning the cache wrapper.
func newRedisCache(url string) (*redisCache, error) {
	opts, err := redis.ParseURL(url)
	if err != nil {
		return nil, err
	}
	rdb := redis.NewClient(opts)
	// Fail fast at startup rather than on the first cached RPC call.
	if err := rdb.Ping(context.Background()).Err(); err != nil {
		return nil, wrapErr(err, "error connecting to redis")
	}
	return &redisCache{rdb}, nil
}
// Get fetches key from redis. A missing key is reported as a cache miss
// (empty string, nil error) rather than as a failure.
func (c *redisCache) Get(ctx context.Context, key string) (string, error) {
	val, err := c.rdb.Get(ctx, key).Result()
	switch {
	case err == redis.Nil:
		// Key absent: miss, not an error.
		return "", nil
	case err != nil:
		return "", err
	default:
		return val, nil
	}
}
// Put stores value under key with no expiration (a TTL of 0 persists the
// entry until evicted by redis policy).
func (c *redisCache) Put(ctx context.Context, key string, value string) error {
	return c.rdb.Set(ctx, key, value, 0).Err()
}
// GetLatestBlockNumFn returns the current chain head block number; used to
// decide whether a response is far enough behind the head to cache.
type GetLatestBlockNumFn func(ctx context.Context) (uint64, error)
// RPCCache caches whole RPC responses keyed by method-specific cache keys.
type RPCCache interface {
	GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error)
	PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error
}
// rpcCache implements RPCCache on top of a generic Cache, dispatching to a
// per-method RPCMethodHandler for key derivation and cacheability checks.
type rpcCache struct {
	cache Cache
	getLatestBlockNumFn GetLatestBlockNumFn
	handlers map[string]RPCMethodHandler
}
// newRPCCache wires up the per-method handlers for the RPC methods whose
// responses are safe to cache, backed by the supplied Cache.
func newRPCCache(cache Cache, getLatestBlockNumFn GetLatestBlockNumFn) RPCCache {
	return &rpcCache{
		cache:               cache,
		getLatestBlockNumFn: getLatestBlockNumFn,
		handlers: map[string]RPCMethodHandler{
			"eth_chainId":          &StaticRPCMethodHandler{"eth_chainId"},
			"net_version":          &StaticRPCMethodHandler{"net_version"},
			"eth_getBlockByNumber": &EthGetBlockByNumberMethod{getLatestBlockNumFn},
			"eth_getBlockRange":    &EthGetBlockRangeMethod{getLatestBlockNumFn},
		},
	}
}
// GetRPC returns the cached response for req, or (nil, nil) when the method
// is not whitelisted, the request is not cacheable, or the key is absent.
// Cached values are snappy-compressed JSON; the response ID is rewritten to
// match the incoming request before returning.
func (c *rpcCache) GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) {
	handler := c.handlers[req.Method]
	if handler == nil {
		// Method is not on the cacheable whitelist.
		return nil, nil
	}
	cacheable, err := handler.IsCacheable(req)
	if err != nil {
		return nil, err
	}
	if !cacheable {
		return nil, nil
	}
	key := handler.CacheKey(req)
	encodedVal, err := c.cache.Get(ctx, key)
	if err != nil {
		return nil, err
	}
	if encodedVal == "" {
		// Cache miss.
		return nil, nil
	}
	val, err := snappy.Decode(nil, []byte(encodedVal))
	if err != nil {
		return nil, err
	}
	res := new(RPCRes)
	err = json.Unmarshal(val, res)
	if err != nil {
		return nil, err
	}
	// The cached ID belongs to whichever request populated the entry;
	// replace it with the current request's ID.
	res.ID = req.ID
	return res, nil
}
// PutRPC stores res for req as snappy-compressed JSON. Non-whitelisted
// methods, uncacheable params, and blocks still within the confirmation
// window are silently skipped (nil error).
func (c *rpcCache) PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error {
	handler := c.handlers[req.Method]
	if handler == nil {
		return nil
	}
	cacheable, err := handler.IsCacheable(req)
	if err != nil {
		return err
	}
	if !cacheable {
		return nil
	}
	// Responses near the chain head may still reorg; don't cache them.
	requiresConfirmations, err := handler.RequiresUnconfirmedBlocks(ctx, req)
	if err != nil {
		return err
	}
	if requiresConfirmations {
		return nil
	}
	key := handler.CacheKey(req)
	val := mustMarshalJSON(res)
	encodedVal := snappy.Encode(nil, val)
	return c.cache.Put(ctx, key, string(encodedVal))
}
package proxyd
import (
"context"
"math"
"strconv"
"testing"
"github.com/stretchr/testify/require"
)
func TestRPCCacheWhitelist(t *testing.T) {
const blockHead = math.MaxUint64
ctx := context.Background()
fn := func(ctx context.Context) (uint64, error) {
return blockHead, nil
}
cache := newRPCCache(newMemoryCache(), fn)
ID := []byte(strconv.Itoa(1))
rpcs := []struct {
req *RPCReq
res *RPCRes
name string
}{
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_chainId",
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: "0xff",
ID: ID,
},
name: "eth_chainId",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "net_version",
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: "9999",
ID: ID,
},
name: "net_version",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockByNumber",
Params: []byte(`["0x1", false]`),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `{"difficulty": "0x1", "number": "0x1"}`,
ID: ID,
},
name: "eth_getBlockByNumber",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockByNumber",
Params: []byte(`["earliest", false]`),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `{"difficulty": "0x1", "number": "0x1"}`,
ID: ID,
},
name: "eth_getBlockByNumber earliest",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockRange",
Params: []byte(`["0x1", "0x2", false]`),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
ID: ID,
},
name: "eth_getBlockRange",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockRange",
Params: []byte(`["earliest", "0x2", false]`),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
ID: ID,
},
name: "eth_getBlockRange earliest",
},
}
for _, rpc := range rpcs {
t.Run(rpc.name, func(t *testing.T) {
err := cache.PutRPC(ctx, rpc.req, rpc.res)
require.NoError(t, err)
cachedRes, err := cache.GetRPC(ctx, rpc.req)
require.NoError(t, err)
require.Equal(t, rpc.res, cachedRes)
})
}
}
// TestRPCCacheUnsupportedMethod asserts that responses for methods outside
// the cacheable whitelist are neither stored nor returned by the cache.
func TestRPCCacheUnsupportedMethod(t *testing.T) {
	const blockHead = math.MaxUint64
	ctx := context.Background()
	fn := func(ctx context.Context) (uint64, error) {
		return blockHead, nil
	}
	cache := newRPCCache(newMemoryCache(), fn)
	ID := []byte(strconv.Itoa(1))
	req := &RPCReq{
		JSONRPC: "2.0",
		Method: "eth_blockNumber",
		ID: ID,
	}
	res := &RPCRes{
		JSONRPC: "2.0",
		Result: `0x1000`,
		ID: ID,
	}
	err := cache.PutRPC(ctx, req, res)
	require.NoError(t, err)
	cachedRes, err := cache.GetRPC(ctx, req)
	require.NoError(t, err)
	require.Nil(t, cachedRes)
}
func TestRPCCacheEthGetBlockByNumberForRecentBlocks(t *testing.T) {
ctx := context.Background()
var blockHead uint64 = 2
fn := func(ctx context.Context) (uint64, error) {
return blockHead, nil
}
cache := newRPCCache(newMemoryCache(), fn)
ID := []byte(strconv.Itoa(1))
rpcs := []struct {
req *RPCReq
res *RPCRes
name string
}{
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockByNumber",
Params: []byte(`["0x1", false]`),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `{"difficulty": "0x1", "number": "0x1"}`,
ID: ID,
},
name: "recent block num",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockByNumber",
Params: []byte(`["latest", false]`),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `{"difficulty": "0x1", "number": "0x1"}`,
ID: ID,
},
name: "latest block",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockByNumber",
Params: []byte(`["pending", false]`),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `{"difficulty": "0x1", "number": "0x1"}`,
ID: ID,
},
name: "pending block",
},
}
for _, rpc := range rpcs {
t.Run(rpc.name, func(t *testing.T) {
err := cache.PutRPC(ctx, rpc.req, rpc.res)
require.NoError(t, err)
cachedRes, err := cache.GetRPC(ctx, rpc.req)
require.NoError(t, err)
require.Nil(t, cachedRes)
})
}
}
// TestRPCCacheEthGetBlockByNumberInvalidRequest asserts that malformed
// eth_getBlockByNumber params (missing the include-tx boolean) surface an
// error from both PutRPC and GetRPC instead of being cached.
func TestRPCCacheEthGetBlockByNumberInvalidRequest(t *testing.T) {
	ctx := context.Background()
	const blockHead = math.MaxUint64
	fn := func(ctx context.Context) (uint64, error) {
		return blockHead, nil
	}
	cache := newRPCCache(newMemoryCache(), fn)
	ID := []byte(strconv.Itoa(1))
	req := &RPCReq{
		JSONRPC: "2.0",
		Method: "eth_getBlockByNumber",
		Params: []byte(`["0x1"]`), // missing required boolean param
		ID: ID,
	}
	res := &RPCRes{
		JSONRPC: "2.0",
		Result: `{"difficulty": "0x1", "number": "0x1"}`,
		ID: ID,
	}
	err := cache.PutRPC(ctx, req, res)
	require.Error(t, err)
	cachedRes, err := cache.GetRPC(ctx, req)
	require.Error(t, err)
	require.Nil(t, cachedRes)
}
func TestRPCCacheEthGetBlockRangeForRecentBlocks(t *testing.T) {
ctx := context.Background()
var blockHead uint64 = 0x1000
fn := func(ctx context.Context) (uint64, error) {
return blockHead, nil
}
cache := newRPCCache(newMemoryCache(), fn)
ID := []byte(strconv.Itoa(1))
rpcs := []struct {
req *RPCReq
res *RPCRes
name string
}{
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockRange",
Params: []byte(`["0x1", "0x1000", false]`),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
ID: ID,
},
name: "recent block num",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockRange",
Params: []byte(`["0x1", "latest", false]`),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
ID: ID,
},
name: "latest block",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockRange",
Params: []byte(`["0x1", "pending", false]`),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
ID: ID,
},
name: "pending block",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockRange",
Params: []byte(`["latest", "0x1000", false]`),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
ID: ID,
},
name: "latest block 2",
},
}
for _, rpc := range rpcs {
t.Run(rpc.name, func(t *testing.T) {
err := cache.PutRPC(ctx, rpc.req, rpc.res)
require.NoError(t, err)
cachedRes, err := cache.GetRPC(ctx, rpc.req)
require.NoError(t, err)
require.Nil(t, cachedRes)
})
}
}
func TestRPCCacheEthGetBlockRangeInvalidRequest(t *testing.T) {
ctx := context.Background()
const blockHead = math.MaxUint64
fn := func(ctx context.Context) (uint64, error) {
return blockHead, nil
}
cache := newRPCCache(newMemoryCache(), fn)
ID := []byte(strconv.Itoa(1))
rpcs := []struct {
req *RPCReq
res *RPCRes
name string
}{
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockRange",
Params: []byte(`["0x1", "0x2"]`), // missing required boolean param
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
ID: ID,
},
name: "missing boolean param",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockRange",
Params: []byte(`["abc", "0x2", true]`), // invalid block hex
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
ID: ID,
},
name: "invalid block hex",
},
}
for _, rpc := range rpcs {
t.Run(rpc.name, func(t *testing.T) {
err := cache.PutRPC(ctx, rpc.req, rpc.res)
require.Error(t, err)
cachedRes, err := cache.GetRPC(ctx, rpc.req)
require.Error(t, err)
require.Nil(t, cachedRes)
})
}
}
...@@ -14,6 +14,11 @@ type ServerConfig struct { ...@@ -14,6 +14,11 @@ type ServerConfig struct {
MaxBodySizeBytes int64 `toml:"max_body_size_bytes"` MaxBodySizeBytes int64 `toml:"max_body_size_bytes"`
} }
type CacheConfig struct {
Enabled bool `toml:"enabled"`
BlockSyncRPCURL string `toml:"block_sync_rpc_url"`
}
type RedisConfig struct { type RedisConfig struct {
URL string `toml:"url"` URL string `toml:"url"`
} }
...@@ -32,15 +37,16 @@ type BackendOptions struct { ...@@ -32,15 +37,16 @@ type BackendOptions struct {
} }
type BackendConfig struct { type BackendConfig struct {
Username string `toml:"username"` Username string `toml:"username"`
Password string `toml:"password"` Password string `toml:"password"`
RPCURL string `toml:"rpc_url"` RPCURL string `toml:"rpc_url"`
WSURL string `toml:"ws_url"` WSURL string `toml:"ws_url"`
MaxRPS int `toml:"max_rps"` MaxRPS int `toml:"max_rps"`
MaxWSConns int `toml:"max_ws_conns"` MaxWSConns int `toml:"max_ws_conns"`
CAFile string `toml:"ca_file"` CAFile string `toml:"ca_file"`
ClientCertFile string `toml:"client_cert_file"` ClientCertFile string `toml:"client_cert_file"`
ClientKeyFile string `toml:"client_key_file"` ClientKeyFile string `toml:"client_key_file"`
StripTrailingXFF bool `toml:"strip_trailing_xff"`
} }
type BackendsConfig map[string]*BackendConfig type BackendsConfig map[string]*BackendConfig
...@@ -56,6 +62,7 @@ type MethodMappingsConfig map[string]string ...@@ -56,6 +62,7 @@ type MethodMappingsConfig map[string]string
type Config struct { type Config struct {
WSBackendGroup string `toml:"ws_backend_group"` WSBackendGroup string `toml:"ws_backend_group"`
Server *ServerConfig `toml:"server"` Server *ServerConfig `toml:"server"`
Cache *CacheConfig `toml:"cache"`
Redis *RedisConfig `toml:"redis"` Redis *RedisConfig `toml:"redis"`
Metrics *MetricsConfig `toml:"metrics"` Metrics *MetricsConfig `toml:"metrics"`
BackendOptions *BackendOptions `toml:"backend"` BackendOptions *BackendOptions `toml:"backend"`
......
...@@ -6,8 +6,11 @@ require ( ...@@ -6,8 +6,11 @@ require (
github.com/BurntSushi/toml v0.4.1 github.com/BurntSushi/toml v0.4.1
github.com/ethereum/go-ethereum v1.10.11 github.com/ethereum/go-ethereum v1.10.11
github.com/go-redis/redis/v8 v8.11.4 github.com/go-redis/redis/v8 v8.11.4
github.com/golang/snappy v0.0.4
github.com/gorilla/mux v1.8.0 github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.4.2 github.com/gorilla/websocket v1.4.2
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_golang v1.11.0
github.com/rs/cors v1.8.0 github.com/rs/cors v1.8.0
github.com/stretchr/testify v1.7.0
) )
This diff is collapsed.
package proxyd
import (
"context"
"sync"
"time"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
)
const blockHeadSyncPeriod = 1 * time.Second
// LatestBlockHead polls an RPC node for the latest block number on a fixed
// interval and exposes the most recent value to concurrent readers.
type LatestBlockHead struct {
	url string
	client *ethclient.Client
	quit chan struct{} // closed by Stop to terminate the poll loop
	done chan struct{} // closed by the poll loop once it has exited
	mutex sync.RWMutex
	blockNum uint64
}
// newLatestBlockHead dials the block-sync RPC endpoint at url and returns
// an unstarted poller; call Start to begin syncing the block height.
func newLatestBlockHead(url string) (*LatestBlockHead, error) {
	client, err := ethclient.Dial(url)
	if err != nil {
		return nil, err
	}
	return &LatestBlockHead{
		url: url,
		client: client,
		quit: make(chan struct{}),
		done: make(chan struct{}),
	}, nil
}
// Start launches a background goroutine that polls the node for the latest
// block number every blockHeadSyncPeriod and publishes it via GetBlockNum.
// The goroutine runs until Stop is called.
func (h *LatestBlockHead) Start() {
	go func() {
		ticker := time.NewTicker(blockHeadSyncPeriod)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				blockNum, err := h.getBlockNum()
				if err != nil {
					// Keep the previous height on failure; retry on the
					// next tick.
					log.Error("error retrieving latest block number", "error", err)
					continue
				}
				log.Trace("polling block number", "blockNum", blockNum)
				h.mutex.Lock()
				h.blockNum = blockNum
				h.mutex.Unlock()
			case <-h.quit:
				// Signal Stop that the goroutine has fully exited.
				close(h.done)
				return
			}
		}
	}()
}
// getBlockNum fetches the latest block number from the backing node,
// retrying with backoff on failure.
//
// Returns the block number on success, or the last error wrapped in
// "exceeded retries" once all attempts are exhausted.
func (h *LatestBlockHead) getBlockNum() (uint64, error) {
	const maxRetries = 5
	var err error
	for i := 0; i <= maxRetries; i++ {
		var blockNum uint64
		blockNum, err = h.client.BlockNumber(context.Background())
		if err == nil {
			return blockNum, nil
		}
		// Don't sleep after the final attempt: the caller should see the
		// error immediately instead of waiting out one more backoff.
		if i == maxRetries {
			break
		}
		backoff := calcBackoff(i)
		log.Warn("http operation failed. retrying...", "error", err, "backoff", backoff)
		time.Sleep(backoff)
	}
	return 0, wrapErr(err, "exceeded retries")
}
// Stop shuts down the polling goroutine, waits for it to exit, and closes
// the underlying RPC client. Must be called at most once (quit is closed).
func (h *LatestBlockHead) Stop() {
	close(h.quit)
	<-h.done
	h.client.Close()
}
// GetBlockNum returns the most recently polled block number; zero until the
// first successful poll completes.
func (h *LatestBlockHead) GetBlockNum() uint64 {
	h.mutex.RLock()
	defer h.mutex.RUnlock()
	return h.blockNum
}
package proxyd
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
)
var errInvalidRPCParams = errors.New("invalid RPC params")
// RPCMethodHandler encapsulates caching policy for one RPC method: deriving
// a cache key, deciding whether a request is cacheable at all, and deciding
// whether its target block is too close to the chain head to trust.
type RPCMethodHandler interface {
	CacheKey(req *RPCReq) string
	IsCacheable(req *RPCReq) (bool, error)
	RequiresUnconfirmedBlocks(ctx context.Context, req *RPCReq) (bool, error)
}
// StaticRPCMethodHandler handles methods whose responses never change for a
// given chain (e.g. eth_chainId, net_version).
type StaticRPCMethodHandler struct {
	method string
}
// CacheKey depends only on the method name, since static responses do not
// vary with request params.
func (s *StaticRPCMethodHandler) CacheKey(req *RPCReq) string {
	return fmt.Sprintf("method:%s", s.method)
}
// IsCacheable always reports true: static responses are immutable.
func (s *StaticRPCMethodHandler) IsCacheable(*RPCReq) (bool, error) { return true, nil }
// RequiresUnconfirmedBlocks always reports false: static responses do not
// depend on chain state near the head.
func (s *StaticRPCMethodHandler) RequiresUnconfirmedBlocks(context.Context, *RPCReq) (bool, error) {
	return false, nil
}
type EthGetBlockByNumberMethod struct {
getLatestBlockNumFn GetLatestBlockNumFn
}
// CacheKey derives the key from the block tag and the include-tx flag.
// Returns "" on undecodable params; callers check IsCacheable first, so a
// decodable request is expected here.
func (e *EthGetBlockByNumberMethod) CacheKey(req *RPCReq) string {
	input, includeTx, err := decodeGetBlockByNumberParams(req.Params)
	if err != nil {
		return ""
	}
	return fmt.Sprintf("method:eth_getBlockByNumber:%s:%t", input, includeTx)
}
// IsCacheable reports whether the request targets a fixed block: "latest"
// and "pending" resolve differently over time, so they are not cacheable.
func (e *EthGetBlockByNumberMethod) IsCacheable(req *RPCReq) (bool, error) {
	blockNum, _, err := decodeGetBlockByNumberParams(req.Params)
	if err != nil {
		return false, err
	}
	return !isBlockDependentParam(blockNum), nil
}
// RequiresUnconfirmedBlocks reports whether the requested block is within
// numBlockConfirmations of the chain head and therefore still at risk of a
// reorg, in which case the response must not be cached.
func (e *EthGetBlockByNumberMethod) RequiresUnconfirmedBlocks(ctx context.Context, req *RPCReq) (bool, error) {
	curBlock, err := e.getLatestBlockNumFn(ctx)
	if err != nil {
		return false, err
	}
	blockInput, _, err := decodeGetBlockByNumberParams(req.Params)
	if err != nil {
		return false, err
	}
	if isBlockDependentParam(blockInput) {
		return true, nil
	}
	if blockInput == "earliest" {
		// The genesis block can never reorg.
		return false, nil
	}
	blockNum, err := decodeBlockInput(blockInput)
	if err != nil {
		return false, err
	}
	return curBlock <= blockNum+numBlockConfirmations, nil
}
type EthGetBlockRangeMethod struct {
getLatestBlockNumFn GetLatestBlockNumFn
}
// CacheKey derives the key from the start/end block tags and the include-tx
// flag. Returns "" on undecodable params.
func (e *EthGetBlockRangeMethod) CacheKey(req *RPCReq) string {
	start, end, includeTx, err := decodeGetBlockRangeParams(req.Params)
	if err != nil {
		return ""
	}
	return fmt.Sprintf("method:eth_getBlockRange:%s:%s:%t", start, end, includeTx)
}
// IsCacheable reports whether both range endpoints are fixed blocks;
// "latest"/"pending" endpoints move with the chain and are not cacheable.
func (e *EthGetBlockRangeMethod) IsCacheable(req *RPCReq) (bool, error) {
	start, end, _, err := decodeGetBlockRangeParams(req.Params)
	if err != nil {
		return false, err
	}
	return !isBlockDependentParam(start) && !isBlockDependentParam(end), nil
}
// RequiresUnconfirmedBlocks reports whether either endpoint of the requested
// range lies within numBlockConfirmations of the chain head and could
// therefore still reorg, making the response unsafe to cache.
func (e *EthGetBlockRangeMethod) RequiresUnconfirmedBlocks(ctx context.Context, req *RPCReq) (bool, error) {
	curBlock, err := e.getLatestBlockNumFn(ctx)
	if err != nil {
		return false, err
	}
	start, end, _, err := decodeGetBlockRangeParams(req.Params)
	if err != nil {
		return false, err
	}
	if isBlockDependentParam(start) || isBlockDependentParam(end) {
		return true, nil
	}
	if start == "earliest" && end == "earliest" {
		// A genesis-only range can never reorg.
		return false, nil
	}
	if start != "earliest" {
		startNum, err := decodeBlockInput(start)
		if err != nil {
			return false, err
		}
		if curBlock <= startNum+numBlockConfirmations {
			return true, nil
		}
	}
	if end != "earliest" {
		endNum, err := decodeBlockInput(end)
		if err != nil {
			return false, err
		}
		if curBlock <= endNum+numBlockConfirmations {
			return true, nil
		}
	}
	return false, nil
}
// isBlockDependentParam reports whether the block tag refers to a moving
// target ("latest" or "pending") whose meaning changes as the chain grows.
func isBlockDependentParam(s string) bool {
	switch s {
	case "latest", "pending":
		return true
	}
	return false
}
// decodeGetBlockByNumberParams parses the eth_getBlockByNumber params array
// and returns (blockTag, includeTxBodies). Arity, element types, and the
// block tag itself are validated; errInvalidRPCParams is returned on any
// mismatch, and JSON syntax errors are passed through unchanged.
func decodeGetBlockByNumberParams(params json.RawMessage) (string, bool, error) {
	var args []interface{}
	if err := json.Unmarshal(params, &args); err != nil {
		return "", false, err
	}
	if len(args) != 2 {
		return "", false, errInvalidRPCParams
	}
	blockTag, tagOK := args[0].(string)
	includeTx, boolOK := args[1].(bool)
	if !tagOK || !boolOK || !validBlockInput(blockTag) {
		return "", false, errInvalidRPCParams
	}
	return blockTag, includeTx, nil
}
// decodeGetBlockRangeParams parses the eth_getBlockRange params array and
// returns (startTag, endTag, includeTxBodies). Arity, element types, and
// both block tags are validated; errInvalidRPCParams is returned on any
// mismatch, and JSON syntax errors are passed through unchanged.
func decodeGetBlockRangeParams(params json.RawMessage) (string, string, bool, error) {
	var args []interface{}
	if err := json.Unmarshal(params, &args); err != nil {
		return "", "", false, err
	}
	if len(args) != 3 {
		return "", "", false, errInvalidRPCParams
	}
	startTag, startOK := args[0].(string)
	endTag, endOK := args[1].(string)
	includeTx, boolOK := args[2].(bool)
	if !startOK || !endOK || !boolOK {
		return "", "", false, errInvalidRPCParams
	}
	if !validBlockInput(startTag) || !validBlockInput(endTag) {
		return "", "", false, errInvalidRPCParams
	}
	return startTag, endTag, includeTx, nil
}
// decodeBlockInput parses a 0x-prefixed hex quantity into a uint64 block
// number.
func decodeBlockInput(input string) (uint64, error) {
	return hexutil.DecodeUint64(input)
}
// validBlockInput reports whether input is an acceptable block tag: one of
// the named tags, or a decodable 0x-prefixed hex block number.
func validBlockInput(input string) bool {
	switch input {
	case "earliest", "pending", "latest":
		return true
	}
	_, err := decodeBlockInput(input)
	return err == nil
}
...@@ -2,10 +2,11 @@ package proxyd ...@@ -2,10 +2,11 @@ package proxyd
import ( import (
"context" "context"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"strconv" "strconv"
"strings" "strings"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
) )
const ( const (
...@@ -20,6 +21,8 @@ const ( ...@@ -20,6 +21,8 @@ const (
MethodUnknown = "unknown" MethodUnknown = "unknown"
) )
var PayloadSizeBuckets = []float64{10, 50, 100, 500, 1000, 5000, 10000, 100000, 1000000}
var ( var (
rpcRequestsTotal = promauto.NewCounter(prometheus.CounterOpts{ rpcRequestsTotal = promauto.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace, Namespace: MetricsNamespace,
...@@ -139,6 +142,25 @@ var ( ...@@ -139,6 +142,25 @@ var (
"source", "source",
}) })
requestPayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: MetricsNamespace,
Name: "request_payload_sizes",
Help: "Gauge of client request payload sizes.",
Buckets: PayloadSizeBuckets,
}, []string{
"auth",
"method_name",
})
responsePayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: MetricsNamespace,
Name: "response_payload_sizes",
Help: "Gauge of client response payload sizes.",
Buckets: PayloadSizeBuckets,
}, []string{
"auth",
})
rpcSpecialErrors = []string{ rpcSpecialErrors = []string{
"nonce too low", "nonce too low",
"gas price too high", "gas price too high",
...@@ -185,3 +207,11 @@ func MaybeRecordSpecialRPCError(ctx context.Context, backendName, method string, ...@@ -185,3 +207,11 @@ func MaybeRecordSpecialRPCError(ctx context.Context, backendName, method string,
} }
} }
} }
func RecordRequestPayloadSize(ctx context.Context, method string, payloadSize int) {
requestPayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx), method).Observe(float64(payloadSize))
}
func RecordResponsePayloadSize(ctx context.Context, payloadSize int) {
responsePayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize))
}
package proxyd package proxyd
import ( import (
"context"
"crypto/tls" "crypto/tls"
"errors" "errors"
"fmt" "fmt"
"github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus/promhttp"
"net/http" "net/http"
"os" "os"
"os/signal" "os/signal"
"syscall" "syscall"
"time" "time"
"github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus/promhttp"
) )
func Start(config *Config) error { func Start(config *Config) error {
...@@ -96,6 +98,10 @@ func Start(config *Config) error { ...@@ -96,6 +98,10 @@ func Start(config *Config) error {
log.Info("using custom TLS config for backend", "name", name) log.Info("using custom TLS config for backend", "name", name)
opts = append(opts, WithTLSConfig(tlsConfig)) opts = append(opts, WithTLSConfig(tlsConfig))
} }
if cfg.StripTrailingXFF {
opts = append(opts, WithStrippedTrailingXFF())
}
opts = append(opts, WithProxydIP(os.Getenv("PROXYD_IP")))
back := NewBackend(name, rpcURL, wsURL, lim, opts...) back := NewBackend(name, rpcURL, wsURL, lim, opts...)
backendNames = append(backendNames, name) backendNames = append(backendNames, name)
backendsByName[name] = back backendsByName[name] = back
...@@ -149,6 +155,35 @@ func Start(config *Config) error { ...@@ -149,6 +155,35 @@ func Start(config *Config) error {
} }
} }
var rpcCache RPCCache
if config.Cache != nil && config.Cache.Enabled {
var cache Cache
if config.Redis != nil {
if cache, err = newRedisCache(config.Redis.URL); err != nil {
return err
}
} else {
log.Warn("redis is not configured, using in-memory cache")
cache = newMemoryCache()
}
var getLatestBlockNumFn GetLatestBlockNumFn
if config.Cache.BlockSyncRPCURL == "" {
return fmt.Errorf("block sync node required for caching")
}
latestHead, err := newLatestBlockHead(config.Cache.BlockSyncRPCURL)
if err != nil {
return err
}
latestHead.Start()
defer latestHead.Stop()
getLatestBlockNumFn = func(ctx context.Context) (uint64, error) {
return latestHead.GetBlockNum(), nil
}
rpcCache = newRPCCache(cache, getLatestBlockNumFn)
}
srv := NewServer( srv := NewServer(
backendGroups, backendGroups,
wsBackendGroup, wsBackendGroup,
...@@ -156,9 +191,10 @@ func Start(config *Config) error { ...@@ -156,9 +191,10 @@ func Start(config *Config) error {
config.RPCMethodMappings, config.RPCMethodMappings,
config.Server.MaxBodySizeBytes, config.Server.MaxBodySizeBytes,
resolvedAuth, resolvedAuth,
rpcCache,
) )
if config.Metrics.Enabled { if config.Metrics != nil && config.Metrics.Enabled {
addr := fmt.Sprintf("%s:%d", config.Metrics.Host, config.Metrics.Port) addr := fmt.Sprintf("%s:%d", config.Metrics.Host, config.Metrics.Port)
log.Info("starting metrics server", "addr", addr) log.Info("starting metrics server", "addr", addr)
go http.ListenAndServe(addr, promhttp.Handler()) go http.ListenAndServe(addr, promhttp.Handler())
......
...@@ -5,20 +5,23 @@ import ( ...@@ -5,20 +5,23 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/gorilla/websocket" "github.com/gorilla/websocket"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/rs/cors" "github.com/rs/cors"
"io"
"net/http"
"strconv"
"time"
) )
const ( const (
ContextKeyAuth = "authorization" ContextKeyAuth = "authorization"
ContextKeyReqID = "req_id" ContextKeyReqID = "req_id"
ContextKeyXForwardedFor = "x_forwarded_for"
) )
type Server struct { type Server struct {
...@@ -31,6 +34,7 @@ type Server struct { ...@@ -31,6 +34,7 @@ type Server struct {
upgrader *websocket.Upgrader upgrader *websocket.Upgrader
rpcServer *http.Server rpcServer *http.Server
wsServer *http.Server wsServer *http.Server
cache RPCCache
} }
func NewServer( func NewServer(
...@@ -40,7 +44,11 @@ func NewServer( ...@@ -40,7 +44,11 @@ func NewServer(
rpcMethodMappings map[string]string, rpcMethodMappings map[string]string,
maxBodySize int64, maxBodySize int64,
authenticatedPaths map[string]string, authenticatedPaths map[string]string,
cache RPCCache,
) *Server { ) *Server {
if cache == nil {
cache = &NoopRPCCache{}
}
return &Server{ return &Server{
backendGroups: backendGroups, backendGroups: backendGroups,
wsBackendGroup: wsBackendGroup, wsBackendGroup: wsBackendGroup,
...@@ -48,6 +56,7 @@ func NewServer( ...@@ -48,6 +56,7 @@ func NewServer(
rpcMethodMappings: rpcMethodMappings, rpcMethodMappings: rpcMethodMappings,
maxBodySize: maxBodySize, maxBodySize: maxBodySize,
authenticatedPaths: authenticatedPaths, authenticatedPaths: authenticatedPaths,
cache: cache,
upgrader: &websocket.Upgrader{ upgrader: &websocket.Upgrader{
HandshakeTimeout: 5 * time.Second, HandshakeTimeout: 5 * time.Second,
}, },
...@@ -113,13 +122,15 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) { ...@@ -113,13 +122,15 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
"user_agent", r.Header.Get("user-agent"), "user_agent", r.Header.Get("user-agent"),
) )
req, err := ParseRPCReq(io.LimitReader(r.Body, s.maxBodySize)) bodyReader := &recordLenReader{Reader: io.LimitReader(r.Body, s.maxBodySize)}
req, err := ParseRPCReq(bodyReader)
if err != nil { if err != nil {
log.Info("rejected request with bad rpc request", "source", "rpc", "err", err) log.Info("rejected request with bad rpc request", "source", "rpc", "err", err)
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err) RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
writeRPCError(ctx, w, nil, err) writeRPCError(ctx, w, nil, err)
return return
} }
RecordRequestPayloadSize(ctx, req.Method, bodyReader.Len)
group := s.rpcMethodMappings[req.Method] group := s.rpcMethodMappings[req.Method]
if group == "" { if group == "" {
...@@ -136,7 +147,21 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) { ...@@ -136,7 +147,21 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
return return
} }
backendRes, err := s.backendGroups[group].Forward(ctx, req) var backendRes *RPCRes
backendRes, err = s.cache.GetRPC(ctx, req)
if err == nil && backendRes != nil {
writeRPCRes(ctx, w, backendRes)
return
}
if err != nil {
log.Warn(
"cache lookup error",
"req_id", GetReqID(ctx),
"err", err,
)
}
backendRes, err = s.backendGroups[group].Forward(ctx, req)
if err != nil { if err != nil {
log.Error( log.Error(
"error forwarding RPC request", "error forwarding RPC request",
...@@ -148,6 +173,16 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) { ...@@ -148,6 +173,16 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
return return
} }
if backendRes.Error == nil {
if err = s.cache.PutRPC(ctx, req, backendRes); err != nil {
log.Warn(
"cache put error",
"req_id", GetReqID(ctx),
"err", err,
)
}
}
writeRPCRes(ctx, w, backendRes) writeRPCRes(ctx, w, backendRes)
} }
...@@ -214,7 +249,16 @@ func (s *Server) populateContext(w http.ResponseWriter, r *http.Request) context ...@@ -214,7 +249,16 @@ func (s *Server) populateContext(w http.ResponseWriter, r *http.Request) context
return nil return nil
} }
xff := r.Header.Get("X-Forwarded-For")
if xff == "" {
ipPort := strings.Split(r.RemoteAddr, ":")
if len(ipPort) == 2 {
xff = ipPort[0]
}
}
ctx := context.WithValue(r.Context(), ContextKeyAuth, s.authenticatedPaths[authorization]) ctx := context.WithValue(r.Context(), ContextKeyAuth, s.authenticatedPaths[authorization])
ctx = context.WithValue(ctx, ContextKeyXForwardedFor, xff)
return context.WithValue( return context.WithValue(
ctx, ctx,
ContextKeyReqID, ContextKeyReqID,
...@@ -237,14 +281,17 @@ func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) { ...@@ -237,14 +281,17 @@ func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) {
if res.IsError() && res.Error.HTTPErrorCode != 0 { if res.IsError() && res.Error.HTTPErrorCode != 0 {
statusCode = res.Error.HTTPErrorCode statusCode = res.Error.HTTPErrorCode
} }
w.WriteHeader(statusCode) w.WriteHeader(statusCode)
enc := json.NewEncoder(w) ww := &recordLenWriter{Writer: w}
enc := json.NewEncoder(ww)
if err := enc.Encode(res); err != nil { if err := enc.Encode(res); err != nil {
log.Error("error writing rpc response", "err", err) log.Error("error writing rpc response", "err", err)
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err) RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
return return
} }
httpResponseCodesTotal.WithLabelValues(strconv.Itoa(statusCode)).Inc() httpResponseCodesTotal.WithLabelValues(strconv.Itoa(statusCode)).Inc()
RecordResponsePayloadSize(ctx, ww.Len)
} }
func instrumentedHdlr(h http.Handler) http.HandlerFunc { func instrumentedHdlr(h http.Handler) http.HandlerFunc {
...@@ -271,3 +318,43 @@ func GetReqID(ctx context.Context) string { ...@@ -271,3 +318,43 @@ func GetReqID(ctx context.Context) string {
} }
return reqId return reqId
} }
// GetXForwardedFor returns the X-Forwarded-For value previously stored on
// the request context by populateContext, or the empty string when the
// context carries none.
func GetXForwardedFor(ctx context.Context) string {
	if xff, ok := ctx.Value(ContextKeyXForwardedFor).(string); ok {
		return xff
	}
	return ""
}
type recordLenReader struct {
io.Reader
Len int
}
func (r *recordLenReader) Read(p []byte) (n int, err error) {
n, err = r.Reader.Read(p)
r.Len += n
return
}
type recordLenWriter struct {
io.Writer
Len int
}
func (w *recordLenWriter) Write(p []byte) (n int, err error) {
n, err = w.Writer.Write(p)
w.Len += n
return
}
// NoopRPCCache is an RPCCache implementation that never stores or returns
// cached responses; NewServer falls back to it when no cache is supplied.
type NoopRPCCache struct{}

// GetRPC always reports a cache miss (nil response, nil error).
func (n *NoopRPCCache) GetRPC(context.Context, *RPCReq) (*RPCRes, error) {
	return nil, nil
}

// PutRPC discards the response and always succeeds.
func (n *NoopRPCCache) PutRPC(context.Context, *RPCReq, *RPCRes) error {
	return nil
}
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore build and test binaries.
bin/
testbin/
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
bin
testbin/*
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Kubernetes Generated files - skip generated files, except for vendored files
!vendor/**/zz_generated.*
# editor and IDE paraphernalia
.idea
*.swp
*.swo
*~
# Build the manager binary
FROM golang:1.16 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
# Copy the go source
COPY main.go main.go
COPY api/ api/
COPY controllers/ controllers/
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532
ENTRYPOINT ["/manager"]
# VERSION defines the project version for the bundle.
# Update this value when you upgrade the version of your project.
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
VERSION ?= 0.0.1
# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
# To re-generate a bundle for other specific channels without changing the standard setup, you can:
# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable)
# - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable")
ifneq ($(origin CHANNELS), undefined)
BUNDLE_CHANNELS := --channels=$(CHANNELS)
endif
# DEFAULT_CHANNEL defines the default channel used in the bundle.
# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable")
# To re-generate a bundle for any other default channel without changing the default setup, you can:
# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable)
# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable")
ifneq ($(origin DEFAULT_CHANNEL), undefined)
BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL)
endif
BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images.
# This variable is used to construct full image tags for bundle and catalog images.
#
# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both
# optimism-stacks.net/stackman-bundle:$VERSION and optimism-stacks.net/stackman-catalog:$VERSION.
IMAGE_TAG_BASE ?= optimism-stacks.net/stackman
# BUNDLE_IMG defines the image:tag used for the bundle.
# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION)
# Image URL to use all building/pushing image targets
IMG ?= ethereumoptimism/stackman-controller:latest
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST_K8S_VERSION = 1.22
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif
# Setting SHELL to bash allows bash commands to be executed by recipes.
# This is a requirement for 'setup-envtest.sh' in the test target.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec
all: build
##@ General
# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-format the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
##@ Development
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
fmt: ## Run go fmt against code.
go fmt ./...
vet: ## Run go vet against code.
go vet ./...
test: manifests generate fmt vet envtest ## Run tests.
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
##@ Build
build: generate fmt vet ## Build manager binary.
go build -o bin/manager main.go
run: manifests generate fmt vet ## Run a controller from your host.
go run ./main.go
docker-build: test ## Build docker image with the manager.
docker build -t ${IMG} .
docker-push: ## Push docker image with the manager.
docker push ${IMG}
##@ Deployment
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/crd | kubectl apply -f -
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/crd | kubectl delete -f -
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | kubectl apply -f -
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/default | kubectl delete -f -
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0)
KUSTOMIZE = $(shell pwd)/bin/kustomize
kustomize: ## Download kustomize locally if necessary.
$(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7)
ENVTEST = $(shell pwd)/bin/setup-envtest
envtest: ## Download envtest-setup locally if necessary.
$(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest)
# go-get-tool will 'go get' any package $2 and install it to $1.
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
define go-get-tool
@[ -f $(1) ] || { \
set -e ;\
TMP_DIR=$$(mktemp -d) ;\
cd $$TMP_DIR ;\
go mod init tmp ;\
echo "Downloading $(2)" ;\
GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\
rm -rf $$TMP_DIR ;\
}
endef
.PHONY: bundle
bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
operator-sdk generate kustomize manifests -q
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
$(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
operator-sdk bundle validate ./bundle
.PHONY: bundle-build
bundle-build: ## Build the bundle image.
docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
.PHONY: bundle-push
bundle-push: ## Push the bundle image.
$(MAKE) docker-push IMG=$(BUNDLE_IMG)
.PHONY: opm
OPM = ./bin/opm
opm: ## Download opm locally if necessary.
ifeq (,$(wildcard $(OPM)))
ifeq (,$(shell which opm 2>/dev/null))
@{ \
set -e ;\
mkdir -p $(dir $(OPM)) ;\
OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$${OS}-$${ARCH}-opm ;\
chmod +x $(OPM) ;\
}
else
OPM = $(shell which opm)
endif
endif
# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0).
# These images MUST exist in a registry and be pull-able.
BUNDLE_IMGS ?= $(BUNDLE_IMG)
# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0).
CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION)
# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image.
ifneq ($(origin CATALOG_BASE_IMG), undefined)
FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG)
endif
# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'.
# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see:
# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator
.PHONY: catalog-build
catalog-build: opm ## Build a catalog image.
$(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
# Push the catalog image.
.PHONY: catalog-push
catalog-push: ## Push a catalog image.
$(MAKE) docker-push IMG=$(CATALOG_IMG)
domain: optimism-stacks.net
layout:
- go.kubebuilder.io/v3
plugins:
manifests.sdk.operatorframework.io/v2: {}
scorecard.sdk.operatorframework.io/v2: {}
projectName: stackman
repo: github.com/ethereum-optimism/optimism/go/stackman
resources:
- api:
crdVersion: v1
namespaced: true
controller: true
domain: optimism-stacks.net
group: stack
kind: CliqueL1
path: github.com/ethereum-optimism/optimism/go/stackman/api/v1
version: v1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: optimism-stacks.net
group: stack
kind: Deployer
path: github.com/ethereum-optimism/optimism/go/stackman/api/v1
version: v1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: optimism-stacks.net
group: stack
kind: DataTransportLayer
path: github.com/ethereum-optimism/optimism/go/stackman/api/v1
version: v1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: optimism-stacks.net
group: stack
kind: Sequencer
path: github.com/ethereum-optimism/optimism/go/stackman/api/v1
version: v1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: optimism-stacks.net
group: stack
kind: BatchSubmitter
path: github.com/ethereum-optimism/optimism/go/stackman/api/v1
version: v1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: optimism-stacks.net
group: stack
kind: GasOracle
path: github.com/ethereum-optimism/optimism/go/stackman/api/v1
version: v1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: optimism-stacks.net
group: stack
kind: Actor
path: github.com/ethereum-optimism/optimism/go/stackman/api/v1
version: v1
version: "3"
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// ActorSpec defines the desired state of Actor
type ActorSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Image is the container image to run for the actor tests (optional).
	Image string `json:"image,omitempty"`
	// L1URL and L2URL are the L1 and L2 RPC endpoints the actor targets;
	// both lack omitempty, so they are always serialized.
	L1URL string `json:"l1_url"`
	L2URL string `json:"l2_url"`
	// PrivateKey may be given inline or via a ValueFrom source (see Valuer).
	PrivateKey *Valuer `json:"private_key,omitempty"`
	AddressManagerAddress string `json:"address_manager_address"`
	// TestFilename presumably selects which actor test file to run —
	// confirm against the controller that consumes this spec.
	TestFilename string `json:"test_filename,omitempty"`
	Concurrency int `json:"concurrency,omitempty"`
	// RunForMS / RunCount / ThinkTimeMS tune run duration, iteration count,
	// and per-iteration pause; units per the field names (ms) — verify in
	// the actor runner.
	RunForMS int `json:"run_for_ms,omitempty"`
	RunCount int `json:"run_count,omitempty"`
	ThinkTimeMS int `json:"think_time_ms,omitempty"`
	// Env is extra environment passed to the actor container.
	Env []corev1.EnvVar `json:"env,omitempty"`
}
// ActorStatus defines the observed state of Actor
type ActorStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// Actor is the Schema for the actors API
type Actor struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ActorSpec `json:"spec,omitempty"`
Status ActorStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// ActorList contains a list of Actor
type ActorList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Actor `json:"items"`
}
func init() {
SchemeBuilder.Register(&Actor{}, &ActorList{})
}
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// BatchSubmitterSpec defines the desired state of BatchSubmitter
type BatchSubmitterSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Image is the container image to run (optional).
	Image string `json:"image,omitempty"`
	// L1/L2 endpoint URLs with per-endpoint wait timeouts; the timeout
	// semantics live in the controller — confirm there.
	L1URL string `json:"l1_url,omitempty"`
	L1TimeoutSeconds int `json:"l1_timeout_seconds,omitempty"`
	L2URL string `json:"l2_url,omitempty"`
	L2TimeoutSeconds int `json:"l2_timeout_seconds,omitempty"`
	// DeployerURL points at the deployer service this component waits on —
	// presumably; verify in the controller.
	DeployerURL string `json:"deployer_url,omitempty"`
	DeployerTimeoutSeconds int `json:"deployer_timeout_seconds,omitempty"`
	// Env is extra environment passed to the container.
	Env []corev1.EnvVar `json:"env,omitempty"`
}
// BatchSubmitterStatus defines the observed state of BatchSubmitter
type BatchSubmitterStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// BatchSubmitter is the Schema for the batchsubmitters API
type BatchSubmitter struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec BatchSubmitterSpec `json:"spec,omitempty"`
Status BatchSubmitterStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// BatchSubmitterList contains a list of BatchSubmitter
type BatchSubmitterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []BatchSubmitter `json:"items"`
}
func init() {
SchemeBuilder.Register(&BatchSubmitter{}, &BatchSubmitterList{})
}
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// CliqueL1Spec defines the desired state of CliqueL1
type CliqueL1Spec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Image is the container image to run for this clique L1 node (optional).
	Image string `json:"image,omitempty"`
	// GenesisFile optionally supplies the chain genesis, inline or from a
	// ValueFrom source (see Valuer).
	GenesisFile *Valuer `json:"genesis_file,omitempty"`
	// SealerPrivateKey has no omitempty, so it is always serialized.
	SealerPrivateKey *Valuer `json:"sealer_private_key"`
	SealerAddress string `json:"sealer_address,omitempty"`
	ChainID int `json:"chain_id,omitempty"`
	// DataPVC, when set, backs the node's data with a persistent volume
	// claim (see PVCConfig) — mount details are in the controller.
	DataPVC *PVCConfig `json:"data_pvc,omitempty"`
	// AdditionalArgs are presumably extra CLI flags appended to the node
	// command — verify in the controller.
	AdditionalArgs []string `json:"additional_args,omitempty"`
}
// CliqueL1Status defines the observed state of CliqueL1
type CliqueL1Status struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// CliqueL1 is the Schema for the cliquel1s API
type CliqueL1 struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CliqueL1Spec `json:"spec,omitempty"`
Status CliqueL1Status `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// CliqueL1List contains a list of CliqueL1
type CliqueL1List struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []CliqueL1 `json:"items"`
}
func init() {
SchemeBuilder.Register(&CliqueL1{}, &CliqueL1List{})
}
package v1
import (
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)
// Valuer holds a value that can be supplied either inline (Value) or by
// reference to a Kubernetes EnvVarSource such as a secret or config-map
// key (ValueFrom).
type Valuer struct {
	Value string `json:"value,omitempty"`
	ValueFrom *corev1.EnvVarSource `json:"value_from,omitempty"`
}

// String renders the Valuer as YAML (useful for logging/debugging).
// It panics if marshaling fails.
func (v *Valuer) String() string {
	out, err := yaml.Marshal(v)
	if err != nil {
		panic(err)
	}
	return string(out)
}

// EnvVar converts the Valuer into a corev1.EnvVar with the given name,
// carrying over both the inline value and the ValueFrom source.
func (v *Valuer) EnvVar(name string) corev1.EnvVar {
	return corev1.EnvVar{
		Name: name,
		Value: v.Value,
		ValueFrom: v.ValueFrom,
	}
}
// PVCConfig describes a persistent volume claim: its name and, optionally,
// the requested storage quantity.
type PVCConfig struct {
	Name string `json:"name"`
	Storage *resource.Quantity `json:"storage,omitempty"`
}
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// DataTransportLayerSpec defines the desired state of DataTransportLayer
type DataTransportLayerSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Image is the container image to run (optional).
	Image string `json:"image,omitempty"`
	// L1 endpoint URL plus a wait timeout; semantics live in the
	// controller — confirm there.
	L1URL string `json:"l1_url,omitempty"`
	L1TimeoutSeconds int `json:"l1_timeout_seconds,omitempty"`
	DeployerURL string `json:"deployer_url,omitempty"`
	DeployerTimeoutSeconds int `json:"deployer_timeout_seconds,omitempty"`
	// DataPVC, when set, backs the DTL's data with a persistent volume
	// claim (see PVCConfig).
	DataPVC *PVCConfig `json:"data_pvc,omitempty"`
	// Env is extra environment passed to the container.
	Env []v1.EnvVar `json:"env,omitempty"`
}
// DataTransportLayerStatus defines the observed state of DataTransportLayer
type DataTransportLayerStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// DataTransportLayer is the Schema for the datatransportlayers API
type DataTransportLayer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec DataTransportLayerSpec `json:"spec,omitempty"`
Status DataTransportLayerStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// DataTransportLayerList contains a list of DataTransportLayer
type DataTransportLayerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []DataTransportLayer `json:"items"`
}
func init() {
SchemeBuilder.Register(&DataTransportLayer{}, &DataTransportLayerList{})
}
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// DeployerSpec defines the desired state of Deployer
type DeployerSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Image is the container image to run (optional).
	Image string `json:"image,omitempty"`
	// L1URL is the L1 RPC endpoint the deployer targets; the timeout's
	// exact semantics live in the controller — confirm there.
	L1URL string `json:"l1_url,omitempty"`
	L1TimeoutSeconds int `json:"l1_timeout_seconds,omitempty"`
	// Env is extra environment passed to the container.
	Env []corev1.EnvVar `json:"env,omitempty"`
}
// DeployerStatus defines the observed state of Deployer
type DeployerStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// Deployer is the Schema for the deployers API
type Deployer struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec is the desired state; Status is the last observed state.
	Spec   DeployerSpec   `json:"spec,omitempty"`
	Status DeployerStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true

// DeployerList contains a list of Deployer.
// It is the wire format returned by LIST/WATCH requests.
type DeployerList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items holds the individual Deployer objects in this list.
	Items []Deployer `json:"items"`
}
// init registers the Deployer kinds with the package's SchemeBuilder.
func init() {
	SchemeBuilder.Register(&Deployer{}, &DeployerList{})
}
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// GasOracleSpec defines the desired state of GasOracle
//
// NOTE(review): this file imports k8s.io/api/core/v1 under the alias "v1",
// while sibling files use "corev1"; consider unifying the alias.
type GasOracleSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Image is the container image to run for the gas oracle.
	Image string `json:"image,omitempty"`
	// L1URL is the L1 RPC endpoint the gas oracle connects to.
	L1URL string `json:"l1_url,omitempty"`
	// L1TimeoutSeconds bounds waiting on the L1 endpoint — TODO confirm semantics in controller.
	L1TimeoutSeconds int `json:"l1_timeout_seconds,omitempty"`
	// L2URL is the L2 RPC endpoint the gas oracle connects to.
	L2URL string `json:"l2_url,omitempty"`
	// L2TimeoutSeconds bounds waiting on the L2 endpoint — TODO confirm semantics in controller.
	L2TimeoutSeconds int `json:"l2_timeout_seconds,omitempty"`
	// Env is appended to the gas oracle container's environment.
	Env []v1.EnvVar `json:"env,omitempty"`
}
// GasOracleStatus defines the observed state of GasOracle
type GasOracleStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// (no status fields are tracked yet)
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// GasOracle is the Schema for the gasoracles API
type GasOracle struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec is the desired state; Status is the last observed state.
	Spec   GasOracleSpec   `json:"spec,omitempty"`
	Status GasOracleStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true

// GasOracleList contains a list of GasOracle.
// It is the wire format returned by LIST/WATCH requests.
type GasOracleList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items holds the individual GasOracle objects in this list.
	Items []GasOracle `json:"items"`
}
// init registers the GasOracle kinds with the package's SchemeBuilder.
func init() {
	SchemeBuilder.Register(&GasOracle{}, &GasOracleList{})
}
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1 contains API Schema definitions for the stack v1 API group
//+kubebuilder:object:generate=true
//+groupName=stack.optimism-stacks.net
package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
	// GroupVersion is group version used to register these objects
	GroupVersion = schema.GroupVersion{Group: "stack.optimism-stacks.net", Version: "v1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	// Callers pass this to their manager/client scheme setup.
	AddToScheme = SchemeBuilder.AddToScheme
)
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// SequencerSpec defines the desired state of Sequencer
type SequencerSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Image is the container image to run for the sequencer.
	Image string `json:"image,omitempty"`
	// L1URL is the L1 RPC endpoint the sequencer connects to.
	L1URL string `json:"l1_url,omitempty"`
	// L1TimeoutSeconds bounds waiting on the L1 endpoint — TODO confirm semantics in controller.
	L1TimeoutSeconds int `json:"l1_timeout_seconds,omitempty"`
	// DeployerURL is the endpoint of the deployer service.
	DeployerURL string `json:"deployer_url,omitempty"`
	// DeployerTimeoutSeconds bounds waiting on the deployer endpoint.
	DeployerTimeoutSeconds int `json:"deployer_timeout_seconds,omitempty"`
	// DTLURL is the endpoint of the data transport layer service.
	DTLURL string `json:"dtl_url,omitempty"`
	// DTLTimeoutSeconds bounds waiting on the DTL endpoint.
	DTLTimeoutSeconds int `json:"dtl_timeout_seconds,omitempty"`
	// DataPVC configures the persistent volume claim backing the
	// sequencer's data directory (PVCConfig is declared elsewhere in this package).
	DataPVC *PVCConfig `json:"data_pvc,omitempty"`
	// Env is appended to the sequencer container's environment.
	Env []corev1.EnvVar `json:"env,omitempty"`
	// AdditionalArgs is extra command-line arguments passed to the sequencer.
	AdditionalArgs []string `json:"additional_args,omitempty"`
}
// SequencerStatus defines the observed state of Sequencer
type SequencerStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// (no status fields are tracked yet)
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// Sequencer is the Schema for the sequencers API
type Sequencer struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec is the desired state; Status is the last observed state.
	Spec   SequencerSpec   `json:"spec,omitempty"`
	Status SequencerStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true

// SequencerList contains a list of Sequencer.
// It is the wire format returned by LIST/WATCH requests.
type SequencerList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items holds the individual Sequencer objects in this list.
	Items []Sequencer `json:"items"`
}
// init registers the Sequencer kinds with the package's SchemeBuilder.
func init() {
	SchemeBuilder.Register(&Sequencer{}, &SequencerList{})
}
This diff is collapsed.
---
# CustomResourceDefinition for CliqueL1 (generated by controller-gen from the
# Go API types — regenerate with `make manifests`; do not edit by hand).
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.7.0
  creationTimestamp: null
  name: cliquel1s.stack.optimism-stacks.net
spec:
  group: stack.optimism-stacks.net
  names:
    kind: CliqueL1
    listKind: CliqueL1List
    plural: cliquel1s
    singular: cliquel1
  scope: Namespaced
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        description: CliqueL1 is the Schema for the cliquel1s API
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: CliqueL1Spec defines the desired state of CliqueL1
            type: object
          status:
            description: CliqueL1Status defines the observed state of CliqueL1
            properties:
              pod_names:
                items:
                  type: string
                type: array
            required:
            - pod_names
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
# config/crd/kustomization.yaml
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/stack.optimism-stacks.net_cliquel1s.yaml
- bases/stack.optimism-stacks.net_deployers.yaml
- bases/stack.optimism-stacks.net_datatransportlayers.yaml
- bases/stack.optimism-stacks.net_sequencers.yaml
- bases/stack.optimism-stacks.net_batchsubmitters.yaml
- bases/stack.optimism-stacks.net_gasoracles.yaml
- bases/stack.optimism-stacks.net_actors.yaml
#+kubebuilder:scaffold:crdkustomizeresource

# NOTE(review): with all entries commented out this key parses as null, which
# kustomize tolerates; drop the key entirely if the patches remain unused.
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_cliquel1s.yaml
#- patches/webhook_in_deployers.yaml
#- patches/webhook_in_datatransportlayers.yaml
#- patches/webhook_in_sequencers.yaml
#- patches/webhook_in_batchsubmitters.yaml
#- patches/webhook_in_gasoracles.yaml
#- patches/webhook_in_actors.yaml
#+kubebuilder:scaffold:crdkustomizewebhookpatch

# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_cliquel1s.yaml
#- patches/cainjection_in_deployers.yaml
#- patches/cainjection_in_datatransportlayers.yaml
#- patches/cainjection_in_sequencers.yaml
#- patches/cainjection_in_batchsubmitters.yaml
#- patches/cainjection_in_gasoracles.yaml
#- patches/cainjection_in_actors.yaml
#+kubebuilder:scaffold:crdkustomizecainjectionpatch

# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
- kustomizeconfig.yaml
# config/crd/kustomizeconfig.yaml
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
nameReference:
- kind: Service
  version: v1
  fieldSpecs:
  - kind: CustomResourceDefinition
    version: v1
    group: apiextensions.k8s.io
    path: spec/conversion/webhook/clientConfig/service/name

namespace:
- kind: CustomResourceDefinition
  version: v1
  group: apiextensions.k8s.io
  path: spec/conversion/webhook/clientConfig/service/namespace
  # create: false — only substitute the namespace where the path already exists
  create: false

varReference:
- path: metadata/annotations
# NOTE(review): the following are seven separate single-document patch files
# (config/crd/patches/cainjection_in_<resource>.yaml) concatenated by the diff
# view without '---' separators; each must live in its own file to be valid YAML.

# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
  name: actors.stack.optimism-stacks.net
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
  name: batchsubmitters.stack.optimism-stacks.net
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
  name: cliquel1s.stack.optimism-stacks.net
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
  name: datatransportlayers.stack.optimism-stacks.net
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
  name: deployers.stack.optimism-stacks.net
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
  name: gasoracles.stack.optimism-stacks.net
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
  name: sequencers.stack.optimism-stacks.net
# NOTE(review): the following are seven separate single-document patch files
# (config/crd/patches/webhook_in_<resource>.yaml) concatenated by the diff
# view without '---' separators; each must live in its own file to be valid YAML.

# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: actors.stack.optimism-stacks.net
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        service:
          namespace: system
          name: webhook-service
          path: /convert
      conversionReviewVersions:
      - v1
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: batchsubmitters.stack.optimism-stacks.net
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        service:
          namespace: system
          name: webhook-service
          path: /convert
      conversionReviewVersions:
      - v1
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: cliquel1s.stack.optimism-stacks.net
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        service:
          namespace: system
          name: webhook-service
          path: /convert
      conversionReviewVersions:
      - v1
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: datatransportlayers.stack.optimism-stacks.net
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        service:
          namespace: system
          name: webhook-service
          path: /convert
      conversionReviewVersions:
      - v1
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: deployers.stack.optimism-stacks.net
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        service:
          namespace: system
          name: webhook-service
          path: /convert
      conversionReviewVersions:
      - v1
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: gasoracles.stack.optimism-stacks.net
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        service:
          namespace: system
          name: webhook-service
          path: /convert
      conversionReviewVersions:
      - v1
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: sequencers.stack.optimism-stacks.net
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        service:
          namespace: system
          name: webhook-service
          path: /convert
      conversionReviewVersions:
      - v1
# config/default/kustomization.yaml
# Adds namespace to all resources.
namespace: stackman-system

# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: stackman-

# Labels to add to all resources and selectors.
#commonLabels:
#  someName: someValue

# NOTE(review): `bases` is deprecated in kustomize v4+ in favour of `resources`;
# fine for the kustomize version in use, but worth migrating.
bases:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
- ../prometheus

#patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
#- manager_auth_proxy_patch.yaml

# Mount the controller config file for loading manager configurations
# through a ComponentConfig type
#- manager_config_patch.yaml

# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml

# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml

# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
#  objref:
#    kind: Certificate
#    group: cert-manager.io
#    version: v1
#    name: serving-cert # this name should match the one in certificate.yaml
#  fieldref:
#    fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
#  objref:
#    kind: Certificate
#    group: cert-manager.io
#    version: v1
#    name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
#  objref:
#    kind: Service
#    version: v1
#    name: webhook-service
#  fieldref:
#    fieldpath: metadata.namespace
#- name: SERVICE_NAME
#  objref:
#    kind: Service
#    version: v1
#    name: webhook-service
# config/default/manager_auth_proxy_patch.yaml
# This patch inject a sidecar container which is a HTTP proxy for the
# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
spec:
  template:
    spec:
      containers:
      - name: kube-rbac-proxy
        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
        args:
        - "--secure-listen-address=0.0.0.0:8443"
        - "--upstream=http://127.0.0.1:8080/"
        - "--logtostderr=true"
        # NOTE(review): verbosity 10 is debug-level and very chatty; consider
        # lowering for production deployments.
        - "--v=10"
        ports:
        - containerPort: 8443
          protocol: TCP
          name: https
      - name: manager
        args:
        - "--health-probe-bind-address=:8081"
        - "--metrics-bind-address=127.0.0.1:8080"
        - "--leader-elect"
# config/default/manager_config_patch.yaml
# Mounts the ComponentConfig file into the manager container and switches the
# manager to load its configuration from that file.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
spec:
  template:
    spec:
      containers:
      - name: manager
        args:
        - "--config=controller_manager_config.yaml"
        volumeMounts:
        - name: manager-config
          mountPath: /controller_manager_config.yaml
          subPath: controller_manager_config.yaml
      volumes:
      - name: manager-config
        configMap:
          # name is generated by the configMapGenerator in config/manager
          name: manager-config
# config/manager/controller_manager_config.yaml
# ComponentConfig consumed by the manager via the --config flag.
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
kind: ControllerManagerConfig
health:
  healthProbeBindAddress: :8081
metrics:
  bindAddress: 127.0.0.1:8080
webhook:
  port: 9443
leaderElection:
  leaderElect: true
  # resourceName is the lease object used for leader election; the random
  # prefix keeps it unique to this operator.
  resourceName: 8103f40b.optimism-stacks.net
# config/manager/kustomization.yaml
resources:
- manager.yaml

generatorOptions:
  # keep a stable ConfigMap name so the Deployment's volume reference resolves
  disableNameSuffixHash: true

configMapGenerator:
- files:
  - controller_manager_config.yaml
  name: manager-config
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
  newName: controller
  newTag: latest
# config/manager/manager.yaml
# Namespace and Deployment for the controller manager.
apiVersion: v1
kind: Namespace
metadata:
  labels:
    control-plane: controller-manager
  name: system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
  labels:
    control-plane: controller-manager
spec:
  selector:
    matchLabels:
      control-plane: controller-manager
  # single replica; leader election guards against accidental scale-up
  replicas: 1
  template:
    metadata:
      labels:
        control-plane: controller-manager
    spec:
      securityContext:
        runAsNonRoot: true
      containers:
      - command:
        - /manager
        args:
        - --leader-elect
        image: controller:latest
        name: manager
        securityContext:
          allowPrivilegeEscalation: false
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8081
          initialDelaySeconds: 15
          periodSeconds: 20
        readinessProbe:
          httpGet:
            path: /readyz
            port: 8081
          initialDelaySeconds: 5
          periodSeconds: 10
        resources:
          limits:
            cpu: 200m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 20Mi
      serviceAccountName: controller-manager
      terminationGracePeriodSeconds: 10
# config/manifests/kustomization.yaml
# These resources constitute the fully configured set of manifests
# used to generate the 'manifests/' directory in a bundle.
resources:
- bases/stackman.clusterserviceversion.yaml
- ../default
- ../samples
- ../scorecard

# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix.
# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager.
# These patches remove the unnecessary "cert" volume and its manager container volumeMount.
#patchesJson6902:
#- target:
#    group: apps
#    version: v1
#    kind: Deployment
#    name: controller-manager
#    namespace: system
#  patch: |-
#    # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs.
#    # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment.
#    - op: remove
#      path: /spec/template/spec/containers/1/volumeMounts/0
#    # Remove the "cert" volume, since OLM will create and mount a set of certs.
#    # Update the indices in this path if adding or removing volumes in the manager's Deployment.
#    - op: remove
#      path: /spec/template/spec/volumes/0
# config/prometheus/monitor.yaml
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    control-plane: controller-manager
  name: controller-manager-metrics-monitor
  namespace: system
spec:
  endpoints:
  - path: /metrics
    port: https
    scheme: https
    # NOTE(review): bearerTokenFile is deprecated in newer prometheus-operator
    # releases in favour of the `authorization` field — fine for the operator
    # version in use; confirm before upgrading.
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    tlsConfig:
      # metrics endpoint is served by kube-rbac-proxy with a self-signed cert
      insecureSkipVerify: true
  selector:
    matchLabels:
      control-plane: controller-manager
# config/rbac/actor_editor_role.yaml
# permissions for end users to edit actors.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: actor-editor-role
rules:
- apiGroups:
  - stack.optimism-stacks.net
  resources:
  - actors
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - stack.optimism-stacks.net
  resources:
  - actors/status
  verbs:
  # status subresource is read-only for editors; the controller owns writes
  - get
# config/rbac/actor_viewer_role.yaml
# permissions for end users to view actors.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: actor-viewer-role
rules:
- apiGroups:
  - stack.optimism-stacks.net
  resources:
  - actors
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - stack.optimism-stacks.net
  resources:
  - actors/status
  verbs:
  - get
# config/rbac/auth_proxy_client_clusterrole.yaml
# Grants read access to the /metrics endpoint exposed behind kube-rbac-proxy.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metrics-reader
rules:
- nonResourceURLs:
  - "/metrics"
  verbs:
  - get
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment