Commit a4689784 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into pops/main/challenger

parents ce092bde 351a53e8
......@@ -3,6 +3,7 @@ version: 2.1
orbs:
go: circleci/go@1.5.0
gcp-cli: circleci/gcp-cli@3.0.1
slack: circleci/slack@4.10.1
commands:
gcp-oidc-authenticate:
description: "Authenticate with GCP using a CircleCI OIDC token."
......@@ -611,10 +612,20 @@ jobs:
- run:
name: markdown lint
command: yarn lint:specs:check
bedrock-markdown-links:
machine:
image: ubuntu-2204:2022.07.1
steps:
- checkout
- run:
name: link lint
command: |
docker run --init -it -v `pwd`:/input lycheeverse/lychee --verbose --no-progress --exclude-loopback --exclude twitter.com --exclude-mail /input/README.md "/input/specs/**/*.md"
make bedrock-markdown-links
- slack/notify:
channel: C055R639XT9 #notify-link-check
event: fail
template: basic_fail_1
fuzz-op-node:
docker:
......@@ -1105,6 +1116,21 @@ jobs:
steps:
- run: echo Done
fpp-verify:
docker:
- image: cimg/go:1.19
steps:
- checkout
- run:
name: verify-goerli
command: |
make verify-goerli
working_directory: op-program
- slack/notify:
channel: C03N11M0BBN
event: fail
template: basic_fail_1
workflows:
main:
jobs:
......@@ -1560,6 +1586,21 @@ workflows:
- oplabs-gcr-release
requires:
- hold
- docker-build:
name: proxyd-docker-release
filters:
tags:
only: /^proxyd\/v.*/
branches:
ignore: /.*/
docker_file: proxyd/Dockerfile
docker_name: proxyd
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
docker_context: .
context:
- oplabs-gcr-release
requires:
- hold
release-ci-builder:
jobs:
- docker-publish:
......@@ -1575,3 +1616,28 @@ workflows:
docker_context: ./ops/docker/ci-builder
context:
- oplabs-gcr
scheduled-fpp:
triggers:
- schedule:
# run every 6 hours
cron: "0 0,6,12,18 * * *"
filters:
branches:
only: [ "develop" ]
jobs:
- fpp-verify:
context:
- slack
- oplabs-fpp-nodes
scheduled-link-check:
triggers:
- schedule:
# Run once a day, only on the develop branch
cron: "0 0 * * *"
filters:
branches:
only: [ "develop" ]
jobs:
- bedrock-markdown-links:
context: slack
......@@ -120,3 +120,6 @@ tag-bedrock-go-modules:
update-op-geth:
./ops/scripts/update-op-geth.py
.PHONY: update-op-geth
bedrock-markdown-links:
docker run --init -it -v `pwd`:/input lycheeverse/lychee --verbose --no-progress --exclude-loopback --exclude twitter.com --exclude-mail /input/README.md "/input/specs/**/*.md"
......@@ -33,6 +33,9 @@ test:
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint -e "errors.As" -e "errors.Is"
verify-goerli: op-program-host op-program-client
env GO111MODULE=on go run ./verify/cmd/goerli.go $$L1URL $$L2URL
.PHONY: \
op-program \
clean \
......
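The verify-goerli target above reads its RPC endpoints from the L1URL and L2URL environment variables, so a local run looks roughly like this (both endpoint URLs are placeholders):

```sh
L1URL=https://goerli.example/rpc \
L2URL=https://optimism-goerli.example/rpc \
make -C op-program verify-goerli
```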
package main
import (
"context"
"fmt"
"math/big"
"os"
"os/exec"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
)
const agreedBlockTrailingDistance = 100
func main() {
if len(os.Args) != 3 {
_, _ = fmt.Fprintln(os.Stderr, "Must specify L1 RPC URL and L2 RPC URL as arguments")
os.Exit(2)
}
l1RpcUrl := os.Args[1]
l2RpcUrl := os.Args[2]
goerliOutputAddress := common.HexToAddress("0xE6Dfba0953616Bacab0c9A8ecb3a9BBa77FC15c0")
err := Run(l1RpcUrl, l2RpcUrl, goerliOutputAddress)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "Failed: %v\n", err.Error())
os.Exit(1)
}
}
func Run(l1RpcUrl string, l2RpcUrl string, l2OracleAddr common.Address) error {
ctx := context.Background()
l1RpcClient, err := rpc.Dial(l1RpcUrl)
if err != nil {
return fmt.Errorf("dial L1 client: %w", err)
}
l1Client := ethclient.NewClient(l1RpcClient)
l2RpcClient, err := rpc.Dial(l2RpcUrl)
if err != nil {
return fmt.Errorf("dial L2 client: %w", err)
}
l2Client := ethclient.NewClient(l2RpcClient)
outputOracle, err := bindings.NewL2OutputOracle(l2OracleAddr, l1Client)
if err != nil {
return fmt.Errorf("create output oracle bindings: %w", err)
}
// Find L2 finalized head. This is far enough back that we know it's submitted to L1 and won't be re-orged
l2FinalizedHead, err := l2Client.BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber)))
if err != nil {
return fmt.Errorf("get l2 safe head: %w", err)
}
// Find L1 finalized block. Can't be re-orged and must contain all batches for the L2 finalized block
l1BlockNum := big.NewInt(int64(rpc.FinalizedBlockNumber))
l1HeadBlock, err := l1Client.BlockByNumber(ctx, l1BlockNum)
if err != nil {
return fmt.Errorf("find L1 head: %w", err)
}
// Get the most recently published L2 output from before the finalized block
callOpts := &bind.CallOpts{Context: ctx}
outputIndex, err := outputOracle.GetL2OutputIndexAfter(callOpts, l2FinalizedHead.Number())
if err != nil {
return fmt.Errorf("get output index after finalized block: %w", err)
}
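// GetL2OutputIndexAfter returns the index of the first output checkpointing a block at or after the finalized head, so step back one index to get the most recent output before it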
outputIndex = outputIndex.Sub(outputIndex, big.NewInt(1))
output, err := outputOracle.GetL2Output(callOpts, outputIndex)
if err != nil {
return fmt.Errorf("retrieve latest output: %w", err)
}
l1Head := l1HeadBlock.Hash()
l2Claim := common.Hash(output.OutputRoot)
l2BlockNumber := output.L2BlockNumber
// Use an agreed starting L2 block some distance before the block the output claim is from
agreedBlockNumber := uint64(0)
if l2BlockNumber.Uint64() > agreedBlockTrailingDistance {
agreedBlockNumber = l2BlockNumber.Uint64() - agreedBlockTrailingDistance
}
l2AgreedBlock, err := l2Client.BlockByNumber(ctx, big.NewInt(int64(agreedBlockNumber)))
if err != nil {
return fmt.Errorf("retrieve agreed l2 block: %w", err)
}
l2Head := l2AgreedBlock.Hash()
temp, err := os.MkdirTemp("", "oracledata")
if err != nil {
return fmt.Errorf("create temp dir: %w", err)
}
defer func() {
err := os.RemoveAll(temp)
if err != nil {
println("Failed to remove temp dir:" + err.Error())
}
}()
fmt.Printf("Using temp dir: %s\n", temp)
args := []string{
"--network", "goerli",
"--exec", "./bin/op-program-client",
"--datadir", temp,
"--l1.head", l1Head.Hex(),
"--l2.head", l2Head.Hex(),
"--l2.claim", l2Claim.Hex(),
"--l2.blocknumber", l2BlockNumber.String(),
}
fmt.Printf("Configuration: %s\n", args)
fmt.Println("Running in online mode")
err = runFaultProofProgram(ctx, append(args, "--l1", l1RpcUrl, "--l2", l2RpcUrl))
if err != nil {
return fmt.Errorf("online mode failed: %w", err)
}
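// The offline run re-executes the same claim using only the pre-image data the online run cached in the temp datadir, so it needs no RPC access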
fmt.Println("Running in offline mode")
err = runFaultProofProgram(ctx, args)
if err != nil {
return fmt.Errorf("offline mode failed: %w", err)
}
return nil
}
func runFaultProofProgram(ctx context.Context, args []string) error {
ctx, cancel := context.WithTimeout(ctx, 30*time.Minute)
defer cancel()
cmd := exec.CommandContext(ctx, "./bin/op-program", args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
......@@ -132,6 +132,8 @@ type Backend struct {
stripTrailingXFF bool
proxydIP string
skipPeerCountCheck bool
maxDegradedLatencyThreshold time.Duration
maxLatencyThreshold time.Duration
maxErrorRateThreshold float64
......@@ -207,6 +209,12 @@ func WithProxydIP(ip string) BackendOpt {
}
}
func WithSkipPeerCountCheck(skipPeerCountCheck bool) BackendOpt {
return func(b *Backend) {
b.skipPeerCountCheck = skipPeerCountCheck
}
}
func WithMaxDegradedLatencyThreshold(maxDegradedLatencyThreshold time.Duration) BackendOpt {
return func(b *Backend) {
b.maxDegradedLatencyThreshold = maxDegradedLatencyThreshold
......
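For context, these setters follow proxyd's functional-option pattern: each returns a closure that mutates the Backend being built. A minimal sketch of how a caller composes them, mirroring the NewBackend call in the server wiring further down (the surrounding variables are assumed):

```go
be := NewBackend(name, rpcURL, wsURL, lim, rpcRequestSemaphore,
	WithSkipPeerCountCheck(true),                   // trust this backend without polling net_peerCount
	WithMaxDegradedLatencyThreshold(5*time.Second), // flag the backend as degraded above 5s of latency
)
```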
......@@ -81,17 +81,18 @@ type BackendOptions struct {
}
type BackendConfig struct {
Username string `toml:"username"`
Password string `toml:"password"`
RPCURL string `toml:"rpc_url"`
WSURL string `toml:"ws_url"`
WSPort int `toml:"ws_port"`
MaxRPS int `toml:"max_rps"`
MaxWSConns int `toml:"max_ws_conns"`
CAFile string `toml:"ca_file"`
ClientCertFile string `toml:"client_cert_file"`
ClientKeyFile string `toml:"client_key_file"`
StripTrailingXFF bool `toml:"strip_trailing_xff"`
Username string `toml:"username"`
Password string `toml:"password"`
RPCURL string `toml:"rpc_url"`
WSURL string `toml:"ws_url"`
WSPort int `toml:"ws_port"`
MaxRPS int `toml:"max_rps"`
MaxWSConns int `toml:"max_ws_conns"`
CAFile string `toml:"ca_file"`
ClientCertFile string `toml:"client_cert_file"`
ClientKeyFile string `toml:"client_key_file"`
StripTrailingXFF bool `toml:"strip_trailing_xff"`
SkipPeerCountCheck bool `toml:"consensus_skip_peer_count"`
}
type BackendsConfig map[string]*BackendConfig
......
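With the new consensus_skip_peer_count tag, opting a single backend out of the check becomes a per-backend TOML setting (backend name reused from the example config below; the URL is a placeholder):

```toml
[backends.alchemy]
rpc_url = ""
consensus_skip_peer_count = true
```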
......@@ -205,7 +205,7 @@ func NewConsensusPoller(bg *BackendGroup, opts ...ConsensusOpt) *ConsensusPoller
func (cp *ConsensusPoller) UpdateBackend(ctx context.Context, be *Backend) {
bs := cp.backendState[be]
if time.Now().Before(bs.bannedUntil) {
log.Warn("skipping backend banned", "backend", be.Name, "bannedUntil", bs.bannedUntil)
log.Debug("skipping backend banned", "backend", be.Name, "bannedUntil", bs.bannedUntil)
return
}
......@@ -227,10 +227,13 @@ func (cp *ConsensusPoller) UpdateBackend(ctx context.Context, be *Backend) {
return
}
peerCount, err := cp.getPeerCount(ctx, be)
if err != nil {
log.Warn("error updating backend", "name", be.Name, "err", err)
return
var peerCount uint64
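// When the check is skipped, peerCount keeps its zero value; callers comparing against minPeerCount below therefore also consult skipPeerCountCheck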
if !be.skipPeerCountCheck {
peerCount, err = cp.getPeerCount(ctx, be)
if err != nil {
log.Warn("error updating backend", "name", be.Name, "err", err)
return
}
}
latestBlockNumber, latestBlockHash, err := cp.fetchBlock(ctx, be, "latest")
......@@ -243,7 +246,7 @@ func (cp *ConsensusPoller) UpdateBackend(ctx context.Context, be *Backend) {
if changed {
RecordBackendLatestBlock(be, latestBlockNumber)
log.Info("backend state updated", "name", be.Name, "state", bs)
log.Debug("backend state updated", "name", be.Name, "state", bs)
}
}
......@@ -257,7 +260,7 @@ func (cp *ConsensusPoller) UpdateBackendGroupConsensus(ctx context.Context) {
for _, be := range cp.backendGroup.Backends {
peerCount, backendLatestBlockNumber, backendLatestBlockHash, lastUpdate := cp.getBackendState(be)
if peerCount < cp.minPeerCount {
if !be.skipPeerCountCheck && peerCount < cp.minPeerCount {
continue
}
if lastUpdate.Add(cp.maxUpdateThreshold).Before(time.Now()) {
......@@ -285,7 +288,7 @@ func (cp *ConsensusPoller) UpdateBackendGroupConsensus(ctx context.Context) {
filteredBackendsNames := make([]string, 0, len(cp.backendGroup.Backends))
if lowestBlock > currentConsensusBlockNumber {
log.Info("validating consensus on block", lowestBlock)
log.Debug("validating consensus on block", "lowestBlock", lowestBlock)
}
broken := false
......@@ -306,7 +309,7 @@ func (cp *ConsensusPoller) UpdateBackendGroupConsensus(ctx context.Context) {
bs := cp.backendState[be]
notUpdated := bs.lastUpdate.Add(cp.maxUpdateThreshold).Before(time.Now())
isBanned := time.Now().Before(bs.bannedUntil)
notEnoughPeers := bs.peerCount < cp.minPeerCount
notEnoughPeers := !be.skipPeerCountCheck && bs.peerCount < cp.minPeerCount
if !be.IsHealthy() || be.IsRateLimited() || !be.Online() || notUpdated || isBanned || notEnoughPeers {
filteredBackendsNames = append(filteredBackendsNames, be.Name)
continue
......@@ -338,7 +341,7 @@ func (cp *ConsensusPoller) UpdateBackendGroupConsensus(ctx context.Context) {
// walk one block behind and try again
proposedBlock -= 1
proposedBlockHash = ""
log.Info("no consensus, now trying", "block:", proposedBlock)
log.Debug("no consensus, now trying", "block:", proposedBlock)
}
}
......@@ -353,7 +356,7 @@ func (cp *ConsensusPoller) UpdateBackendGroupConsensus(ctx context.Context) {
cp.consensusGroup = consensusBackends
cp.consensusGroupMux.Unlock()
log.Info("group state", "proposedBlock", proposedBlock, "consensusBackends", strings.Join(consensusBackendsNames, ", "), "filteredBackends", strings.Join(filteredBackendsNames, ", "))
log.Debug("group state", "proposedBlock", proposedBlock, "consensusBackends", strings.Join(consensusBackendsNames, ", "), "filteredBackends", strings.Join(filteredBackendsNames, ", "))
}
// Unban removes any bans from the backends
......@@ -384,7 +387,7 @@ func (cp *ConsensusPoller) fetchBlock(ctx context.Context, be *Backend, block st
return
}
// isSyncing Convenient wrapper to check if the backend is syncing from the network
// getPeerCount Convenient wrapper to retrieve the current peer count from the backend
func (cp *ConsensusPoller) getPeerCount(ctx context.Context, be *Backend) (count uint64, err error) {
var rpcRes RPCRes
err = be.ForwardRPC(ctx, &rpcRes, "67", "net_peerCount")
......
......@@ -72,6 +72,9 @@ ca_file = ""
client_cert_file = ""
# Path to a custom client key file.
client_key_file = ""
# Allow a backend to skip the peer count check (default: false)
# consensus_skip_peer_count = true
[backends.alchemy]
rpc_url = ""
......
......@@ -157,6 +157,7 @@ func Start(config *Config) (*Server, func(), error) {
opts = append(opts, WithStrippedTrailingXFF())
}
opts = append(opts, WithProxydIP(os.Getenv("PROXYD_IP")))
opts = append(opts, WithSkipPeerCountCheck(cfg.SkipPeerCountCheck))
back := NewBackend(name, rpcURL, wsURL, lim, rpcRequestSemaphore, opts...)
backendNames = append(backendNames, name)
......
......@@ -3,9 +3,9 @@ package proxyd
import (
"encoding/json"
"errors"
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rpc"
)
type RewriteContext struct {
......@@ -159,15 +159,21 @@ func rewriteTagMap(rctx RewriteContext, m map[string]interface{}, key string) (b
}
func rewriteTag(rctx RewriteContext, current string) (string, bool, error) {
if current == "latest" {
jv, err := json.Marshal(current)
if err != nil {
return "", false, err
}
var bnh rpc.BlockNumberOrHash
err = bnh.UnmarshalJSON(jv)
if err != nil {
return "", false, err
}
if bnh.BlockNumber != nil && *bnh.BlockNumber == rpc.LatestBlockNumber {
return rctx.latest.String(), true, nil
} else if strings.HasPrefix(current, "0x") {
decode, err := hexutil.DecodeUint64(current)
if err != nil {
return current, false, err
}
b := hexutil.Uint64(decode)
if b > rctx.latest {
} else if bnh.BlockNumber != nil {
if hexutil.Uint64(bnh.BlockNumber.Int64()) > rctx.latest {
return "", false, ErrRewriteBlockOutOfRange
}
}
......
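The rewrite above replaces hand-rolled hex parsing with go-ethereum's rpc.BlockNumberOrHash, which also lets 32-byte block hashes (as in the new test case below) pass through unrewritten. A standalone sketch of the three parsing outcomes, assuming a tracked latest block of 100 (not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	latest := rpc.BlockNumber(100)
	for _, tag := range []string{
		`"latest"`, // rewritten to the tracked consensus block
		`"0x63"`,   // 99 <= latest: left as-is
		`"0x65"`,   // 101 > latest: proxyd returns ErrRewriteBlockOutOfRange
		`"0x1c4840bcb3de3ac403c0075b46c2c47d4396c5b624b6e1b2874ec04e8879b483"`, // block hash: untouched
	} {
		var bnh rpc.BlockNumberOrHash
		if err := bnh.UnmarshalJSON([]byte(tag)); err != nil {
			fmt.Println(tag, "->", err)
			continue
		}
		switch {
		case bnh.BlockNumber != nil && *bnh.BlockNumber == rpc.LatestBlockNumber:
			fmt.Println(tag, "-> rewrite to latest")
		case bnh.BlockNumber != nil && *bnh.BlockNumber > latest:
			fmt.Println(tag, "-> out of range")
		default:
			fmt.Println(tag, "-> pass through")
		}
	}
}
```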
......@@ -334,6 +334,18 @@ func TestRewriteRequest(t *testing.T) {
expected: RewriteOverrideError,
expectedErr: ErrRewriteBlockOutOfRange,
},
{
name: "eth_getStorageAt using rpc.BlockNumberOrHash",
args: args{
rctx: RewriteContext{latest: hexutil.Uint64(100)},
req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{
"0xae851f927ee40de99aabb7461c00f9622ab91d60",
"0x65a7ed542fb37fe237fdfbdd70b31598523fe5b32879e307bae27a0bd9581c08",
"0x1c4840bcb3de3ac403c0075b46c2c47d4396c5b624b6e1b2874ec04e8879b483"})},
res: nil,
},
expected: RewriteNone,
},
}
// generalize tests for other methods with same interface and behavior
......