Commit f23a7ea6 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into refcell/challenger/outputfilter

parents 145efd3a df11f76b
@@ -198,13 +198,15 @@ The ability to pause the bridge in case of emergency means that in the worst cas
#### Unfreezing the bridge via L1 soft fork
- In order to address the frozen funds, there is a potential final recovery mechanism we call the “L1 Soft Fork Upgrade Recovery” mechanism. This mechanism enables L1 to initiate a bridge upgrade with a soft fork, bypassing all other permissions within the Superchain bridge contracts. The mechanism is as follows:
+ In order to address the frozen funds, there is a potential final recovery mechanism, discussed by the L2 community, which we call the “L1 Soft Fork Upgrade Recovery” mechanism. This mechanism enables L1 to initiate a bridge upgrade with a soft fork, bypassing all other permissions within the Superchain bridge contracts. This approach may [introduce systemic risk](https://vitalik.ca/general/2023/05/21/dont_overload.html) to Ethereum and requires research and community buy-in before implementation. It is not required for implementing the Superchain and is documented here for research completeness. Without further research into its implications and safety, it is not an approach the team currently endorses.
- *Anyone* may propose an upgrade by submitting a transaction to a special bridge contract, along with a very large bond. This begins a two week challenge period. During this challenge period, *anyone* may submit a challenge which immediately *cancels* the upgrade and claims the bond. Under normal circumstances, it is impossible that an upgrade would go uncancelled for the required two weeks due to the large incentive provided for anyone to cancel the upgrade. However, if the upgrade is accompanied by a modification to Ethereum L1 validator software (the L1 soft fork), which ignores blocks that contain the cancellation transaction then it may succeed.
+ The mechanism is as follows:
- While a successful upgrade of this type would represent a soft fork of Ethereum L1, it would not incur long term technical debt to the Ethereum codebase because the soft fork logic can be removed once the upgrade has completed.
+ *Anyone* may propose an upgrade by submitting a transaction to a special bridge contract, along with a very large bond. This begins a two-week challenge period. During this challenge period, anyone may submit a challenge which immediately cancels the upgrade and claims the bond. Under normal circumstances, it is effectively impossible for an upgrade to go uncancelled for the required two weeks, given the large incentive for anyone to cancel it. However, if the upgrade is accompanied by a modification to Ethereum L1 validator software (the L1 soft fork) that ignores blocks containing the cancellation transaction, then it may succeed.
- We expect this escape hatch will never be used, but its very existence should deter malicious behavior.
+ While a successful upgrade of this type would represent a soft fork of Ethereum L1, it would not incur long-term technical debt in the Ethereum codebase, because the soft fork logic can be removed once the upgrade has completed.
+ We expect this escape hatch will never be used, but its very existence could deter malicious behavior.
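To make the incentive structure above concrete, here is a minimal Go sketch of the propose/challenge flow. It is purely illustrative: the real mechanism would live in an L1 bridge contract, and the names, bond handling, and hard-coded two-week window here are assumptions for intuition, not the committed design.

package sketch

import "time"

// upgradeProposal models the pending-upgrade state described above (hypothetical).
type upgradeProposal struct {
	bond      uint64    // very large bond posted by the proposer
	deadline  time.Time // end of the two-week challenge period
	cancelled bool
}

// propose opens the challenge window for a new upgrade proposal.
func propose(bond uint64, now time.Time) *upgradeProposal {
	return &upgradeProposal{bond: bond, deadline: now.Add(14 * 24 * time.Hour)}
}

// challenge cancels a pending upgrade and awards the bond to the challenger.
func (p *upgradeProposal) challenge(now time.Time) (awarded uint64, ok bool) {
	if p.cancelled || !now.Before(p.deadline) {
		return 0, false // already cancelled, or the window has closed
	}
	p.cancelled = true
	return p.bond, true
}

// finalized reports whether the upgrade survived the full challenge period
// uncancelled (normally only plausible under an accompanying L1 soft fork).
func (p *upgradeProposal) finalized(now time.Time) bool {
	return !p.cancelled && !now.Before(p.deadline)
}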
### The combination of these features results in a system satisfying the core Superchain properties
@@ -24,6 +24,7 @@ require (
github.com/libp2p/go-libp2p-pubsub v0.9.0
github.com/libp2p/go-libp2p-testing v0.12.0
github.com/mattn/go-isatty v0.0.17
+ github.com/multiformats/go-base32 v0.1.0
github.com/multiformats/go-multiaddr v0.8.0
github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/olekukonko/tablewriter v0.0.5
@@ -128,7 +129,6 @@ require (
github.com/moby/term v0.0.0-20221105221325-4eb28fa6025c // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
- github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.1.1 // indirect
package store
import (
"context"
"errors"
"fmt"
ds "github.com/ipfs/go-datastore"
"github.com/libp2p/go-libp2p/core/peerstore"
)
// extendedStore implements ExtendedPeerstore by composing a libp2p Peerstore,
// its CertifiedAddrBook view, and a scoreBook for persisted peer scores.
type extendedStore struct {
peerstore.Peerstore
peerstore.CertifiedAddrBook
*scoreBook
}
func NewExtendedPeerstore(ctx context.Context, ps peerstore.Peerstore, store ds.Batching) (ExtendedPeerstore, error) {
cab, ok := peerstore.GetCertifiedAddrBook(ps)
if !ok {
return nil, errors.New("peerstore should also be a certified address book")
}
sb, err := newScoreBook(ctx, store)
if err != nil {
return nil, fmt.Errorf("create scorebook: %w", err)
}
return &extendedStore{
Peerstore: ps,
CertifiedAddrBook: cab,
scoreBook: sb,
}, nil
}
var _ ExtendedPeerstore = (*extendedStore)(nil)
package store
import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
)
type PeerScores struct {
Gossip float64
}
type ScoreType int
const (
TypeGossip ScoreType = iota
)
// ScoreDatastore defines a type-safe API for getting and setting libp2p peer score information
type ScoreDatastore interface {
// GetPeerScores returns the current scores for the specified peer
GetPeerScores(id peer.ID) (PeerScores, error)
// SetScore stores the latest score for the specified peer and score type
SetScore(id peer.ID, scoreType ScoreType, score float64) error
}
// ExtendedPeerstore defines a type-safe API to work with additional peer metadata based on a libp2p peerstore.Peerstore
type ExtendedPeerstore interface {
peerstore.Peerstore
ScoreDatastore
peerstore.CertifiedAddrBook
}
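As a quick illustration of how these interfaces compose, a hypothetical caller in this package (not part of this commit) might look like:

// updateGossipScore shows the intended call pattern: write one score type,
// then read back the full PeerScores snapshot. Hypothetical helper.
func updateGossipScore(eps ExtendedPeerstore, id peer.ID, score float64) (PeerScores, error) {
	if err := eps.SetScore(id, TypeGossip, score); err != nil {
		return PeerScores{}, err
	}
	return eps.GetPeerScores(id)
}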
package store
import (
"context"
"errors"
"fmt"
"sync"
lru "github.com/hashicorp/golang-lru/v2"
ds "github.com/ipfs/go-datastore"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-base32"
)
// scoreBook persists peer scores in a datastore, fronted by an LRU cache.
// The embedded RWMutex makes the read-modify-write cycle in SetScore atomic.
type scoreBook struct {
ctx context.Context
store ds.Batching
cache *lru.Cache[peer.ID, PeerScores]
sync.RWMutex
}
var scoresBase = ds.NewKey("/peers/scores")
const (
scoreDataV0 = "0"
scoreCacheSize = 100
)
func newScoreBook(ctx context.Context, store ds.Batching) (*scoreBook, error) {
cache, err := lru.New[peer.ID, PeerScores](scoreCacheSize)
if err != nil {
return nil, fmt.Errorf("creating cache: %w", err)
}
return &scoreBook{
ctx: ctx,
store: store,
cache: cache,
}, nil
}
func (d *scoreBook) GetPeerScores(id peer.ID) (PeerScores, error) {
d.RLock()
defer d.RUnlock()
return d.getPeerScoresNoLock(id)
}
// getPeerScoresNoLock returns scores from the cache, falling back to the datastore.
// Callers must hold at least the read lock.
func (d *scoreBook) getPeerScoresNoLock(id peer.ID) (PeerScores, error) {
scores, ok := d.cache.Get(id)
if ok {
return scores, nil
}
data, err := d.store.Get(d.ctx, scoreKey(id))
if errors.Is(err, ds.ErrNotFound) {
return PeerScores{}, nil
} else if err != nil {
return PeerScores{}, fmt.Errorf("load scores for peer %v: %w", id, err)
}
scores, err = deserializeScoresV0(data)
if err != nil {
return PeerScores{}, fmt.Errorf("invalid score data for peer %v: %w", id, err)
}
d.cache.Add(id, scores)
return scores, nil
}
func (d *scoreBook) SetScore(id peer.ID, scoreType ScoreType, score float64) error {
d.Lock()
defer d.Unlock()
scores, err := d.getPeerScoresNoLock(id)
if err != nil {
return err
}
switch scoreType {
case TypeGossip:
scores.Gossip = score
default:
return fmt.Errorf("unknown score type: %v", scoreType)
}
data, err := serializeScoresV0(scores)
if err != nil {
return fmt.Errorf("encode scores for peer %v: %w", id, err)
}
err = d.store.Put(d.ctx, scoreKey(id), data)
if err != nil {
return fmt.Errorf("storing updated scores for peer %v: %w", id, err)
}
d.cache.Add(id, scores)
return nil
}
// scoreKey returns the datastore key for a peer's scores, of the form
// /peers/scores/<base32(peer ID)>/<version>. Base32-encoding the ID keeps
// arbitrary peer ID bytes safe to embed in a datastore key path.
func scoreKey(id peer.ID) ds.Key {
return scoresBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(id))).ChildString(scoreDataV0)
}
package store
import (
"context"
"testing"
ds "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/sync"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
"github.com/stretchr/testify/require"
)
func TestGetEmptyScoreComponents(t *testing.T) {
id := peer.ID("aaaa")
store := createMemoryStore(t)
assertPeerScores(t, store, id, PeerScores{})
}
func TestRoundTripGossipScore(t *testing.T) {
id := peer.ID("aaaa")
store := createMemoryStore(t)
score := 123.45
err := store.SetScore(id, TypeGossip, score)
require.NoError(t, err)
assertPeerScores(t, store, id, PeerScores{Gossip: score})
}
func TestUpdateGossipScore(t *testing.T) {
id := peer.ID("aaaa")
store := createMemoryStore(t)
score := 123.45
require.NoError(t, store.SetScore(id, TypeGossip, 444.223))
require.NoError(t, store.SetScore(id, TypeGossip, score))
assertPeerScores(t, store, id, PeerScores{Gossip: score})
}
func TestStoreScoresForMultiplePeers(t *testing.T) {
id1 := peer.ID("aaaa")
id2 := peer.ID("bbbb")
store := createMemoryStore(t)
score1 := 123.45
score2 := 453.22
require.NoError(t, store.SetScore(id1, TypeGossip, score1))
require.NoError(t, store.SetScore(id2, TypeGossip, score2))
assertPeerScores(t, store, id1, PeerScores{Gossip: score1})
assertPeerScores(t, store, id2, PeerScores{Gossip: score2})
}
func TestPersistData(t *testing.T) {
id := peer.ID("aaaa")
score := 123.45
backingStore := sync.MutexWrap(ds.NewMapDatastore())
store := createPeerstoreWithBacking(t, backingStore)
require.NoError(t, store.SetScore(id, TypeGossip, score))
// Close and recreate a new store from the same backing
require.NoError(t, store.Close())
store = createPeerstoreWithBacking(t, backingStore)
assertPeerScores(t, store, id, PeerScores{Gossip: score})
}
func TestUnknownScoreType(t *testing.T) {
store := createMemoryStore(t)
err := store.SetScore("aaaa", 92832, 244.24)
require.ErrorContains(t, err, "unknown score type")
}
func assertPeerScores(t *testing.T, store ExtendedPeerstore, id peer.ID, expected PeerScores) {
result, err := store.GetPeerScores(id)
require.NoError(t, err)
require.Equal(t, expected, result)
}
func createMemoryStore(t *testing.T) ExtendedPeerstore {
store := sync.MutexWrap(ds.NewMapDatastore())
return createPeerstoreWithBacking(t, store)
}
func createPeerstoreWithBacking(t *testing.T, store *sync.MutexDatastore) ExtendedPeerstore {
ps, err := pstoreds.NewPeerstore(context.Background(), store, pstoreds.DefaultOpts())
require.NoError(t, err, "Failed to create peerstore")
eps, err := NewExtendedPeerstore(context.Background(), ps, store)
require.NoError(t, err)
return eps
}
package store
import (
"bytes"
"encoding/binary"
)
// serializeScoresV0 encodes scores field by field as fixed-width big-endian values.
func serializeScoresV0(scores PeerScores) ([]byte, error) {
var b bytes.Buffer
err := binary.Write(&b, binary.BigEndian, scores.Gossip)
if err != nil {
return nil, err
}
return b.Bytes(), nil
}
// deserializeScoresV0 decodes data written by serializeScoresV0.
func deserializeScoresV0(data []byte) (PeerScores, error) {
var scores PeerScores
r := bytes.NewReader(data)
err := binary.Read(r, binary.BigEndian, &scores.Gossip)
if err != nil {
return PeerScores{}, err
}
return scores, nil
}
package store
import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
func TestRoundtripScoresV0(t *testing.T) {
scores := PeerScores{
Gossip: 1234.52382,
}
data, err := serializeScoresV0(scores)
require.NoError(t, err)
result, err := deserializeScoresV0(data)
require.NoError(t, err)
require.Equal(t, scores, result)
}
// TestParseHistoricSerializationsV0 checks that previously persisted data can still be deserialized.
// Adding new fields should not require bumping the version; only removing fields does.
// A new entry should be added to this table each time the fields change, to ensure
// historic data can always be deserialized.
func TestParseHistoricSerializationsV0(t *testing.T) {
tests := []struct {
name string
data []byte
expected PeerScores
}{
{
name: "GossipOnly",
data: common.Hex2Bytes("40934A18644523F6"),
expected: PeerScores{Gossip: 1234.52382},
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
result, err := deserializeScoresV0(test.data)
require.NoError(t, err)
require.Equal(t, test.expected, result)
})
}
}
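When the PeerScores fields change, a new table entry can be produced by serializing a sample value and hex-encoding it. A hypothetical convenience helper (assumed, not in this commit):

// printHistoricEntry logs the hex form of the serialized scores so it can be
// pasted into the tests table above. Hypothetical helper for test authors.
func printHistoricEntry(t *testing.T, scores PeerScores) {
	data, err := serializeScoresV0(scores)
	require.NoError(t, err)
	t.Logf("data: common.Hex2Bytes(%q)", common.Bytes2Hex(data))
}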
// Package clock provides an abstraction for time to enable testing of functionality that uses time as an input.
package clock
import "time"
// Clock represents time in a way that can be provided by varying implementations.
// Methods are designed to be direct replacements for methods in the time package.
type Clock interface {
// Now provides the current local time. Equivalent to time.Now
Now() time.Time
// After waits for the duration to elapse and then sends the current time on the returned channel.
// It is equivalent to time.After
After(d time.Duration) <-chan time.Time
// NewTicker returns a new Ticker containing a channel that will send
// the current time on the channel after each tick. The period of the
// ticks is specified by the duration argument. The ticker will adjust
// the time interval or drop ticks to make up for slow receivers.
// The duration d must be greater than zero; if not, NewTicker will
// panic. Stop the ticker to release associated resources.
NewTicker(d time.Duration) Ticker
}
// A Ticker holds a channel that delivers "ticks" of a clock at intervals
type Ticker interface {
// Ch returns the channel for the ticker. Equivalent to time.Ticker.C
Ch() <-chan time.Time
// Stop turns off a ticker. After Stop, no more ticks will be sent.
// Stop does not close the channel, to prevent a concurrent goroutine
// reading from the channel from seeing an erroneous "tick".
Stop()
// Reset stops a ticker and resets its period to the specified duration.
// The next tick will arrive after the new period elapses. The duration d
// must be greater than zero; if not, Reset will panic.
Reset(d time.Duration)
}
// SystemClock provides an instance of Clock that uses the system clock via methods in the time package.
var SystemClock Clock = systemClock{}
type systemClock struct {
}
func (s systemClock) Now() time.Time {
return time.Now()
}
func (s systemClock) After(d time.Duration) <-chan time.Time {
return time.After(d)
}
type SystemTicker struct {
*time.Ticker
}
func (t *SystemTicker) Ch() <-chan time.Time {
return t.C
}
func (s systemClock) NewTicker(d time.Duration) Ticker {
return &SystemTicker{time.NewTicker(d)}
}
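Code that accepts a Clock instead of calling the time package directly can be driven by SystemClock in production and by the deterministic implementation below in tests. A small hypothetical sketch:

// waitThenPing is written against Clock rather than time.After, so tests can
// inject a DeterministicClock and advance time manually. Hypothetical example.
func waitThenPing(c Clock, d time.Duration, ping func(time.Time)) {
	t := <-c.After(d) // blocks until d elapses on the injected clock
	ping(t)
}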
package clock
import (
"context"
"sync"
"time"
)
type action interface {
// Return true if the action is due to fire
isDue(time.Time) bool
// fire triggers the action. Returns true if the action needs to fire again in the future
fire(time.Time) bool
}
type task struct {
ch chan time.Time
due time.Time
}
func (t task) isDue(now time.Time) bool {
return !t.due.After(now)
}
func (t task) fire(now time.Time) bool {
t.ch <- now
close(t.ch)
return false
}
type ticker struct {
c Clock
ch chan time.Time
nextDue time.Time
period time.Duration
stopped bool
sync.Mutex
}
func (t *ticker) Ch() <-chan time.Time {
return t.ch
}
func (t *ticker) Stop() {
t.Lock()
defer t.Unlock()
t.stopped = true
}
func (t *ticker) Reset(d time.Duration) {
if d <= 0 {
panic("Continuously firing tickers are a really bad idea")
}
t.Lock()
defer t.Unlock()
t.period = d
t.nextDue = t.c.Now().Add(d)
}
func (t *ticker) isDue(now time.Time) bool {
t.Lock()
defer t.Unlock()
return !t.nextDue.After(now)
}
func (t *ticker) fire(now time.Time) bool {
t.Lock()
defer t.Unlock()
if t.stopped {
return false
}
t.ch <- now
t.nextDue = now.Add(t.period)
return true
}
type DeterministicClock struct {
now time.Time
pending []action
newPendingCh chan struct{}
lock sync.Mutex
}
// NewDeterministicClock creates a new clock where time only advances when the DeterministicClock.AdvanceTime method is called.
// This is intended for use in situations where a deterministic clock is required, such as testing or event driven systems.
func NewDeterministicClock(now time.Time) *DeterministicClock {
return &DeterministicClock{
now: now,
newPendingCh: make(chan struct{}, 1),
}
}
func (s *DeterministicClock) Now() time.Time {
s.lock.Lock()
defer s.lock.Unlock()
return s.now
}
func (s *DeterministicClock) After(d time.Duration) <-chan time.Time {
s.lock.Lock()
defer s.lock.Unlock()
ch := make(chan time.Time, 1)
if d.Nanoseconds() == 0 {
ch <- s.now
close(ch)
} else {
s.addPending(&task{ch: ch, due: s.now.Add(d)})
}
return ch
}
func (s *DeterministicClock) NewTicker(d time.Duration) Ticker {
if d <= 0 {
panic("Continuously firing tickers are a really bad idea")
}
s.lock.Lock()
defer s.lock.Unlock()
ch := make(chan time.Time, 1)
t := &ticker{
c: s,
ch: ch,
nextDue: s.now.Add(d),
period: d,
}
s.addPending(t)
return t
}
func (s *DeterministicClock) addPending(t action) {
s.pending = append(s.pending, t)
select {
case s.newPendingCh <- struct{}{}:
default:
// Must already have a new pending task flagged, do nothing
}
}
// WaitForNewPendingTask blocks until a new task is scheduled since the last time this method was called.
// true is returned if a new task was scheduled, false if the context completed before a new task was added.
func (s *DeterministicClock) WaitForNewPendingTask(ctx context.Context) bool {
select {
case <-ctx.Done():
return false
case <-s.newPendingCh:
return true
}
}
// AdvanceTime moves the time forward by the specified duration
func (s *DeterministicClock) AdvanceTime(d time.Duration) {
s.lock.Lock()
defer s.lock.Unlock()
s.now = s.now.Add(d)
var remaining []action
for _, a := range s.pending {
if !a.isDue(s.now) || a.fire(s.now) {
remaining = append(remaining, a)
}
}
s.pending = remaining
}
var _ Clock = (*DeterministicClock)(nil)
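A typical test-side pattern (hypothetical sketch, using only the API above) is to wait for the code under test to register its timer, then advance time synchronously:

// driveOnce avoids racing the code under test: WaitForNewPendingTask blocks
// until it has scheduled via After/NewTicker, then AdvanceTime fires anything due.
func driveOnce(ctx context.Context, clock *DeterministicClock, d time.Duration) bool {
	if !clock.WaitForNewPendingTask(ctx) {
		return false // context expired before the code under test scheduled anything
	}
	clock.AdvanceTime(d)
	return true
}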
package clock
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestNowReturnsCurrentTime(t *testing.T) {
now := time.UnixMilli(23829382)
clock := NewDeterministicClock(now)
require.Equal(t, now, clock.Now())
}
func TestAdvanceTime(t *testing.T) {
start := time.UnixMilli(1000)
clock := NewDeterministicClock(start)
clock.AdvanceTime(500 * time.Millisecond)
require.Equal(t, start.Add(500*time.Millisecond), clock.Now())
}
func TestAfter(t *testing.T) {
t.Run("ZeroCompletesImmediately", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ch := clock.After(0)
require.Len(t, ch, 1, "duration should already have been reached")
})
t.Run("CompletesWhenTimeAdvances", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ch := clock.After(500 * time.Millisecond)
require.Len(t, ch, 0, "should not complete immediately")
clock.AdvanceTime(499 * time.Millisecond)
require.Len(t, ch, 0, "should not complete before time is due")
clock.AdvanceTime(1 * time.Millisecond)
require.Len(t, ch, 1, "should complete when time is reached")
require.Equal(t, clock.Now(), <-ch)
})
t.Run("CompletesWhenTimeAdvancesPastDue", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ch := clock.After(500 * time.Millisecond)
require.Len(t, ch, 0, "should not complete immediately")
clock.AdvanceTime(9000 * time.Millisecond)
require.Len(t, ch, 1, "should complete when time is past")
require.Equal(t, clock.Now(), <-ch)
})
t.Run("RegisterAsPending", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
_ = clock.After(500 * time.Millisecond)
ctx, cancelFunc := context.WithTimeout(context.Background(), 10*time.Second)
defer cancelFunc()
require.True(t, clock.WaitForNewPendingTask(ctx), "should have added a new pending task")
})
}
func TestNewTicker(t *testing.T) {
t.Run("FiresAfterEachDuration", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ticker := clock.NewTicker(5 * time.Second)
require.Len(t, ticker.Ch(), 0, "should not fire immediately")
clock.AdvanceTime(4 * time.Second)
require.Len(t, ticker.Ch(), 0, "should not fire before due")
clock.AdvanceTime(1 * time.Second)
require.Len(t, ticker.Ch(), 1, "should fire when due")
require.Equal(t, clock.Now(), <-ticker.Ch(), "should post current time")
clock.AdvanceTime(4 * time.Second)
require.Len(t, ticker.Ch(), 0, "should not re-fire before due")
clock.AdvanceTime(1 * time.Second)
require.Len(t, ticker.Ch(), 1, "should fire when due")
require.Equal(t, clock.Now(), <-ticker.Ch(), "should post current time")
})
t.Run("SkipsFiringWhenAdvancedMultipleDurations", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ticker := clock.NewTicker(5 * time.Second)
require.Len(t, ticker.Ch(), 0, "should not fire immediately")
// Advance more than three periods, but should still only fire once
clock.AdvanceTime(16 * time.Second)
require.Len(t, ticker.Ch(), 1, "should fire when due")
require.Equal(t, clock.Now(), <-ticker.Ch(), "should post current time")
clock.AdvanceTime(1 * time.Second)
require.Len(t, ticker.Ch(), 0, "should not fire until due again")
})
t.Run("StopFiring", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ticker := clock.NewTicker(5 * time.Second)
ticker.Stop()
clock.AdvanceTime(10 * time.Second)
require.Len(t, ticker.Ch(), 0, "should not fire after stop")
})
t.Run("ResetPanicWhenLessNotPositive", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ticker := clock.NewTicker(5 * time.Second)
require.Panics(t, func() {
ticker.Reset(0)
}, "reset to 0 should panic")
require.Panics(t, func() {
ticker.Reset(-1)
}, "reset to negative duration should panic")
})
t.Run("ResetWithShorterPeriod", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ticker := clock.NewTicker(5 * time.Second)
require.Len(t, ticker.Ch(), 0, "should not fire immediately")
ticker.Reset(1 * time.Second)
require.Len(t, ticker.Ch(), 0, "should not fire immediately after reset")
clock.AdvanceTime(1 * time.Second)
require.Len(t, ticker.Ch(), 1, "should fire when new duration reached")
require.Equal(t, clock.Now(), <-ticker.Ch(), "should post current time")
})
t.Run("ResetWithLongerPeriod", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ticker := clock.NewTicker(5 * time.Second)
require.Len(t, ticker.Ch(), 0, "should not fire immediately")
ticker.Reset(7 * time.Second)
require.Len(t, ticker.Ch(), 0, "should not fire immediately after reset")
clock.AdvanceTime(5 * time.Second)
require.Len(t, ticker.Ch(), 0, "should not fire when old duration reached")
clock.AdvanceTime(2 * time.Second)
require.Len(t, ticker.Ch(), 1, "should fire when new duration reached")
require.Equal(t, clock.Now(), <-ticker.Ch(), "should post current time")
})
t.Run("RegisterAsPending", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ticker := clock.NewTicker(5 * time.Second)
defer ticker.Stop()
ctx, cancelFunc := context.WithTimeout(context.Background(), 10*time.Second)
defer cancelFunc()
require.True(t, clock.WaitForNewPendingTask(ctx), "should have added a new pending task")
})
}
func TestWaitForPending(t *testing.T) {
t.Run("DoNotBlockWhenAlreadyPending", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
_ = clock.After(5 * time.Minute)
_ = clock.After(5 * time.Minute)
ctx, cancelFunc := context.WithTimeout(context.Background(), 10*time.Second)
defer cancelFunc()
require.True(t, clock.WaitForNewPendingTask(ctx), "should have added a new pending task")
})
t.Run("ResetNewPendingFlagAfterWaiting", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
_ = clock.After(5 * time.Minute)
_ = clock.After(5 * time.Minute)
ctx, cancelFunc := context.WithTimeout(context.Background(), 10*time.Second)
defer cancelFunc()
require.True(t, clock.WaitForNewPendingTask(ctx), "should have added a new pending task")
ctx, cancelFunc = context.WithTimeout(context.Background(), 250*time.Millisecond)
defer cancelFunc()
require.False(t, clock.WaitForNewPendingTask(ctx), "should have reset new pending task flag")
})
}
@@ -26,9 +26,7 @@ type ServerConfig struct {
}
type CacheConfig struct {
- Enabled               bool   `toml:"enabled"`
- BlockSyncRPCURL       string `toml:"block_sync_rpc_url"`
- NumBlockConfirmations int    `toml:"num_block_confirmations"`
+ Enabled bool `toml:"enabled"`
}
type RedisConfig struct {
@@ -10,8 +10,6 @@ namespace = "proxyd"
[cache]
enabled = true
- block_sync_rpc_url = "$GOOD_BACKEND_RPC_URL"
[backends]
[backends.good]
@@ -9,7 +9,6 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/go-redis/redis/v8"
"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -206,27 +205,12 @@ func Start(config *Config) (*Server, func(), error) {
rpcCache RPCCache
)
if config.Cache.Enabled {
- if config.Cache.BlockSyncRPCURL == "" {
- return nil, nil, fmt.Errorf("block sync node required for caching")
- }
- blockSyncRPCURL, err := ReadFromEnvOrConfig(config.Cache.BlockSyncRPCURL)
- if err != nil {
- return nil, nil, err
- }
if redisClient == nil {
log.Warn("redis is not configured, using in-memory cache")
cache = newMemoryCache()
} else {
cache = newRedisCache(redisClient, config.Redis.Namespace)
}
- // Ideally, the BlocKSyncRPCURL should be the sequencer or a HA replica that's not far behind
- ethClient, err := ethclient.Dial(blockSyncRPCURL)
- if err != nil {
- return nil, nil, err
- }
- defer ethClient.Close()
rpcCache = newRPCCache(newCacheWithCompression(cache))
}