Commit 52fdff78 authored by protolambda's avatar protolambda

drop unused db from batcher

parent adec5b40
...@@ -12,7 +12,6 @@ import ( ...@@ -12,7 +12,6 @@ import (
"syscall" "syscall"
"time" "time"
"github.com/ethereum-optimism/optimism/op-batcher/db"
"github.com/ethereum-optimism/optimism/op-batcher/sequencer" "github.com/ethereum-optimism/optimism/op-batcher/sequencer"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
...@@ -144,11 +143,6 @@ func NewBatchSubmitter(cfg Config, l log.Logger) (*BatchSubmitter, error) { ...@@ -144,11 +143,6 @@ func NewBatchSubmitter(cfg Config, l log.Logger) (*BatchSubmitter, error) {
return nil, err return nil, err
} }
historyDB, err := db.OpenJSONFileDatabase(cfg.SequencerHistoryDBFilename)
if err != nil {
return nil, err
}
chainID, err := l1Client.ChainID(ctx) chainID, err := l1Client.ChainID(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
...@@ -171,7 +165,6 @@ func NewBatchSubmitter(cfg Config, l log.Logger) (*BatchSubmitter, error) { ...@@ -171,7 +165,6 @@ func NewBatchSubmitter(cfg Config, l log.Logger) (*BatchSubmitter, error) {
MinL1TxSize: cfg.MinL1TxSize, MinL1TxSize: cfg.MinL1TxSize,
MaxL1TxSize: cfg.MaxL1TxSize, MaxL1TxSize: cfg.MaxL1TxSize,
BatchInboxAddress: batchInboxAddress, BatchInboxAddress: batchInboxAddress,
HistoryDB: historyDB,
ChannelTimeout: cfg.ChannelTimeout, ChannelTimeout: cfg.ChannelTimeout,
ChainID: chainID, ChainID: chainID,
PrivKey: sequencerPrivKey, PrivKey: sequencerPrivKey,
......
...@@ -53,10 +53,6 @@ type Config struct { ...@@ -53,10 +53,6 @@ type Config struct {
// batched submission of sequencer transactions. // batched submission of sequencer transactions.
SequencerHDPath string SequencerHDPath string
// SequencerHistoryDBFilename is the filename of the database used to track
// the latest L2 sequencer batches that were published.
SequencerHistoryDBFilename string
// SequencerBatchInboxAddress is the address in which to send batch // SequencerBatchInboxAddress is the address in which to send batch
// transactions. // transactions.
SequencerBatchInboxAddress string SequencerBatchInboxAddress string
...@@ -86,7 +82,6 @@ func NewConfig(ctx *cli.Context) Config { ...@@ -86,7 +82,6 @@ func NewConfig(ctx *cli.Context) Config {
ResubmissionTimeout: ctx.GlobalDuration(flags.ResubmissionTimeoutFlag.Name), ResubmissionTimeout: ctx.GlobalDuration(flags.ResubmissionTimeoutFlag.Name),
Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name), Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name),
SequencerHDPath: ctx.GlobalString(flags.SequencerHDPathFlag.Name), SequencerHDPath: ctx.GlobalString(flags.SequencerHDPathFlag.Name),
SequencerHistoryDBFilename: ctx.GlobalString(flags.SequencerHistoryDBFilenameFlag.Name),
SequencerBatchInboxAddress: ctx.GlobalString(flags.SequencerBatchInboxAddressFlag.Name), SequencerBatchInboxAddress: ctx.GlobalString(flags.SequencerBatchInboxAddressFlag.Name),
/* Optional Flags */ /* Optional Flags */
LogLevel: ctx.GlobalString(flags.LogLevelFlag.Name), LogLevel: ctx.GlobalString(flags.LogLevelFlag.Name),
......
package db
import (
"encoding/json"
"io/ioutil"
"os"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)
// History records, per channel, the highest frame number that has been
// submitted, keyed by the channel's ID.
type History struct {
	Channels map[derive.ChannelID]uint64 `json:"channels"`
}
// Update merges the given channel→frame-number map into the history without
// ever lowering a channel's recorded frame number, then prunes every channel
// whose timestamp has timed out relative to the given L1 time.
func (h *History) Update(add map[derive.ChannelID]uint64, timeout uint64, l1Time uint64) {
	// Merge: keep the maximum frame number seen per channel.
	for id, nr := range add {
		if existing, ok := h.Channels[id]; !ok || existing <= nr {
			h.Channels[id] = nr
		}
	}
	// Prune: drop channels that have timed out. Deleting map entries while
	// ranging over the map is well-defined in Go.
	for id := range h.Channels {
		if id.Time+timeout < l1Time {
			delete(h.Channels, id)
		}
	}
}
// HistoryDatabase persists the sequencer batch-submission history.
type HistoryDatabase interface {
	// LoadHistory reads the current history from the backing store.
	LoadHistory() (*History, error)
	// Update applies History.Update to the stored history and persists the result.
	Update(add map[derive.ChannelID]uint64, timeout uint64, l1Time uint64) error
	// Close releases any resources held by the database.
	Close() error
}
type JSONFileDatabase struct {
filename string
}
func OpenJSONFileDatabase(
filename string,
) (*JSONFileDatabase, error) {
_, err := os.Stat(filename)
if os.IsNotExist(err) {
file, err := os.Create(filename)
if err != nil {
return nil, err
}
err = file.Close()
if err != nil {
return nil, err
}
}
return &JSONFileDatabase{
filename: filename,
}, nil
}
// LoadHistory reads and decodes the history from the JSON file.
// A zero-length file yields an initialized, empty History.
func (d *JSONFileDatabase) LoadHistory() (*History, error) {
	raw, err := os.ReadFile(d.filename)
	if err != nil {
		return nil, err
	}
	if len(raw) == 0 {
		// Freshly created database: nothing stored yet.
		return &History{Channels: make(map[derive.ChannelID]uint64)}, nil
	}
	out := new(History)
	if err := json.Unmarshal(raw, out); err != nil {
		return nil, err
	}
	return out, nil
}
// Update loads the stored history, merges/prunes it via History.Update,
// and writes the encoded result back to the JSON file.
func (d *JSONFileDatabase) Update(add map[derive.ChannelID]uint64, timeout uint64, l1Time uint64) error {
	h, err := d.LoadHistory()
	if err != nil {
		return err
	}
	h.Update(add, timeout, l1Time)
	encoded, err := json.Marshal(h)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(d.filename, encoded, 0644)
}
// Close implements HistoryDatabase. The backing file is opened and closed
// per operation, so there is nothing to release here.
func (d *JSONFileDatabase) Close() error {
	return nil
}
package db_test
import (
"io/ioutil"
"math/rand"
"os"
"testing"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-batcher/db"
"github.com/stretchr/testify/require"
)
// TestOpenJSONFileDatabaseNoFile checks that opening a path with no existing
// file succeeds (the file is created on open).
func TestOpenJSONFileDatabaseNoFile(t *testing.T) {
	file, err := ioutil.TempFile("", "history_db.*.json")
	require.Nil(t, err)
	filename := file.Name()
	// ioutil.TempFile returns an open handle; close it before removing
	// (the original leaked the descriptor).
	require.Nil(t, file.Close())
	err = os.Remove(filename)
	require.Nil(t, err)
	hdb, err := db.OpenJSONFileDatabase(filename)
	require.Nil(t, err)
	require.NotNil(t, hdb)
	// OpenJSONFileDatabase recreated the file; clean it up after the test.
	defer os.Remove(filename)
	err = hdb.Close()
	require.Nil(t, err)
}
// TestOpenJSONFileDatabaseEmptyFile checks that opening an existing but
// empty file succeeds.
func TestOpenJSONFileDatabaseEmptyFile(t *testing.T) {
	file, err := ioutil.TempFile("", "history_db.*.json")
	require.Nil(t, err)
	filename := file.Name()
	// Close the handle returned by TempFile (the original leaked it).
	require.Nil(t, file.Close())
	defer os.Remove(filename)
	hdb, err := db.OpenJSONFileDatabase(filename)
	require.Nil(t, err)
	require.NotNil(t, hdb)
	err = hdb.Close()
	require.Nil(t, err)
}
// TestOpenJSONFileDatabase checks that opening an existing file succeeds.
// NOTE(review): this body is currently identical to
// TestOpenJSONFileDatabaseEmptyFile — consider extending it to cover a
// non-empty file, or dropping one of the two.
func TestOpenJSONFileDatabase(t *testing.T) {
	file, err := ioutil.TempFile("", "history_db.*.json")
	require.Nil(t, err)
	filename := file.Name()
	// Close the handle returned by TempFile (the original leaked it).
	require.Nil(t, file.Close())
	defer os.Remove(filename)
	hdb, err := db.OpenJSONFileDatabase(filename)
	require.Nil(t, err)
	require.NotNil(t, hdb)
	err = hdb.Close()
	require.Nil(t, err)
}
// makeDB creates a fresh JSONFileDatabase backed by a temp file and returns
// it together with a cleanup function that closes the db and removes the file.
func makeDB(t *testing.T) (*db.JSONFileDatabase, func()) {
	file, err := ioutil.TempFile("", "history_db.*.json")
	require.Nil(t, err)
	filename := file.Name()
	// Close the handle returned by TempFile (the original leaked it).
	require.Nil(t, file.Close())
	hdb, err := db.OpenJSONFileDatabase(filename)
	require.Nil(t, err)
	require.NotNil(t, hdb)
	cleanup := func() {
		_ = hdb.Close()
		_ = os.Remove(filename)
	}
	return hdb, cleanup
}
// TestLoadHistoryEmpty verifies that loading from a freshly created (empty)
// database yields an initialized History with no channels.
func TestLoadHistoryEmpty(t *testing.T) {
	hdb, cleanup := makeDB(t)
	defer cleanup()
	loaded, err := hdb.LoadHistory()
	require.Nil(t, err)
	require.NotNil(t, loaded)
	require.Equal(t, int(0), len(loaded.Channels))
	want := &db.History{
		Channels: map[derive.ChannelID]uint64{},
	}
	require.Equal(t, want, loaded)
}
// TestUpdate exercises History.Update (merge + time-based pruning) and the
// database round-trip via JSONFileDatabase.Update.
//
// NOTE: the assertions depend on the sequence of draws from rng inside
// genUpdate — keep the call order intact when modifying this test.
func TestUpdate(t *testing.T) {
	hdb, cleanup := makeDB(t)
	defer cleanup()
	rng := rand.New(rand.NewSource(1234))
	// mock some random channel updates in a time range
	genUpdate := func(n uint64, minTime uint64, maxTime uint64) map[derive.ChannelID]uint64 {
		out := make(map[derive.ChannelID]uint64)
		for i := uint64(0); i < n; i++ {
			var id derive.ChannelID
			rng.Read(id.Data[:])
			// channel timestamp in [minTime, maxTime)
			id.Time = minTime + uint64(rng.Intn(int(maxTime-minTime)))
			// arbitrary frame number in [0, 1000)
			out[id] = uint64(rng.Intn(1000))
		}
		return out
	}
	history, err := hdb.LoadHistory()
	require.Nil(t, err)
	first := genUpdate(20, 1000, 2000)
	// first update: be generous with a large timeout, merge in full update
	history.Update(first, 10000, 2000)
	require.Equal(t, history.Channels, first)
	require.Equal(t, len(history.Channels), 20)
	// now try to add something completely new
	second := genUpdate(10, 1500, 2400)
	history.Update(second, 10000, 2000)
	require.Equal(t, len(history.Channels), 20+10)
	// now time out some older channels, while adding a few new ones that are too old
	third := genUpdate(15, 800, 1500)
	history.Update(third, 1000, 2500)
	// check if second is not pruned
	for id := range second {
		require.Contains(t, history.Channels, id)
	}
	// check if third is fully pruned
	for id := range third {
		require.NotContains(t, history.Channels, id)
	}
	// try store history back in the db
	require.NoError(t, hdb.Update(history.Channels, 0, 0))
	// time out everything
	history.Update(nil, 0, 2400)
	require.Len(t, history.Channels, 0)
}
...@@ -86,13 +86,6 @@ var ( ...@@ -86,13 +86,6 @@ var (
Required: true, Required: true,
EnvVar: prefixEnvVar("SEQUENCER_HD_PATH"), EnvVar: prefixEnvVar("SEQUENCER_HD_PATH"),
} }
SequencerHistoryDBFilenameFlag = cli.StringFlag{
Name: "sequencer-history-db-filename",
Usage: "File name used to identify the latest L2 batches submitted " +
"by the sequencer",
Required: true,
EnvVar: prefixEnvVar("SEQUENCER_HISTORY_DB_FILENAME"),
}
SequencerBatchInboxAddressFlag = cli.StringFlag{ SequencerBatchInboxAddressFlag = cli.StringFlag{
Name: "sequencer-batch-inbox-address", Name: "sequencer-batch-inbox-address",
Usage: "L1 Address to receive batch transactions", Usage: "L1 Address to receive batch transactions",
...@@ -128,7 +121,6 @@ var requiredFlags = []cli.Flag{ ...@@ -128,7 +121,6 @@ var requiredFlags = []cli.Flag{
ResubmissionTimeoutFlag, ResubmissionTimeoutFlag,
MnemonicFlag, MnemonicFlag,
SequencerHDPathFlag, SequencerHDPathFlag,
SequencerHistoryDBFilenameFlag,
SequencerBatchInboxAddressFlag, SequencerBatchInboxAddressFlag,
} }
......
...@@ -5,7 +5,6 @@ import ( ...@@ -5,7 +5,6 @@ import (
"math/big" "math/big"
"time" "time"
"github.com/ethereum-optimism/optimism/op-batcher/db"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
...@@ -28,9 +27,6 @@ type Config struct { ...@@ -28,9 +27,6 @@ type Config struct {
// Where to send the batch txs to. // Where to send the batch txs to.
BatchInboxAddress common.Address BatchInboxAddress common.Address
// Persists progress of submitting block data, to avoid redoing any work
HistoryDB db.HistoryDatabase
// The batcher can decide to set it shorter than the actual timeout, // The batcher can decide to set it shorter than the actual timeout,
// since submitting continued channel data to L1 is not instantaneous. // since submitting continued channel data to L1 is not instantaneous.
// It's not worth it to work with nearly timed-out channels. // It's not worth it to work with nearly timed-out channels.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment