Commit e80d23b6 authored by Adrian Sutton, committed by GitHub

op-program: Use PebbleDB for DiskKV (Reapply with fixes) (#11709)

* feat(op-program): Use `PebbleDB` for `DiskKV` (#11705)

* feat(op-program): Use `PebbleDB` for `DiskKV`

* close db

* fix `testFaultProofProgramScenario` tests

* switch to snappy compression

https://github.com/cockroachdb/pebble/issues/3434

* fix tempdir

* update compat release

* defer k/v until preimage server and hinter have both exited

* Only open preimage source when actually needing preimages.

Avoids concurrent access to the pebbledb.

* op-program: Handle interrupts in host

Ensures the preimage kv store is closed cleanly even when the process is sent an interrupt signal.
Enables non-blocking IO so that read calls return io.EOF when a file handle is closed rather than blocking forever.

* op-program: Restore file based kv store. Add pebble as an additional option.

* op-program: Add option to specify KV store format to use

* op-program: Switch pebble to perform sync writes.

* op-program: Rename disk kv to file kv

* op-program: Rename DBFormat to DataFormat for consistency.

---------
Co-authored-by: clabby <ben@clab.by>
parent 72eff5f9
@@ -217,9 +217,29 @@ func (p *ProcessPreimageOracle) Close() error {
 	if p.cmd == nil {
 		return nil
 	}
+	tryWait := func(dur time.Duration) (bool, error) {
+		ctx, cancel := context.WithTimeout(context.Background(), dur)
+		defer cancel()
+		select {
+		case <-ctx.Done():
+			return false, nil
+		case err := <-p.waitErr:
+			return true, err
+		}
+	}
 	// Give the pre-image server time to exit cleanly before killing it.
-	time.Sleep(time.Second * 1)
+	if exited, err := tryWait(1 * time.Second); exited {
+		return err
+	}
+	// Politely ask the process to exit and give it some more time
 	_ = p.cmd.Process.Signal(os.Interrupt)
+	if exited, err := tryWait(30 * time.Second); exited {
+		return err
+	}
+	// Force the process to exit
+	_ = p.cmd.Process.Signal(os.Kill)
 	return <-p.waitErr
 }
...
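The `tryWait` helper above selects on `p.waitErr`, which this hunk assumes is already fed by a goroutine that waits on the child process. A minimal sketch of that pattern, with illustrative type and function names that are not the repository's API:

```go
package oracle

import (
	"context"
	"os/exec"
	"time"
)

// procOracle is a stand-in for the process wrapper in the diff above;
// the field names mirror the diff, but the type itself is illustrative.
type procOracle struct {
	cmd     *exec.Cmd
	waitErr chan error
}

// start launches the child process and feeds waitErr from cmd.Wait,
// which is what lets tryWait observe a clean exit.
func start(name string, args ...string) (*procOracle, error) {
	p := &procOracle{
		cmd:     exec.Command(name, args...),
		waitErr: make(chan error, 1), // buffered so the waiting goroutine never blocks
	}
	if err := p.cmd.Start(); err != nil {
		return nil, err
	}
	go func() { p.waitErr <- p.cmd.Wait() }()
	return p, nil
}

// tryWait mirrors the helper added in the diff: wait up to dur for exit.
func (p *procOracle) tryWait(dur time.Duration) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), dur)
	defer cancel()
	select {
	case <-ctx.Done():
		return false, nil
	case err := <-p.waitErr:
		return true, err
	}
}
```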
@@ -37,12 +37,14 @@ type AsteriscTraceProvider struct {
 func NewTraceProvider(logger log.Logger, m vm.Metricer, cfg vm.Config, vmCfg vm.OracleServerExecutor, prestateProvider types.PrestateProvider, asteriscPrestate string, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *AsteriscTraceProvider {
 	return &AsteriscTraceProvider{
 		logger:    logger,
 		dir:       dir,
 		prestate:  asteriscPrestate,
 		generator: vm.NewExecutor(logger, m, cfg, vmCfg, asteriscPrestate, localInputs),
 		gameDepth: gameDepth,
-		preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(vm.PreimageDir(dir)).Get),
+		preimageLoader: utils.NewPreimageLoader(func() utils.PreimageSource {
+			return kvstore.NewFileKV(vm.PreimageDir(dir))
+		}),
 		PrestateProvider: prestateProvider,
 	}
 }
@@ -174,12 +176,14 @@ type AsteriscTraceProviderForTest struct {
 func NewTraceProviderForTest(logger log.Logger, m vm.Metricer, cfg *config.Config, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *AsteriscTraceProviderForTest {
 	p := &AsteriscTraceProvider{
 		logger:    logger,
 		dir:       dir,
 		prestate:  cfg.AsteriscAbsolutePreState,
 		generator: vm.NewExecutor(logger, m, cfg.Asterisc, vm.NewOpProgramServerExecutor(), cfg.AsteriscAbsolutePreState, localInputs),
 		gameDepth: gameDepth,
-		preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(vm.PreimageDir(dir)).Get),
+		preimageLoader: utils.NewPreimageLoader(func() utils.PreimageSource {
+			return kvstore.NewFileKV(vm.PreimageDir(dir))
+		}),
 	}
 	return &AsteriscTraceProviderForTest{p}
 }
...
@@ -40,12 +40,14 @@ type CannonTraceProvider struct {
 func NewTraceProvider(logger log.Logger, m vm.Metricer, cfg vm.Config, vmCfg vm.OracleServerExecutor, prestateProvider types.PrestateProvider, prestate string, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *CannonTraceProvider {
 	return &CannonTraceProvider{
 		logger:    logger,
 		dir:       dir,
 		prestate:  prestate,
 		generator: vm.NewExecutor(logger, m, cfg, vmCfg, prestate, localInputs),
 		gameDepth: gameDepth,
-		preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(vm.PreimageDir(dir)).Get),
+		preimageLoader: utils.NewPreimageLoader(func() utils.PreimageSource {
+			return kvstore.NewFileKV(vm.PreimageDir(dir))
+		}),
 		PrestateProvider: prestateProvider,
 	}
 }
@@ -178,12 +180,14 @@ type CannonTraceProviderForTest struct {
 func NewTraceProviderForTest(logger log.Logger, m vm.Metricer, cfg *config.Config, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *CannonTraceProviderForTest {
 	p := &CannonTraceProvider{
 		logger:    logger,
 		dir:       dir,
 		prestate:  cfg.CannonAbsolutePreState,
 		generator: vm.NewExecutor(logger, m, cfg.Cannon, vm.NewOpProgramServerExecutor(), cfg.CannonAbsolutePreState, localInputs),
 		gameDepth: gameDepth,
-		preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(vm.PreimageDir(dir)).Get),
+		preimageLoader: utils.NewPreimageLoader(func() utils.PreimageSource {
+			return kvstore.NewFileKV(vm.PreimageDir(dir))
+		}),
 	}
 	return &CannonTraceProviderForTest{p}
 }
...
@@ -27,15 +27,20 @@ var (
 	ErrInvalidBlobKeyPreimage = errors.New("invalid blob key preimage")
 )
 
-type preimageSource func(key common.Hash) ([]byte, error)
+type PreimageSource interface {
+	Get(key common.Hash) ([]byte, error)
+	Close() error
+}
+
+type PreimageSourceCreator func() PreimageSource
 
 type PreimageLoader struct {
-	getPreimage preimageSource
+	makeSource PreimageSourceCreator
 }
 
-func NewPreimageLoader(getPreimage preimageSource) *PreimageLoader {
+func NewPreimageLoader(makeSource PreimageSourceCreator) *PreimageLoader {
 	return &PreimageLoader{
-		getPreimage: getPreimage,
+		makeSource: makeSource,
 	}
 }
@@ -57,7 +62,9 @@ func (l *PreimageLoader) loadBlobPreimage(proof *ProofData) (*types.PreimageOrac
 	// The key for a blob field element is a keccak hash of commitment++fieldElementIndex.
 	// First retrieve the preimage of the key as a keccak hash so we have the commitment and required field element
 	inputsKey := preimage.Keccak256Key(proof.OracleKey).PreimageKey()
-	inputs, err := l.getPreimage(inputsKey)
+	source := l.makeSource()
+	defer source.Close()
+	inputs, err := source.Get(inputsKey)
 	if err != nil {
 		return nil, fmt.Errorf("failed to get key preimage: %w", err)
 	}
@@ -74,7 +81,7 @@ func (l *PreimageLoader) loadBlobPreimage(proof *ProofData) (*types.PreimageOrac
 	for i := 0; i < params.BlobTxFieldElementsPerBlob; i++ {
 		binary.BigEndian.PutUint64(fieldElemKey[72:], uint64(i))
 		key := preimage.BlobKey(crypto.Keccak256(fieldElemKey)).PreimageKey()
-		fieldElement, err := l.getPreimage(key)
+		fieldElement, err := source.Get(key)
 		if err != nil {
 			return nil, fmt.Errorf("failed to load field element %v with key %v: %w", i, common.Hash(key), err)
 		}
@@ -105,7 +112,9 @@ func (l *PreimageLoader) loadBlobPreimage(proof *ProofData) (*types.PreimageOrac
 func (l *PreimageLoader) loadPrecompilePreimage(proof *ProofData) (*types.PreimageOracleData, error) {
 	inputKey := preimage.Keccak256Key(proof.OracleKey).PreimageKey()
-	input, err := l.getPreimage(inputKey)
+	source := l.makeSource()
+	defer source.Close()
+	input, err := source.Get(inputKey)
 	if err != nil {
 		return nil, fmt.Errorf("failed to get key preimage: %w", err)
 	}
...
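The `PreimageSource`/`PreimageSourceCreator` pair above replaces the bare `getPreimage` function so callers open the store only when a preimage is actually needed and close it again straight after, which is what avoids holding the PebbleDB lock concurrently. For callers that still only have a plain lookup function, a small adapter can satisfy the interface; this is an illustrative sketch, not a helper that exists in the repository:

```go
package example

import "github.com/ethereum/go-ethereum/common"

// PreimageSource mirrors the interface introduced above.
type PreimageSource interface {
	Get(key common.Hash) ([]byte, error)
	Close() error
}

// funcSource adapts a bare lookup function into a PreimageSource with a no-op Close.
type funcSource func(key common.Hash) ([]byte, error)

func (f funcSource) Get(key common.Hash) ([]byte, error) { return f(key) }
func (f funcSource) Close() error                        { return nil }

// SourceFromFunc wraps fn so it can be passed where a func() PreimageSource
// (a PreimageSourceCreator) is expected.
func SourceFromFunc(fn func(key common.Hash) ([]byte, error)) func() PreimageSource {
	return func() PreimageSource { return funcSource(fn) }
}
```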
@@ -20,14 +20,20 @@ import (
 )
 
 func TestPreimageLoader_NoPreimage(t *testing.T) {
-	loader := NewPreimageLoader(kvstore.NewMemKV().Get)
+	kv := kvstore.NewMemKV()
+	loader := NewPreimageLoader(func() PreimageSource {
+		return kv
+	})
 	actual, err := loader.LoadPreimage(&ProofData{})
 	require.NoError(t, err)
 	require.Nil(t, actual)
 }
 
 func TestPreimageLoader_LocalPreimage(t *testing.T) {
-	loader := NewPreimageLoader(kvstore.NewMemKV().Get)
+	kv := kvstore.NewMemKV()
+	loader := NewPreimageLoader(func() PreimageSource {
+		return kv
+	})
 	proof := &ProofData{
 		OracleKey:   common.Hash{byte(preimage.LocalKeyType), 0xaa, 0xbb}.Bytes(),
 		OracleValue: nil,
@@ -48,7 +54,10 @@ func TestPreimageLoader_SimpleTypes(t *testing.T) {
 	for _, keyType := range tests {
 		keyType := keyType
 		t.Run(fmt.Sprintf("type-%v", keyType), func(t *testing.T) {
-			loader := NewPreimageLoader(kvstore.NewMemKV().Get)
+			kv := kvstore.NewMemKV()
+			loader := NewPreimageLoader(func() PreimageSource {
+				return kv
+			})
 			proof := &ProofData{
 				OracleKey:   common.Hash{byte(keyType), 0xaa, 0xbb}.Bytes(),
 				OracleValue: []byte{1, 2, 3, 4, 5, 6},
@@ -90,7 +99,9 @@ func TestPreimageLoader_BlobPreimage(t *testing.T) {
 	t.Run("NoKeyPreimage", func(t *testing.T) {
 		kv := kvstore.NewMemKV()
-		loader := NewPreimageLoader(kv.Get)
+		loader := NewPreimageLoader(func() PreimageSource {
+			return kv
+		})
 		proof := &ProofData{
 			OracleKey:   common.Hash{byte(preimage.BlobKeyType), 0xaf}.Bytes(),
 			OracleValue: proof.OracleValue,
@@ -102,7 +113,9 @@ func TestPreimageLoader_BlobPreimage(t *testing.T) {
 	t.Run("InvalidKeyPreimage", func(t *testing.T) {
 		kv := kvstore.NewMemKV()
-		loader := NewPreimageLoader(kv.Get)
+		loader := NewPreimageLoader(func() PreimageSource {
+			return kv
+		})
 		proof := &ProofData{
 			OracleKey:   common.Hash{byte(preimage.BlobKeyType), 0xad}.Bytes(),
 			OracleValue: proof.OracleValue,
@@ -115,7 +128,9 @@ func TestPreimageLoader_BlobPreimage(t *testing.T) {
 	t.Run("MissingBlobs", func(t *testing.T) {
 		kv := kvstore.NewMemKV()
-		loader := NewPreimageLoader(kv.Get)
+		loader := NewPreimageLoader(func() PreimageSource {
+			return kv
+		})
 		proof := &ProofData{
 			OracleKey:   common.Hash{byte(preimage.BlobKeyType), 0xae}.Bytes(),
 			OracleValue: proof.OracleValue,
@@ -128,7 +143,9 @@ func TestPreimageLoader_BlobPreimage(t *testing.T) {
 	t.Run("Valid", func(t *testing.T) {
 		kv := kvstore.NewMemKV()
-		loader := NewPreimageLoader(kv.Get)
+		loader := NewPreimageLoader(func() PreimageSource {
+			return kv
+		})
 		storeBlob(t, kv, gokzg4844.KZGCommitment(commitment), gokzg4844.Blob(blob))
 		actual, err := loader.LoadPreimage(proof)
 		require.NoError(t, err)
@@ -161,13 +178,17 @@ func TestPreimageLoader_PrecompilePreimage(t *testing.T) {
 	t.Run("NoInputPreimage", func(t *testing.T) {
 		kv := kvstore.NewMemKV()
-		loader := NewPreimageLoader(kv.Get)
+		loader := NewPreimageLoader(func() PreimageSource {
+			return kv
+		})
 		_, err := loader.LoadPreimage(proof)
 		require.ErrorIs(t, err, kvstore.ErrNotFound)
 	})
 	t.Run("Valid", func(t *testing.T) {
 		kv := kvstore.NewMemKV()
-		loader := NewPreimageLoader(kv.Get)
+		loader := NewPreimageLoader(func() PreimageSource {
+			return kv
+		})
 		require.NoError(t, kv.Put(preimage.Keccak256Key(proof.OracleKey).PreimageKey(), input))
 		actual, err := loader.LoadPreimage(proof)
 		require.NoError(t, err)
...
 package preimage
 
 import (
+	"errors"
+	"fmt"
 	"io"
 	"os"
+	"syscall"
 )
 
 // FileChannel is a unidirectional channel for file I/O
@@ -41,10 +44,14 @@ func (rw *ReadWritePair) Writer() *os.File {
 }
 
 func (rw *ReadWritePair) Close() error {
+	var combinedErr error
 	if err := rw.r.Close(); err != nil {
-		return err
+		combinedErr = errors.Join(fmt.Errorf("failed to close reader: %w", err))
 	}
-	return rw.w.Close()
+	if err := rw.w.Close(); err != nil {
+		combinedErr = errors.Join(combinedErr, fmt.Errorf("failed to close writer: %w", err))
+	}
+	return combinedErr
 }
 
 // CreateBidirectionalChannel creates a pair of FileChannels that are connected to each other.
@@ -68,14 +75,21 @@ const (
 )
 
 func ClientHinterChannel() *ReadWritePair {
-	r := os.NewFile(HClientRFd, "preimage-hint-read")
-	w := os.NewFile(HClientWFd, "preimage-hint-write")
+	r := newFileNonBlocking(HClientRFd, "preimage-hint-read")
+	w := newFileNonBlocking(HClientWFd, "preimage-hint-write")
 	return NewReadWritePair(r, w)
 }
 
 // ClientPreimageChannel returns a FileChannel for the preimage oracle in a detached context
 func ClientPreimageChannel() *ReadWritePair {
-	r := os.NewFile(PClientRFd, "preimage-oracle-read")
-	w := os.NewFile(PClientWFd, "preimage-oracle-write")
+	r := newFileNonBlocking(PClientRFd, "preimage-oracle-read")
+	w := newFileNonBlocking(PClientWFd, "preimage-oracle-write")
 	return NewReadWritePair(r, w)
 }
+
+func newFileNonBlocking(fd int, name string) *os.File {
+	// Try to enable non-blocking mode for IO so that read calls return when the file is closed
+	// This may not be possible on all systems so errors are ignored.
+	_ = syscall.SetNonblock(fd, true)
+	return os.NewFile(uintptr(fd), name)
+}
@@ -6,10 +6,6 @@ import (
 	"io"
 	"os"
 
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/params"
-
 	"github.com/ethereum-optimism/optimism/op-node/rollup"
 	preimage "github.com/ethereum-optimism/optimism/op-preimage"
 	"github.com/ethereum-optimism/optimism/op-program/client/claim"
@@ -17,14 +13,17 @@ import (
 	"github.com/ethereum-optimism/optimism/op-program/client/l1"
 	"github.com/ethereum-optimism/optimism/op-program/client/l2"
 	"github.com/ethereum-optimism/optimism/op-service/eth"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/params"
 )
 
 // Main executes the client program in a detached context and exits the current process.
 // The client runtime environment must be preset before calling this function.
 func Main(logger log.Logger) {
 	log.Info("Starting fault proof program client")
-	preimageOracle := CreatePreimageChannel()
-	preimageHinter := CreateHinterChannel()
+	preimageOracle := preimage.ClientPreimageChannel()
+	preimageHinter := preimage.ClientHinterChannel()
 	if err := RunProgram(logger, preimageOracle, preimageHinter); errors.Is(err, claim.ErrClaimNotValid) {
 		log.Error("Claim is invalid", "err", err)
 		os.Exit(1)
@@ -76,16 +75,3 @@ func runDerivation(logger log.Logger, cfg *rollup.Config, l2Cfg *params.ChainCon
 	}
 	return claim.ValidateClaim(logger, l2ClaimBlockNum, eth.Bytes32(l2Claim), l2Source)
 }
-
-func CreateHinterChannel() preimage.FileChannel {
-	r := os.NewFile(HClientRFd, "preimage-hint-read")
-	w := os.NewFile(HClientWFd, "preimage-hint-write")
-	return preimage.NewReadWritePair(r, w)
-}
-
-// CreatePreimageChannel returns a FileChannel for the preimage oracle in a detached context
-func CreatePreimageChannel() preimage.FileChannel {
-	r := os.NewFile(PClientRFd, "preimage-oracle-read")
-	w := os.NewFile(PClientWFd, "preimage-oracle-write")
-	return preimage.NewReadWritePair(r, w)
-}
@@ -2,6 +2,7 @@ package main
 
 import (
 	"encoding/json"
+	"fmt"
 	"os"
 	"strconv"
 	"testing"
@@ -9,6 +10,7 @@ import (
 	"github.com/ethereum-optimism/optimism/op-node/chaincfg"
 	"github.com/ethereum-optimism/optimism/op-program/chainconfig"
 	"github.com/ethereum-optimism/optimism/op-program/host/config"
+	"github.com/ethereum-optimism/optimism/op-program/host/types"
 	oplog "github.com/ethereum-optimism/optimism/op-service/log"
 	"github.com/ethereum-optimism/optimism/op-service/sources"
@@ -119,6 +121,20 @@ func TestDataDir(t *testing.T) {
 	require.Equal(t, expected, cfg.DataDir)
 }
 
+func TestDataFormat(t *testing.T) {
+	for _, format := range types.SupportedDataFormats {
+		format := format
+		t.Run(fmt.Sprintf("Valid-%v", format), func(t *testing.T) {
+			cfg := configForArgs(t, addRequiredArgs("--data.format", string(format)))
+			require.Equal(t, format, cfg.DataFormat)
+		})
+	}
+	t.Run("Invalid", func(t *testing.T) {
+		verifyArgsInvalid(t, "invalid data format: foo", addRequiredArgs("--data.format", "foo"))
+	})
+}
+
 func TestL2(t *testing.T) {
 	expected := "https://example.com:8545"
 	cfg := configForArgs(t, addRequiredArgs("--l2", expected))
...
@@ -5,8 +5,10 @@ import (
 	"errors"
 	"fmt"
 	"os"
+	"slices"
 
 	"github.com/ethereum-optimism/optimism/op-node/chaincfg"
+	"github.com/ethereum-optimism/optimism/op-program/host/types"
 
 	opnode "github.com/ethereum-optimism/optimism/op-node"
 	"github.com/ethereum-optimism/optimism/op-node/rollup"
@@ -30,6 +32,7 @@ var (
 	ErrInvalidL2ClaimBlock = errors.New("invalid l2 claim block number")
 	ErrDataDirRequired     = errors.New("datadir must be specified when in non-fetching mode")
 	ErrNoExecInServerMode  = errors.New("exec command must not be set when in server mode")
+	ErrInvalidDataFormat   = errors.New("invalid data format")
 )
 
 type Config struct {
@@ -38,6 +41,9 @@ type Config struct {
 	// If not set, an in-memory key-value store is used and fetching data must be enabled
 	DataDir string
 
+	// DataFormat specifies the format to use for on-disk storage. Only applies when DataDir is set.
+	DataFormat types.DataFormat
+
 	// L1Head is the block hash of the L1 chain head block
 	L1Head common.Hash
 	L1URL  string
@@ -100,6 +106,9 @@ func (c *Config) Check() error {
 	if c.ServerMode && c.ExecCmd != "" {
 		return ErrNoExecInServerMode
 	}
+	if c.DataDir != "" && !slices.Contains(types.SupportedDataFormats, c.DataFormat) {
+		return ErrInvalidDataFormat
+	}
 	return nil
 }
@@ -130,6 +139,7 @@ func NewConfig(
 		L2ClaimBlockNumber:  l2ClaimBlockNum,
 		L1RPCKind:           sources.RPCKindStandard,
 		IsCustomChainConfig: isCustomConfig,
+		DataFormat:          types.DataFormatFile,
 	}
 }
@@ -183,9 +193,14 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) {
 	if err != nil {
 		return nil, fmt.Errorf("invalid genesis: %w", err)
 	}
+	dbFormat := types.DataFormat(ctx.String(flags.DataFormat.Name))
+	if !slices.Contains(types.SupportedDataFormats, dbFormat) {
+		return nil, fmt.Errorf("invalid %w: %v", ErrInvalidDataFormat, dbFormat)
+	}
 	return &Config{
 		Rollup:        rollupCfg,
 		DataDir:       ctx.String(flags.DataDir.Name),
+		DataFormat:    dbFormat,
 		L2URL:         ctx.String(flags.L2NodeAddr.Name),
 		L2ChainConfig: l2ChainConfig,
 		L2Head:        l2Head,
...
 package config
 
 import (
+	"fmt"
 	"math/big"
 	"testing"
 
 	"github.com/ethereum-optimism/optimism/op-node/chaincfg"
 	"github.com/ethereum-optimism/optimism/op-node/rollup"
 	"github.com/ethereum-optimism/optimism/op-program/chainconfig"
+	"github.com/ethereum-optimism/optimism/op-program/host/types"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/stretchr/testify/require"
@@ -173,6 +175,22 @@ func TestIsCustomChainConfig(t *testing.T) {
 }
 
+func TestDBFormat(t *testing.T) {
+	t.Run("invalid", func(t *testing.T) {
+		cfg := validConfig()
+		cfg.DataFormat = "foo"
+		require.ErrorIs(t, cfg.Check(), ErrInvalidDataFormat)
+	})
+	for _, format := range types.SupportedDataFormats {
+		format := format
+		t.Run(fmt.Sprintf("%v", format), func(t *testing.T) {
+			cfg := validConfig()
+			cfg.DataFormat = format
+			require.NoError(t, cfg.Check())
+		})
+	}
+}
+
 func validConfig() *Config {
 	cfg := NewConfig(validRollupConfig, validL2Genesis, validL1Head, validL2Head, validL2OutputRoot, validL2Claim, validL2ClaimBlockNum)
 	cfg.DataDir = "/tmp/configTest"
...
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/ethereum-optimism/optimism/op-program/host/types"
 	"github.com/urfave/cli/v2"
 
 	"github.com/ethereum-optimism/optimism/op-node/chaincfg"
@@ -35,6 +36,12 @@ var (
 		Usage:   "Directory to use for preimage data storage. Default uses in-memory storage",
 		EnvVars: prefixEnvVars("DATADIR"),
 	}
+	DataFormat = &cli.StringFlag{
+		Name:    "data.format",
+		Usage:   fmt.Sprintf("Format to use for preimage data storage. Available formats: %s", openum.EnumString(types.SupportedDataFormats)),
+		EnvVars: prefixEnvVars("DATA_FORMAT"),
+		Value:   string(types.DataFormatFile),
+	}
 	L2NodeAddr = &cli.StringFlag{
 		Name:  "l2",
 		Usage: "Address of L2 JSON-RPC endpoint to use (eth and debug namespace required)",
@@ -122,6 +129,7 @@ var programFlags = []cli.Flag{
 	RollupConfig,
 	Network,
 	DataDir,
+	DataFormat,
 	L2NodeAddr,
 	L2GenesisPath,
 	L1NodeAddr,
...
@@ -16,8 +16,10 @@ import (
 	"github.com/ethereum-optimism/optimism/op-program/host/flags"
 	"github.com/ethereum-optimism/optimism/op-program/host/kvstore"
 	"github.com/ethereum-optimism/optimism/op-program/host/prefetcher"
+	"github.com/ethereum-optimism/optimism/op-program/host/types"
 	opservice "github.com/ethereum-optimism/optimism/op-service"
 	"github.com/ethereum-optimism/optimism/op-service/client"
+	"github.com/ethereum-optimism/optimism/op-service/ctxinterrupt"
 	"github.com/ethereum-optimism/optimism/op-service/sources"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
@@ -35,10 +37,12 @@ func Main(logger log.Logger, cfg *config.Config) error {
 	opservice.ValidateEnvVars(flags.EnvVarPrefix, flags.Flags, logger)
 	cfg.Rollup.LogDescription(logger, chaincfg.L2ChainIDToNetworkDisplayName)
 
-	ctx := context.Background()
+	hostCtx, stop := ctxinterrupt.WithSignalWaiter(context.Background())
+	defer stop()
+	ctx := ctxinterrupt.WithCancelOnInterrupt(hostCtx)
 	if cfg.ServerMode {
-		preimageChan := cl.CreatePreimageChannel()
-		hinterChan := cl.CreateHinterChannel()
+		preimageChan := preimage.ClientPreimageChannel()
+		hinterChan := preimage.ClientHinterChannel()
 		return PreimageServer(ctx, logger, cfg, preimageChan, hinterChan)
 	}
@@ -122,6 +126,10 @@ func FaultProofProgram(ctx context.Context, logger log.Logger, cfg *config.Confi
 func PreimageServer(ctx context.Context, logger log.Logger, cfg *config.Config, preimageChannel preimage.FileChannel, hintChannel preimage.FileChannel) error {
 	var serverDone chan error
 	var hinterDone chan error
+	logger.Info("Starting preimage server")
+	var kv kvstore.KV
+	// Close the preimage/hint channels, and then kv store once the server and hinter have exited.
 	defer func() {
 		preimageChannel.Close()
 		hintChannel.Close()
@@ -133,18 +141,28 @@ func PreimageServer(ctx context.Context, logger log.Logger, cfg *config.Config,
 			// Wait for hinter to complete
 			<-hinterDone
 		}
+		if kv != nil {
+			kv.Close()
+		}
 	}()
 
-	logger.Info("Starting preimage server")
-	var kv kvstore.KV
 	if cfg.DataDir == "" {
 		logger.Info("Using in-memory storage")
 		kv = kvstore.NewMemKV()
 	} else {
-		logger.Info("Creating disk storage", "datadir", cfg.DataDir)
+		logger.Info("Creating disk storage", "datadir", cfg.DataDir, "format", cfg.DataFormat)
 		if err := os.MkdirAll(cfg.DataDir, 0755); err != nil {
 			return fmt.Errorf("creating datadir: %w", err)
 		}
-		kv = kvstore.NewDiskKV(cfg.DataDir)
+		switch cfg.DataFormat {
+		case types.DataFormatFile:
+			kv = kvstore.NewFileKV(cfg.DataDir)
+		case types.DataFormatPebble:
+			kv = kvstore.NewPebbleKV(cfg.DataDir)
+		default:
+			return fmt.Errorf("invalid data format: %s", cfg.DataFormat)
+		}
 	}
 
 	var (
@@ -178,6 +196,9 @@ func PreimageServer(ctx context.Context, logger log.Logger, cfg *config.Config,
 		return err
 	case err := <-hinterDone:
 		return err
+	case <-ctx.Done():
+		logger.Info("Shutting down")
+		return ctx.Err()
 	}
 }
...
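The host now derives its context from `ctxinterrupt` so that SIGINT/SIGTERM cancel `PreimageServer` and the deferred channel/kv cleanup still runs. A rough standard-library equivalent of that wiring (an approximation, not the actual `op-service/ctxinterrupt` code) looks like:

```go
package example

import (
	"context"
	"os/signal"
	"syscall"
)

// runWithInterrupt cancels ctx on SIGINT/SIGTERM so deferred cleanup, such as
// closing the preimage channels and the kv store, still executes on shutdown.
// This only approximates what ctxinterrupt provides.
func runWithInterrupt(run func(ctx context.Context) error) error {
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()
	return run(ctx)
}
```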
@@ -13,28 +13,28 @@ import (
 )
 
 // read/write mode for user/group/other, not executable.
-const diskPermission = 0666
+const filePermission = 0666
 
-// DiskKV is a disk-backed key-value store, every key-value pair is a hex-encoded .txt file, with the value as content.
-// DiskKV is safe for concurrent use with a single DiskKV instance.
-// DiskKV is safe for concurrent use between different DiskKV instances of the same disk directory as long as the
+// FileKV is a disk-backed key-value store, every key-value pair is a hex-encoded .txt file, with the value as content.
+// FileKV is safe for concurrent use with a single FileKV instance.
+// FileKV is safe for concurrent use between different FileKV instances of the same disk directory as long as the
 // file system supports atomic renames.
-type DiskKV struct {
+type FileKV struct {
 	sync.RWMutex
 	path string
 }
 
-// NewDiskKV creates a DiskKV that puts/gets pre-images as files in the given directory path.
+// NewFileKV creates a FileKV that puts/gets pre-images as files in the given directory path.
 // The path must exist, or subsequent Put/Get calls will error when it does not.
-func NewDiskKV(path string) *DiskKV {
-	return &DiskKV{path: path}
+func NewFileKV(path string) *FileKV {
+	return &FileKV{path: path}
 }
 
-func (d *DiskKV) pathKey(k common.Hash) string {
+func (d *FileKV) pathKey(k common.Hash) string {
 	return path.Join(d.path, k.String()+".txt")
 }
 
-func (d *DiskKV) Put(k common.Hash, v []byte) error {
+func (d *FileKV) Put(k common.Hash, v []byte) error {
 	d.Lock()
 	defer d.Unlock()
 	f, err := openTempFile(d.path, k.String()+".txt.*")
@@ -72,10 +72,10 @@ func openTempFile(dir string, nameTemplate string) (*os.File, error) {
 	return f, nil
 }
 
-func (d *DiskKV) Get(k common.Hash) ([]byte, error) {
+func (d *FileKV) Get(k common.Hash) ([]byte, error) {
 	d.RLock()
 	defer d.RUnlock()
-	f, err := os.OpenFile(d.pathKey(k), os.O_RDONLY, diskPermission)
+	f, err := os.OpenFile(d.pathKey(k), os.O_RDONLY, filePermission)
 	if err != nil {
 		if errors.Is(err, os.ErrNotExist) {
 			return nil, ErrNotFound
@@ -90,4 +90,8 @@ func (d *DiskKV) Get(k common.Hash) ([]byte, error) {
 	return hex.DecodeString(string(dat))
 }
 
-var _ KV = (*DiskKV)(nil)
+func (d *FileKV) Close() error {
+	return nil
+}
+
+var _ KV = (*FileKV)(nil)
@@ -10,14 +10,18 @@ import (
 
 func TestDiskKV(t *testing.T) {
 	tmp := t.TempDir() // automatically removed by testing cleanup
-	kv := NewDiskKV(tmp)
+	kv := NewFileKV(tmp)
+	t.Cleanup(func() { // Can't use defer because kvTest runs tests in parallel.
+		require.NoError(t, kv.Close())
+	})
 	kvTest(t, kv)
 }
 
 func TestCreateMissingDirectory(t *testing.T) {
 	tmp := t.TempDir()
	dir := filepath.Join(tmp, "data")
-	kv := NewDiskKV(dir)
+	kv := NewFileKV(dir)
+	defer kv.Close()
 	val := []byte{1, 2, 3, 4}
 	key := crypto.Keccak256Hash(val)
 	require.NoError(t, kv.Put(key, val))
...
@@ -19,4 +19,7 @@ type KV interface {
 	// It returns ErrNotFound when the pre-image cannot be found.
 	// KV store implementations may return additional errors specific to the KV storage.
 	Get(k common.Hash) ([]byte, error)
+
+	// Closes the KV store.
+	Close() error
 }
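With `Close` added to the `KV` interface, callers are expected to release the store when they are done, which matters once a Pebble-backed implementation holds a directory lock. A hedged usage sketch against the interface (the `Put` signature and the `openKV` argument are assumptions for illustration):

```go
package example

import "github.com/ethereum/go-ethereum/common"

// KV mirrors the interface in the diff above; Put is assumed to match FileKV's signature.
type KV interface {
	Put(k common.Hash, v []byte) error
	Get(k common.Hash) ([]byte, error)
	Close() error
}

// roundTrip shows the expected lifecycle: open, write, read back, and always
// Close so a Pebble-backed store releases its lock for the next reader.
func roundTrip(openKV func() KV, key common.Hash, value []byte) ([]byte, error) {
	kv := openKV()
	defer kv.Close()
	if err := kv.Put(key, value); err != nil {
		return nil, err
	}
	return kv.Get(key)
}
```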
@@ -37,3 +37,7 @@ func (m *MemKV) Get(k common.Hash) ([]byte, error) {
 	}
 	return slices.Clone(v), nil
 }
+
+func (m *MemKV) Close() error {
+	return nil
+}
package kvstore

import (
	"errors"
	"fmt"
	"runtime"
	"sync"

	"github.com/cockroachdb/pebble"
	"github.com/ethereum/go-ethereum/common"
)

// PebbleKV is a disk-backed key-value store, with PebbleDB as the underlying DBMS.
// PebbleKV is safe for concurrent use with a single PebbleKV instance.
type PebbleKV struct {
	sync.RWMutex
	db *pebble.DB
}

// NewPebbleKV creates a PebbleKV that puts/gets pre-images in a pebble database at the given directory path.
// The directory is created if it does not exist; NewPebbleKV panics if the database cannot be opened.
func NewPebbleKV(path string) *PebbleKV {
	opts := &pebble.Options{
		Cache:                    pebble.NewCache(int64(32 * 1024 * 1024)),
		MaxConcurrentCompactions: runtime.NumCPU,
		Levels: []pebble.LevelOptions{
			{Compression: pebble.SnappyCompression},
		},
	}
	db, err := pebble.Open(path, opts)
	if err != nil {
		panic(fmt.Errorf("failed to open pebbledb at %s: %w", path, err))
	}
	return &PebbleKV{db: db}
}

func (d *PebbleKV) Put(k common.Hash, v []byte) error {
	d.Lock()
	defer d.Unlock()
	return d.db.Set(k.Bytes(), v, pebble.Sync)
}

func (d *PebbleKV) Get(k common.Hash) ([]byte, error) {
	d.RLock()
	defer d.RUnlock()
	dat, closer, err := d.db.Get(k.Bytes())
	if err != nil {
		if errors.Is(err, pebble.ErrNotFound) {
			return nil, ErrNotFound
		}
		return nil, err
	}
	ret := make([]byte, len(dat))
	copy(ret, dat)
	closer.Close()
	return ret, nil
}

func (d *PebbleKV) Close() error {
	d.Lock()
	defer d.Unlock()
	return d.db.Close()
}

var _ KV = (*PebbleKV)(nil)
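Because Pebble locks the data directory and `NewPebbleKV` panics if the database cannot be opened, only one instance should be open per datadir, and it must be closed before another process reads the preimages. A short usage sketch (the path is illustrative):

```go
package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-program/host/kvstore"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	kv := kvstore.NewPebbleKV("/tmp/preimages-pebble") // panics if the DB cannot be opened
	defer kv.Close()

	val := []byte{1, 2, 3, 4}
	key := crypto.Keccak256Hash(val)
	if err := kv.Put(key, val); err != nil {
		panic(err)
	}
	got, err := kv.Get(key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("stored %d bytes under %s\n", len(got), key)
}
```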
package kvstore

import (
	"path/filepath"
	"testing"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/stretchr/testify/require"
)

func TestPebbleKV(t *testing.T) {
	tmp := t.TempDir() // automatically removed by testing cleanup
	kv := NewPebbleKV(tmp)
	t.Cleanup(func() { // Can't use defer because kvTest runs tests in parallel.
		require.NoError(t, kv.Close())
	})
	kvTest(t, kv)
}

func TestPebbleKV_CreateMissingDirectory(t *testing.T) {
	tmp := t.TempDir()
	dir := filepath.Join(tmp, "data")
	kv := NewPebbleKV(dir)
	defer kv.Close()
	val := []byte{1, 2, 3, 4}
	key := crypto.Keccak256Hash(val)
	require.NoError(t, kv.Put(key, val))
}
package types

type DataFormat string

const (
	DataFormatFile   DataFormat = "file"
	DataFormatPebble DataFormat = "pebble"
)

var SupportedDataFormats = []DataFormat{DataFormatFile, DataFormatPebble}
@@ -5,7 +5,7 @@ SCRIPTS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 COMPAT_DIR="${SCRIPTS_DIR}/../temp/compat"
 
 TESTNAME="${1?Must specify compat file to run}"
-BASEURL="${2:-https://github.com/ethereum-optimism/chain-test-data/releases/download/2024-08-02}"
+BASEURL="${2:-https://github.com/ethereum-optimism/chain-test-data/releases/download/2024-09-01}"
 
 URL="${BASEURL}/${TESTNAME}.tar.bz"
@@ -13,4 +13,4 @@ mkdir -p "${COMPAT_DIR}"
 curl --etag-save "${COMPAT_DIR}/${TESTNAME}-etag.txt" --etag-compare "${COMPAT_DIR}/${TESTNAME}-etag.txt" -L --fail -o "${COMPAT_DIR}/${TESTNAME}.tar.bz" "${URL}"
 tar jxf "${COMPAT_DIR}/${TESTNAME}.tar.bz" -C "${COMPAT_DIR}"
 # shellcheck disable=SC2046
-"${SCRIPTS_DIR}/../bin/op-program" $(cat "${COMPAT_DIR}/${TESTNAME}/args.txt")
+"${SCRIPTS_DIR}/../bin/op-program" --data.format=pebble $(cat "${COMPAT_DIR}/${TESTNAME}/args.txt")