Commit f603d8bf authored by mergify[bot], committed by GitHub

Merge branch 'develop' into aj/challenger-packages

parents fe394d7d 2ae08af9
......@@ -30,13 +30,13 @@ func NewApi(logger log.Logger, bv database.BridgeTransfersView) *Api {
}
func (a *Api) Listen(ctx context.Context, port int) error {
a.log.Info("starting api server", "port", port)
a.log.Info("api server listening...", "port", port)
server := http.Server{Addr: fmt.Sprintf(":%d", port), Handler: a.Router}
err := httputil.ListenAndServeContext(ctx, &server)
if err != nil {
a.log.Error("api server shutdown", "err", err)
a.log.Error("api server stopped", "err", err)
} else {
a.log.Info("api server shutdown")
a.log.Info("api server stopped")
}
return err
......
package main
import (
"context"
"sync"
"github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum-optimism/optimism/indexer/api"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/opio"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
......@@ -25,67 +24,73 @@ var (
)
func runIndexer(ctx *cli.Context) error {
logger := log.NewLogger(log.ReadCLIConfig(ctx))
cfg, err := config.LoadConfig(logger, ctx.String(ConfigFlag.Name))
log := log.NewLogger(log.ReadCLIConfig(ctx)).New("role", "indexer")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
logger.Error("failed to load config", "err", err)
log.Error("failed to load config", "err", err)
return err
}
db, err := database.NewDB(cfg.DB)
if err != nil {
log.Error("failed to connect to database", "err", err)
return err
}
indexer, err := indexer.NewIndexer(logger, cfg.Chain, cfg.RPCs, db)
indexer, err := indexer.NewIndexer(log, cfg.Chain, cfg.RPCs, db)
if err != nil {
log.Error("failed to create indexer", "err", err)
return err
}
indexerCtx, indexerCancel := context.WithCancel(context.Background())
go func() {
opio.BlockOnInterrupts()
logger.Error("caught interrupt, shutting down...")
indexerCancel()
}()
return indexer.Run(indexerCtx)
return indexer.Run(ctx.Context)
}
func runApi(ctx *cli.Context) error {
logger := log.NewLogger(log.ReadCLIConfig(ctx))
cfg, err := config.LoadConfig(logger, ctx.String(ConfigFlag.Name))
log := log.NewLogger(log.ReadCLIConfig(ctx)).New("role", "api")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
logger.Error("failed to load config", "err", err)
log.Error("failed to load config", "err", err)
return err
}
db, err := database.NewDB(cfg.DB)
if err != nil {
logger.Crit("Failed to connect to database", "err", err)
log.Error("failed to connect to database", "err", err)
return err
}
apiCtx, apiCancel := context.WithCancel(context.Background())
api := api.NewApi(logger, db.BridgeTransfers)
go func() {
opio.BlockOnInterrupts()
logger.Error("caught interrupt, shutting down...")
apiCancel()
}()
return api.Listen(apiCtx, cfg.API.Port)
api := api.NewApi(log, db.BridgeTransfers)
return api.Listen(ctx.Context, cfg.API.Port)
}
func runAll(ctx *cli.Context) error {
// Run the indexer
log := log.NewLogger(log.ReadCLIConfig(ctx))
// Ensure both processes complete before returning.
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
err := runApi(ctx)
if err != nil {
log.Error("api process non-zero exit", "err", err)
}
}()
go func() {
if err := runIndexer(ctx); err != nil {
log.NewLogger(log.ReadCLIConfig(ctx)).Error("Error running the indexer", "err", err)
defer wg.Done()
err := runIndexer(ctx)
if err != nil {
log.Error("indexer process non-zero exit", "err", err)
}
}()
// Run the API and return its error, if any
return runApi(ctx)
// We purposefully return no error since the indexer and api
// have no inter-dependencies. We simply rely on the logs to
// report a non-zero exit for either process.
wg.Wait()
return nil
}
func newCli(GitCommit string, GitDate string) *cli.App {
......@@ -108,6 +113,12 @@ func newCli(GitCommit string, GitDate string) *cli.App {
Description: "Runs the indexing service",
Action: runIndexer,
},
{
Name: "all",
Flags: flags,
Description: "Runs both the api service and the indexing service",
Action: runAll,
},
{
Name: "version",
Description: "print version",
......@@ -116,12 +127,6 @@ func newCli(GitCommit string, GitDate string) *cli.App {
return nil
},
},
{
Name: "all",
Flags: flags,
Description: "Runs both the api service and the indexing service",
Action: runAll,
},
},
}
}
package main
import (
"context"
"os"
"github.com/ethereum-optimism/optimism/op-service/opio"
"github.com/ethereum/go-ethereum/log"
)
......@@ -12,8 +14,16 @@ var (
)
func main() {
// This is the root context, used to propagate
// cancellations to all spawned application-level goroutines
ctx, cancel := context.WithCancel(context.Background())
go func() {
opio.BlockOnInterrupts()
cancel()
}()
app := newCli(GitCommit, GitDate)
if err := app.Run(os.Args); err != nil {
log.Crit("application failed", "err", err)
if err := app.RunContext(ctx, os.Args); err != nil {
log.Error("application failed", "err", err)
}
}
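Aside from the `opio` helper used above, the standard library offers an equivalent way to wire a root context that is cancelled on interrupt. A minimal sketch of that pattern (the blocking body is a placeholder, not the indexer CLI):

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Root context cancelled on SIGINT/SIGTERM, propagated to everything
	// the application spawns (equivalent in spirit to the opio wiring above).
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

	// Placeholder for app.RunContext(ctx, os.Args): block until cancelled.
	<-ctx.Done()
	fmt.Println("caught interrupt, shutting down...")
}
```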
......@@ -84,39 +84,39 @@ func (i *Indexer) Run(ctx context.Context) error {
var wg sync.WaitGroup
errCh := make(chan error, 3)
// If either processor errors out, we stop
// if any goroutine halts, we stop the entire indexer
subCtx, cancel := context.WithCancel(ctx)
run := func(start func(ctx context.Context) error) {
wg.Add(1)
defer func() {
if err := recover(); err != nil {
i.log.Error("halting indexer on panic", "err", err)
debug.PrintStack()
errCh <- fmt.Errorf("panic: %v", err)
}
cancel()
wg.Done()
go func() {
defer func() {
if err := recover(); err != nil {
i.log.Error("halting indexer on panic", "err", err)
debug.PrintStack()
errCh <- fmt.Errorf("panic: %v", err)
}
cancel()
wg.Done()
}()
errCh <- start(subCtx)
}()
err := start(subCtx)
if err != nil {
i.log.Error("halting indexer on error", "err", err)
}
// Send a value down regardless if we've received an error
// or halted via cancellation where err == nil
errCh <- err
}
// Kick off all the dependent routines
go run(i.L1ETL.Start)
go run(i.L2ETL.Start)
go run(i.BridgeProcessor.Start)
run(i.L1ETL.Start)
run(i.L2ETL.Start)
run(i.BridgeProcessor.Start)
wg.Wait()
err := <-errCh
if err != nil {
i.log.Error("indexer stopped", "err", err)
} else {
i.log.Info("indexer stopped")
}
wg.Wait()
i.log.Info("indexer stopped")
return err
}
......
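For readers following the `Run` refactor above, here is a standalone sketch of the same fan-out pattern: a `sync.WaitGroup`, a buffered error channel, and a shared cancel so the first goroutine to halt stops the rest. The two task functions are placeholders, not the real ETL or bridge processors:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"runtime/debug"
	"sync"
	"time"
)

// runGroup mirrors the pattern: every task shares a sub-context, panics are
// recovered and reported, and the first task to halt cancels the rest.
func runGroup(ctx context.Context, tasks ...func(context.Context) error) error {
	var wg sync.WaitGroup
	errCh := make(chan error, len(tasks))
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	for _, task := range tasks {
		task := task
		wg.Add(1)
		go func() {
			defer func() {
				if r := recover(); r != nil {
					debug.PrintStack()
					errCh <- fmt.Errorf("panic: %v", r)
				}
				cancel()
				wg.Done()
			}()
			// Send a value regardless of whether the task errored or simply
			// returned after cancellation (err == nil).
			errCh <- task(subCtx)
		}()
	}

	wg.Wait()
	return <-errCh // the first reported result decides the exit
}

func main() {
	err := runGroup(context.Background(),
		func(ctx context.Context) error { <-ctx.Done(); return nil },
		func(ctx context.Context) error {
			time.Sleep(50 * time.Millisecond)
			return errors.New("task halted")
		},
	)
	fmt.Println("group stopped:", err)
}
```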
......@@ -136,7 +136,7 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, chainConfig
if err != nil {
return err
} else if withdrawal == nil {
log.Crit("missing indexed withdrawal on prove event!", "withdrawal_hash", proven.WithdrawalHash, "tx_hash", proven.Event.TransactionHash)
log.Error("missing indexed withdrawal on prove event!", "withdrawal_hash", proven.WithdrawalHash, "tx_hash", proven.Event.TransactionHash)
return errors.New("missing indexed withdrawal")
}
......@@ -161,7 +161,7 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, chainConfig
if err != nil {
return err
} else if withdrawal == nil {
log.Crit("missing indexed withdrawal on finalization event!", "withdrawal_hash", finalized.WithdrawalHash, "tx_hash", finalized.Event.TransactionHash)
log.Error("missing indexed withdrawal on finalization event!", "withdrawal_hash", finalized.WithdrawalHash, "tx_hash", finalized.Event.TransactionHash)
return errors.New("missing indexed withdrawal")
}
......@@ -188,7 +188,7 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, chainConfig
if err != nil {
return err
} else if message == nil {
log.Crit("missing indexed L2CrossDomainMessenger message", "message_hash", relayed.MessageHash, "tx_hash", relayed.Event.TransactionHash)
log.Error("missing indexed L2CrossDomainMessenger message", "message_hash", relayed.MessageHash, "tx_hash", relayed.Event.TransactionHash)
return fmt.Errorf("missing indexed L2CrossDomainMessager message")
}
......@@ -225,7 +225,7 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, chainConfig
if err != nil {
return err
} else if withdrawal == nil {
log.Crit("missing L2StandardBridge withdrawal on L1 finalization", "tx_hash", finalizedBridge.Event.TransactionHash)
log.Error("missing L2StandardBridge withdrawal on L1 finalization", "tx_hash", finalizedBridge.Event.TransactionHash)
return errors.New("missing L2StandardBridge withdrawal on L1 finalization")
}
}
......
......@@ -137,7 +137,7 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, fromHeight
if err != nil {
return err
} else if message == nil {
log.Crit("missing indexed L1CrossDomainMessenger message", "message_hash", relayed.MessageHash, "tx_hash", relayed.Event.TransactionHash)
log.Error("missing indexed L1CrossDomainMessenger message", "message_hash", relayed.MessageHash, "tx_hash", relayed.Event.TransactionHash)
return fmt.Errorf("missing indexed L1CrossDomainMessager message")
}
......@@ -174,7 +174,7 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, fromHeight
if err != nil {
return err
} else if deposit == nil {
log.Crit("missing L1StandardBridge deposit on L2 finalization", "tx_hash", finalizedBridge.Event.TransactionHash)
log.Error("missing L1StandardBridge deposit on L2 finalization", "tx_hash", finalizedBridge.Event.TransactionHash)
return errors.New("missing L1StandardBridge deposit on L2 finalization")
}
}
......
......@@ -48,6 +48,7 @@ finalization.
- [Security Considerations](#security-considerations)
- [Key Properties of Withdrawal Verification](#key-properties-of-withdrawal-verification)
- [Handling Successfully Verified Messages That Fail When Relayed](#handling-successfully-verified-messages-that-fail-when-relayed)
- [OptimismPortal can send arbitrary messages on L1](#optimismportal-can-send-arbitrary-messages-on-l1)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
......@@ -216,3 +217,15 @@ contracts if desired.
[`WithdrawalTransaction` type]: https://github.com/ethereum-optimism/optimism/blob/08daf8dbd38c9ffdbd18fc9a211c227606cdb0ad/packages/contracts-bedrock/src/libraries/Types.sol#L62-L69
[`OutputRootProof` type]: https://github.com/ethereum-optimism/optimism/blob/08daf8dbd38c9ffdbd18fc9a211c227606cdb0ad/packages/contracts-bedrock/src/libraries/Types.sol#L25-L30
### OptimismPortal can send arbitrary messages on L1
The `L2ToL1MessagePasser` contract's `initiateWithdrawal` function accepts a `_target` address and `_data` bytes,
which are passed to a `CALL` opcode on L1 when `finalizeWithdrawalTransaction` is called after the challenge
period. This means that, by design, the `OptimismPortal` contract can be used to send arbitrary transactions on
L1, with the `OptimismPortal` as the `msg.sender`.
Users of the `OptimismPortal` contract should therefore be careful about which permissions they grant to the portal.
For example, any ERC20 tokens mistakenly sent to the `OptimismPortal` contract are effectively lost: anybody can
claim them by pre-approving transfers of the token out of the portal, using L2 to initiate the approval and L1 to
prove and finalize it (after the challenge period).
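As a hedged illustration of the point above (not part of the spec), the sketch below uses go-ethereum's `abi` package to pack a call to `initiateWithdrawal`, assuming the widely published `initiateWithdrawal(address,uint256,bytes)` signature, which also carries a gas limit alongside the `_target` and `_data` discussed here. The target address and calldata are hypothetical; the only point is that both are fully user-controlled.

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

// Assumed ABI fragment for L2ToL1MessagePasser.initiateWithdrawal; the real
// contract takes a gas limit in addition to the target and data bytes.
const messagePasserABI = `[{"type":"function","name":"initiateWithdrawal","inputs":[
  {"name":"_target","type":"address"},
  {"name":"_gasLimit","type":"uint256"},
  {"name":"_data","type":"bytes"}]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(messagePasserABI))
	if err != nil {
		panic(err)
	}

	// Any L1 target and any calldata: once the withdrawal is proven and
	// finalized, the OptimismPortal performs CALL(_target, _data) on L1
	// with itself as msg.sender.
	target := common.HexToAddress("0x1111111111111111111111111111111111111111") // hypothetical L1 address
	data := []byte{0xde, 0xad, 0xbe, 0xef}                                       // arbitrary calldata

	calldata, err := parsed.Pack("initiateWithdrawal", target, big.NewInt(100_000), data)
	if err != nil {
		panic(err)
	}
	fmt.Printf("L2ToL1MessagePasser calldata: 0x%x\n", calldata)
}
```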