Commit 43b47c22 authored by mergify[bot], committed by GitHub

Merge pull request #4990 from ethereum-optimism/seb/batcher-ch-duration

op-batcher: Add channel duration limiting
parents 377e7823 fb6750a7
......@@ -525,8 +525,7 @@ jobs:
# constraint that gotestsum does not currently (nor likely will) accept files from different packages when building.
OP_TESTLOG_DISABLE_COLOR=true OP_E2E_DISABLE_PARALLEL=true OP_E2E_USE_HTTP=<<parameters.use_http>> gotestsum \
--format=standard-verbose --junitfile=/tmp/test-results/<<parameters.module>>_http_<<parameters.use_http>>.xml \
./... \
-- -timeout=20m
-- -timeout=20m ./...
working_directory: <<parameters.module>>
- store_test_results:
path: /tmp/test-results
......
......@@ -18,6 +18,15 @@ type ChannelConfig struct {
// The maximum number of L1 blocks that the inclusion transactions of a
// channel's frames can span.
ChannelTimeout uint64
// Builder Config
// MaxChannelDuration is the maximum duration (in #L1-blocks) to keep the
// channel open. This allows control over how long a channel is kept open
// during times of low transaction volume.
//
// If 0, duration checks are disabled.
MaxChannelDuration uint64
// The batcher tx submission safety margin (in #L1-blocks) to subtract from
// a channel's timeout and sequencing window, to guarantee safe inclusion of
// a channel on L1.
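For illustration, a ChannelConfig with duration limiting enabled could look like the following sketch; all values are made-up examples, not recommendations:

```go
// Illustrative only: the numbers are assumptions, not recommended settings.
cfg := ChannelConfig{
	SeqWindowSize:      3600, // rollup sequencing window (in L1 blocks)
	ChannelTimeout:     300,  // consensus channel timeout (in L1 blocks)
	MaxChannelDuration: 120,  // force the channel to close after ~120 L1 blocks
	SubSafetyMargin:    10,   // safety margin subtracted from both timeouts
}
```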
......@@ -50,12 +59,17 @@ func (c ChannelConfig) InputThreshold() uint64 {
type channelBuilder struct {
cfg ChannelConfig
// L1 block timestamp of combined channel & sequencing window timeout. 0 if
// no timeout set yet.
// L1 block number timeout of combined
// - channel duration timeout,
// - consensus channel timeout,
// - sequencing window timeout.
// 0 if no block number timeout set yet.
timeout uint64
// reason for currently set timeout
timeoutReason error
// marked as full if a) max RLP input bytes, b) max num frames or c) max
// allowed frame index (uint16) has been reached
// Reason for the channel being full. Set by setFullErr so it's always
// guaranteed to be a ChannelFullError wrapping the specific reason.
fullErr error
// current channel
co *derive.ChannelOut
......@@ -102,28 +116,6 @@ func (c *channelBuilder) Reset() error {
return c.co.Reset()
}
// FramePublished calculates the submission timeout of this channel from the
// given frame inclusion L1-block number. If an older frame tx has already been
// seen, the timeout is not updated.
func (c *channelBuilder) FramePublished(l1BlockNum uint64) {
timeout := l1BlockNum + c.cfg.ChannelTimeout - c.cfg.SubSafetyMargin
c.updateTimeout(timeout)
}
// TimedOut returns whether the passed block number is after the channel timeout
// block. If no block timeout is set yet, it returns false.
func (c *channelBuilder) TimedOut(blockNum uint64) bool {
return c.timeout != 0 && blockNum >= c.timeout
}
// CheckTimeout checks if the channel is timed out at the given block number and
// in this case marks the channel as full with reason ErrChannelTimedOut.
func (c *channelBuilder) CheckTimeout(blockNum uint64) {
if !c.IsFull() && c.TimedOut(blockNum) {
c.setFullErr(ErrChannelTimedOut)
}
}
// AddBlock adds a block to the channel compression pipeline. IsFull should be
// called afterwards to test whether the channel is full. If full, a new channel
// must be started.
......@@ -151,7 +143,7 @@ func (c *channelBuilder) AddBlock(block *types.Block) error {
c.blocks = append(c.blocks, block)
c.updateSwTimeout(batch)
if c.InputTargetReached() {
if c.inputTargetReached() {
c.setFullErr(ErrInputTargetReached)
// Adding this block still worked, so don't return error, just mark as full
}
......@@ -159,25 +151,76 @@ func (c *channelBuilder) AddBlock(block *types.Block) error {
return nil
}
// Timeout management
// RegisterL1Block should be called whenever a new L1-block is seen.
//
// It ensures proper tracking of all possible timeouts (max channel duration,
// close to consensus channel timeout, close to end of sequencing window).
func (c *channelBuilder) RegisterL1Block(l1BlockNum uint64) {
c.updateDurationTimeout(l1BlockNum)
c.checkTimeout(l1BlockNum)
}
// FramePublished should be called whenever a frame of this channel got
// published with the L1-block number of the block that the frame got included
// in.
func (c *channelBuilder) FramePublished(l1BlockNum uint64) {
timeout := l1BlockNum + c.cfg.ChannelTimeout - c.cfg.SubSafetyMargin
c.updateTimeout(timeout, ErrChannelTimeoutClose)
}
// updateDurationTimeout updates the block timeout with the channel duration
// timeout derived from the given L1-block number. The timeout is only
// updated if the derived timeout is earlier than the currently set timeout.
//
// It does nothing if the max channel duration is set to 0.
func (c *channelBuilder) updateDurationTimeout(l1BlockNum uint64) {
if c.cfg.MaxChannelDuration == 0 {
return
}
timeout := l1BlockNum + c.cfg.MaxChannelDuration
c.updateTimeout(timeout, ErrMaxDurationReached)
}
// updateSwTimeout updates the block timeout with the sequencer window timeout
// derived from the batch's origin L1 block. The timeout is only moved forward
// if the derived sequencer window timeout is earlier than the current.
// if the derived sequencer window timeout is earlier than the currently set
// timeout.
func (c *channelBuilder) updateSwTimeout(batch *derive.BatchData) {
timeout := uint64(batch.EpochNum) + c.cfg.SeqWindowSize - c.cfg.SubSafetyMargin
c.updateTimeout(timeout)
c.updateTimeout(timeout, ErrSeqWindowClose)
}
// updateTimeout updates the timeout block to the given block number if it is
// earlier then the current block timeout, or if it still unset.
func (c *channelBuilder) updateTimeout(timeoutBlockNum uint64) {
// earlier than the current block timeout, or if it is still unset.
//
// If the timeout is updated, the provided reason will be set as the channel
// full error reason in case the timeout is hit in the future.
func (c *channelBuilder) updateTimeout(timeoutBlockNum uint64, reason error) {
if c.timeout == 0 || c.timeout > timeoutBlockNum {
c.timeout = timeoutBlockNum
c.timeoutReason = reason
}
}
// InputTargetReached says whether the target amount of input data has been
// checkTimeout checks if the channel is timed out at the given block number and
// in that case marks the channel as full, if it wasn't full already.
func (c *channelBuilder) checkTimeout(blockNum uint64) {
if !c.IsFull() && c.TimedOut(blockNum) {
c.setFullErr(c.timeoutReason)
}
}
// TimedOut returns whether the passed block number is after the timeout block
// number. If no block timeout is set yet, it returns false.
func (c *channelBuilder) TimedOut(blockNum uint64) bool {
return c.timeout != 0 && blockNum >= c.timeout
}
// inputTargetReached says whether the target amount of input data has been
// reached in this channel builder. No more blocks can be added afterwards.
func (c *channelBuilder) InputTargetReached() bool {
func (c *channelBuilder) inputTargetReached() bool {
return uint64(c.co.InputBytes()) >= c.cfg.InputThreshold()
}
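To make the timeout bookkeeping above concrete, here is a self-contained toy sketch of the earliest-timeout-wins rule, with the FramePublished and sequencing-window formulas plugged in. The config numbers are assumptions for illustration, not real settings.

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errMaxDuration  = errors.New("max channel duration reached")
	errTimeoutClose = errors.New("close to channel timeout")
	errSeqWindow    = errors.New("close to sequencer window timeout")
)

type builder struct {
	timeout uint64 // 0 means no timeout set yet
	reason  error
}

// update mirrors updateTimeout: keep the earliest timeout and its reason.
func (b *builder) update(t uint64, reason error) {
	if b.timeout == 0 || b.timeout > t {
		b.timeout, b.reason = t, reason
	}
}

func main() {
	// Assumed config: SeqWindowSize=20, MaxChannelDuration=10,
	// ChannelTimeout=40, SubSafetyMargin=4.
	b := &builder{}
	b.update(100+20-4, errSeqWindow)    // sequencing window from L1 origin 100
	b.update(105+10, errMaxDuration)    // duration timeout at L1 block 105
	b.update(107+40-4, errTimeoutClose) // first frame included at L1 block 107
	fmt.Println(b.timeout, b.reason)    // 115 max channel duration reached
}
```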
......@@ -190,14 +233,16 @@ func (c *channelBuilder) IsFull() bool {
// FullErr returns the reason why the channel is full. If not full yet, it
// returns nil.
//
// It returns a ChannelFullError wrapping one of four possible reasons for the
// It returns a ChannelFullError wrapping one of six possible reasons for the
// channel being full:
// - ErrInputTargetReached if the target amount of input data has been reached,
// - derive.MaxRLPBytesPerChannel if the general maximum amount of input data
// would have been exceeded by the latest AddBlock call,
// - ErrMaxFrameIndex if the maximum number of frames has been generated
// (uint16),
// - ErrChannelTimedOut if the batcher channel timeout has been reached.
- ErrMaxDurationReached if the maximum channel duration has been reached,
- ErrChannelTimeoutClose if the consensus channel timeout got too close,
- ErrSeqWindowClose if the end of the sequencer window got too close.
func (c *channelBuilder) FullErr() error {
return c.fullErr
}
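Because setFullErr always wraps the reason in a ChannelFullError, a caller can presumably distinguish the six reasons with errors.Is; the sketch below assumes ChannelFullError implements Unwrap:

```go
// Sketch of inspecting the full reason; assumes ChannelFullError
// implements Unwrap so errors.Is can see the wrapped sentinel error.
if err := c.FullErr(); err != nil {
	switch {
	case errors.Is(err, ErrMaxDurationReached):
		// channel was closed because it stayed open too long
	case errors.Is(err, ErrSeqWindowClose):
		// close to the end of the sequencing window
	default:
		// other reasons: input target, RLP limit, frame index, timeout
	}
}
```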
......@@ -210,9 +255,9 @@ func (c *channelBuilder) setFullErr(err error) {
// after AddBlock and before iterating over available frames with HasFrame and
// NextFrame.
//
// If the input data target hasn't been reached yet, it will conservatively only
// If the channel isn't full yet, it will conservatively only
// pull readily available frames from the compression output.
// If the target has been reached, the channel is closed and all remaining
// If it is full, the channel is closed and all remaining
// frames will be created, possibly with a small leftover frame.
func (c *channelBuilder) OutputFrames() error {
if c.IsFull() {
......@@ -318,9 +363,11 @@ func (c *channelBuilder) PushFrame(id txID, frame []byte) {
}
var (
ErrInputTargetReached = errors.New("target amount of input data reached")
ErrMaxFrameIndex = errors.New("max frame index reached (uint16)")
ErrChannelTimedOut = errors.New("channel timed out")
ErrInputTargetReached = errors.New("target amount of input data reached")
ErrMaxFrameIndex = errors.New("max frame index reached (uint16)")
ErrMaxDurationReached = errors.New("max channel duration reached")
ErrChannelTimeoutClose = errors.New("close to channel timeout")
ErrSeqWindowClose = errors.New("close to sequencer window timeout")
)
type ChannelFullError struct {
......
......@@ -188,9 +188,6 @@ func (s *channelManager) nextTxData() ([]byte, txID, error) {
// It currently only uses one frame per transaction. If the pending channel is
full, it only returns the remaining frames of this channel until it has been
successfully sent to L1 in full. It returns io.EOF if there's no pending frame.
//
// It currently ignores the l1Head provided and doesn't track channel timeouts
// or the sequencer window span yet.
func (s *channelManager) TxData(l1Head eth.BlockID) ([]byte, txID, error) {
dataPending := s.pendingChannel != nil && s.pendingChannel.HasFrame()
s.log.Debug("Requested tx data", "l1Head", l1Head, "data_pending", dataPending, "blocks_pending", len(s.blocks))
......@@ -211,12 +208,15 @@ func (s *channelManager) TxData(l1Head eth.BlockID) ([]byte, txID, error) {
return nil, txID{}, err
}
s.checkTimeout(l1Head)
if err := s.processBlocks(); err != nil {
return nil, txID{}, err
}
// Register current L1 head only after all pending blocks have been
// processed. Even if a timeout is triggered now, it is better to have
// all pending blocks included in this channel for submission.
s.registerL1Block(l1Head)
if err := s.pendingChannel.OutputFrames(); err != nil {
return nil, txID{}, fmt.Errorf("creating frames with channel builder: %w", err)
}
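For orientation, a driver calling TxData would typically loop until io.EOF signals that no frame is pending; this sketch uses hypothetical names (manager, submitTx, l1Head) purely for illustration:

```go
// Hypothetical polling loop; manager and submitTx are assumed names.
for {
	data, id, err := manager.TxData(l1Head)
	if errors.Is(err, io.EOF) {
		break // no pending frame; retry on the next poll interval
	}
	if err != nil {
		return err
	}
	submitTx(data, id) // send the batcher tx carrying this frame
}
```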
......@@ -239,14 +239,13 @@ func (s *channelManager) ensurePendingChannel(l1Head eth.BlockID) error {
return nil
}
// checkTimeout checks the block timeout on the pending channel.
func (s *channelManager) checkTimeout(l1Head eth.BlockID) {
s.pendingChannel.CheckTimeout(l1Head.Number)
ferr := s.pendingChannel.FullErr()
s.log.Debug("timeout triggered",
// registerL1Block registers the given block at the pending channel.
func (s *channelManager) registerL1Block(l1Head eth.BlockID) {
s.pendingChannel.RegisterL1Block(l1Head.Number)
s.log.Debug("new L1-block registered at channel builder",
"l1Head", l1Head,
"timed_out", errors.Is(ferr, ErrChannelTimedOut),
"full_reason", ferr,
"channel_full", s.pendingChannel.IsFull(),
"full_reason", s.pendingChannel.FullErr(),
)
}
......
......@@ -47,6 +47,16 @@ type CLIConfig struct {
// RollupRpc is the HTTP provider URL for the L2 rollup node.
RollupRpc string
// MaxChannelDuration is the maximum duration (in #L1-blocks) to keep a
// channel open. This allows the batcher to send batcher transactions more
// eagerly during times of low L2 transaction volume. Note that the effective
// L1-block distance between batcher transactions is then MaxChannelDuration
// + NumConfirmations, because the batcher waits for NumConfirmations blocks
// after sending a batcher tx and only then starts a new channel. For example,
// with MaxChannelDuration=6 and NumConfirmations=2, batcher transactions land
// roughly 8 L1 blocks apart.
//
// If 0, duration checks are disabled.
MaxChannelDuration uint64
// The batcher tx submission safety margin (in #L1-blocks) to subtract from
// a channel's timeout and sequencing window, to guarantee safe inclusion of
// a channel on L1.
......@@ -143,18 +153,19 @@ func NewConfig(ctx *cli.Context) CLIConfig {
ResubmissionTimeout: ctx.GlobalDuration(flags.ResubmissionTimeoutFlag.Name),
/* Optional Flags */
MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeBytesFlag.Name),
TargetL1TxSize: ctx.GlobalUint64(flags.TargetL1TxSizeBytesFlag.Name),
TargetNumFrames: ctx.GlobalInt(flags.TargetNumFramesFlag.Name),
ApproxComprRatio: ctx.GlobalFloat64(flags.ApproxComprRatioFlag.Name),
Stopped: ctx.GlobalBool(flags.StoppedFlag.Name),
Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name),
SequencerHDPath: ctx.GlobalString(flags.SequencerHDPathFlag.Name),
PrivateKey: ctx.GlobalString(flags.PrivateKeyFlag.Name),
RPCConfig: rpc.ReadCLIConfig(ctx),
LogConfig: oplog.ReadCLIConfig(ctx),
MetricsConfig: opmetrics.ReadCLIConfig(ctx),
PprofConfig: oppprof.ReadCLIConfig(ctx),
SignerConfig: opsigner.ReadCLIConfig(ctx),
MaxChannelDuration: ctx.GlobalUint64(flags.MaxChannelDurationFlag.Name),
MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeBytesFlag.Name),
TargetL1TxSize: ctx.GlobalUint64(flags.TargetL1TxSizeBytesFlag.Name),
TargetNumFrames: ctx.GlobalInt(flags.TargetNumFramesFlag.Name),
ApproxComprRatio: ctx.GlobalFloat64(flags.ApproxComprRatioFlag.Name),
Stopped: ctx.GlobalBool(flags.StoppedFlag.Name),
Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name),
SequencerHDPath: ctx.GlobalString(flags.SequencerHDPathFlag.Name),
PrivateKey: ctx.GlobalString(flags.PrivateKeyFlag.Name),
RPCConfig: rpc.ReadCLIConfig(ctx),
LogConfig: oplog.ReadCLIConfig(ctx),
MetricsConfig: opmetrics.ReadCLIConfig(ctx),
PprofConfig: oppprof.ReadCLIConfig(ctx),
SignerConfig: opsigner.ReadCLIConfig(ctx),
}
}
......@@ -88,13 +88,14 @@ func NewBatchSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger) (*BatchSubmitte
From: fromAddress,
Rollup: rcfg,
Channel: ChannelConfig{
SeqWindowSize: rcfg.SeqWindowSize,
ChannelTimeout: rcfg.ChannelTimeout,
SubSafetyMargin: cfg.SubSafetyMargin,
MaxFrameSize: cfg.MaxL1TxSize - 1, // subtract 1 byte for version
TargetFrameSize: cfg.TargetL1TxSize - 1, // subtract 1 byte for version
TargetNumFrames: cfg.TargetNumFrames,
ApproxComprRatio: cfg.ApproxComprRatio,
SeqWindowSize: rcfg.SeqWindowSize,
ChannelTimeout: rcfg.ChannelTimeout,
MaxChannelDuration: cfg.MaxChannelDuration,
SubSafetyMargin: cfg.SubSafetyMargin,
MaxFrameSize: cfg.MaxL1TxSize - 1, // subtract 1 byte for version
TargetFrameSize: cfg.TargetL1TxSize - 1, // subtract 1 byte for version
TargetNumFrames: cfg.TargetNumFrames,
ApproxComprRatio: cfg.ApproxComprRatio,
},
}
......
......@@ -75,6 +75,12 @@ var (
/* Optional flags */
MaxChannelDurationFlag = cli.Uint64Flag{
Name: "max-channel-duration",
Usage: "The maximum duration of L1-blocks to keep a channel open. 0 to disable.",
Value: 0,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "MAX_CHANNEL_DURATION"),
}
MaxL1TxSizeBytesFlag = cli.Uint64Flag{
Name: "max-l1-tx-size-bytes",
Usage: "The maximum size of a batch tx submitted to L1.",
......@@ -96,7 +102,7 @@ var (
ApproxComprRatioFlag = cli.Float64Flag{
Name: "approx-compr-ratio",
Usage: "The approximate compression ratio (<= 1.0)",
Value: 1.0,
Value: 0.4,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "APPROX_COMPR_RATIO"),
}
StoppedFlag = cli.BoolFlag{
......@@ -135,6 +141,7 @@ var requiredFlags = []cli.Flag{
}
var optionalFlags = []cli.Flag{
MaxChannelDurationFlag,
MaxL1TxSizeBytesFlag,
TargetL1TxSizeBytesFlag,
TargetNumFramesFlag,
......
......@@ -323,11 +323,12 @@ func TestMigration(t *testing.T) {
L1EthRpc: forkedL1URL,
L2EthRpc: gethNode.WSEndpoint(),
RollupRpc: rollupNode.HTTPEndpoint(),
MaxChannelDuration: 1,
MaxL1TxSize: 120_000,
TargetL1TxSize: 624,
TargetL1TxSize: 100_000,
TargetNumFrames: 1,
ApproxComprRatio: 1.0,
SubSafetyMargin: testSafetyMargin(deployCfg),
ApproxComprRatio: 0.4,
SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond,
NumConfirmations: 1,
ResubmissionTimeout: 5 * time.Second,
......
......@@ -531,11 +531,12 @@ func (cfg SystemConfig) Start() (*System, error) {
L1EthRpc: sys.Nodes["l1"].WSEndpoint(),
L2EthRpc: sys.Nodes["sequencer"].WSEndpoint(),
RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(),
MaxChannelDuration: 1,
MaxL1TxSize: 120_000,
TargetL1TxSize: 160, //624,
TargetL1TxSize: 100_000,
TargetNumFrames: 1,
ApproxComprRatio: 1.0,
SubSafetyMargin: testSafetyMargin(cfg.DeployConfig),
ApproxComprRatio: 0.4,
SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond,
NumConfirmations: 1,
ResubmissionTimeout: 5 * time.Second,
......@@ -575,24 +576,3 @@ func hexPriv(in *ecdsa.PrivateKey) string {
b := e2eutils.EncodePrivKey(in)
return hexutil.Encode(b)
}
// returns a safety margin that heuristically leads to a short channel lifetime
// of netChannelDuration. In current testing setups, we want channels to close
// quickly to have a low latency. We don't optimize for gas consumption.
func testSafetyMargin(cfg *genesis.DeployConfig) uint64 {
// target channel duration after first frame is included on L1
const netChannelDuration = 2
// The sequencing window timeout starts from the L1 origin, whereas the
// channel timeout starts from the first L1 inclusion block of any frame.
// So to have comparable values, the sws is converted to an effective
// sequencing window from the first L1 inclusion block, assuming that L2
// blocks are quickly included on L1.
// So we subtract 1 block of distance from the origin block and 1 block for
// mining the first frame.
openChannelSeqWindow := cfg.SequencerWindowSize - 2
if openChannelSeqWindow > cfg.ChannelTimeout {
return cfg.ChannelTimeout - netChannelDuration
} else {
return openChannelSeqWindow - netChannelDuration
}
}
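As a worked example of the removed heuristic: with SequencerWindowSize = 12 and ChannelTimeout = 40, openChannelSeqWindow = 12 - 2 = 10, which is not greater than ChannelTimeout, so the margin returned is 10 - 2 = 8. The updated tests above pin SubSafetyMargin to a fixed 4 instead of deriving it.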
......@@ -367,9 +367,9 @@ func TestFinalize(t *testing.T) {
l2Seq := sys.Clients["sequencer"]
// as configured in the extra geth lifecycle in the testing setup
finalizedDistance := uint64(8)
const finalizedDistance = 8
// Wait enough time for L1 to finalize and L2 to confirm its data in finalized L1 blocks
<-time.After(time.Duration((finalizedDistance+4)*cfg.DeployConfig.L1BlockTime) * time.Second)
time.Sleep(time.Duration((finalizedDistance+6)*cfg.DeployConfig.L1BlockTime) * time.Second)
// fetch the finalized head of geth
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
......@@ -883,7 +883,7 @@ func TestWithdrawals(t *testing.T) {
require.Nil(t, err)
// Get l2BlockNumber for proof generation
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
ctx, cancel = context.WithTimeout(context.Background(), 40*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
defer cancel()
blockNumber, err := withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, receipt.BlockNumber)
require.Nil(t, err)
......
......@@ -415,7 +415,6 @@ func TestMixedDepositValidity(t *testing.T) {
// TestMixedWithdrawalValidity makes a number of withdrawal transactions and ensures ones with modified parameters are
// rejected while unmodified ones are accepted. This runs test cases in different systems.
func TestMixedWithdrawalValidity(t *testing.T) {
parallel(t)
// Setup our logger handler
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
......@@ -425,7 +424,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
for i := 0; i <= 8; i++ {
i := i // avoid loop var capture
t.Run(fmt.Sprintf("withdrawal test#%d", i+1), func(t *testing.T) {
t.Parallel()
parallel(t)
// Create our system configuration, funding all accounts we created for L1/L2, and start it
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.FinalizationPeriodSeconds = 6
......@@ -528,7 +527,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
transactor.ExpectedL2Nonce = transactor.ExpectedL2Nonce + 1
// Wait for the finalization period, then we can finalize this withdrawal.
ctx, cancel = context.WithTimeout(context.Background(), 25*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
ctx, cancel = context.WithTimeout(context.Background(), 40*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
blockNumber, err := withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, receipt.BlockNumber)
cancel()
require.Nil(t, err)
......@@ -658,7 +657,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
require.Equal(t, types.ReceiptStatusSuccessful, proveReceipt.Status)
// Wait for finalization and then create the Finalized Withdrawal Transaction
ctx, cancel = context.WithTimeout(context.Background(), 40*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
ctx, cancel = context.WithTimeout(context.Background(), 45*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
defer cancel()
_, err = withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, header.Number)
require.Nil(t, err)
......
......@@ -120,11 +120,12 @@ services:
OP_BATCHER_L1_ETH_RPC: http://l1:8545
OP_BATCHER_L2_ETH_RPC: http://l2:8545
OP_BATCHER_ROLLUP_RPC: http://op-node:8545
OP_BATCHER_MAX_CHANNEL_DURATION: 1
OP_BATCHER_MAX_L1_TX_SIZE_BYTES: 120000
OP_BATCHER_TARGET_L1_TX_SIZE_BYTES: 624
OP_BATCHER_TARGET_L1_TX_SIZE_BYTES: 100000
OP_BATCHER_TARGET_NUM_FRAMES: 1
OP_BATCHER_APPROX_COMPR_RATIO: 1.0
OP_BATCHER_SUB_SAFETY_MARGIN: 6 # SWS is 15, ChannelTimeout is 40
OP_BATCHER_APPROX_COMPR_RATIO: 0.4
OP_BATCHER_SUB_SAFETY_MARGIN: 4 # SWS is 15, ChannelTimeout is 40
OP_BATCHER_POLL_INTERVAL: 1s
OP_BATCHER_NUM_CONFIRMATIONS: 1
OP_BATCHER_SAFE_ABORT_NONCE_TOO_LOW_COUNT: 3
......