Commit b1c98fdc authored by Esad Akar, committed by GitHub

feat: warmup time for push/pull protocols (#2050)

parent 18bdc869
@@ -11,6 +11,7 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
+	"time"

 	"github.com/ethersphere/bee/pkg/logging"
 	"github.com/ethersphere/bee/pkg/swarm"
@@ -64,6 +65,7 @@ const (
 	optionNameFullNode               = "full-node"
 	optionNamePostageContractAddress = "postage-stamp-address"
 	optionNameBlockTime              = "block-time"
+	optionWarmUpTime                 = "warmup-time"
 )

 func init() {
@@ -236,6 +238,7 @@ func (c *command) setAllFlags(cmd *cobra.Command) {
 	cmd.Flags().String(optionNameTransactionHash, "", "proof-of-identity transaction hash")
 	cmd.Flags().Uint64(optionNameBlockTime, 15, "chain block time")
 	cmd.Flags().String(optionNameSwapDeploymentGasPrice, "", "gas price in wei to use for deployment and funding")
+	cmd.Flags().Duration(optionWarmUpTime, time.Minute*10, "time to warmup the node before pull/push protocols can be kicked off.")
 }

 func newLogger(cmd *cobra.Command, verbosity string) (logging.Logger, error) {
...
@@ -154,6 +154,7 @@ Welcome to the Swarm.... Bzzz Bzzzz Bzzzz
 			PostageContractAddress: c.config.GetString(optionNamePostageContractAddress),
 			BlockTime:              c.config.GetUint64(optionNameBlockTime),
 			DeployGasPrice:         c.config.GetString(optionNameSwapDeploymentGasPrice),
+			WarmupTime:             c.config.GetDuration(optionWarmUpTime),
 		})
 		if err != nil {
 			return err
...
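Between them, the two command hunks register the new `--warmup-time` duration flag (defaulting to ten minutes) and pass its parsed value into the node options via `c.config.GetDuration(optionWarmUpTime)`. Below is a minimal, self-contained sketch of that round trip; it assumes nothing about bee's actual command wiring beyond plain cobra, and `demo` is a hypothetical command name.

package main

import (
	"fmt"
	"time"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, args []string) error {
			// read the parsed value back, much as start.go does via GetDuration
			warmup, err := cmd.Flags().GetDuration("warmup-time")
			if err != nil {
				return err
			}
			fmt.Println("warmup:", warmup)
			return nil
		},
	}
	// same registration shape as the diff: ten-minute default, overridable per run
	cmd.Flags().Duration("warmup-time", 10*time.Minute, "time to warmup the node")
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}

Run as `demo --warmup-time=5m` this prints `warmup: 5m0s`; without the flag it falls back to the registered default and prints `warmup: 10m0s`.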
@@ -146,6 +146,7 @@ type Options struct {
 	PriceOracleAddress string
 	BlockTime          uint64
 	DeployGasPrice     string
+	WarmupTime         time.Duration
 }

 const (
@@ -173,6 +174,12 @@ func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey,
 		}
 	}()

+	// light nodes have zero warmup time for pull/pushsync protocols
+	warmupTime := o.WarmupTime
+	if !o.FullNodeMode {
+		warmupTime = 0
+	}
+
 	b = &Bee{
 		p2pCancel:      p2pCancel,
 		errorLogWriter: logger.WriterLevel(logrus.ErrorLevel),
@@ -559,7 +566,7 @@ func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey,
 	pinningService := pinning.NewService(storer, stateStore, traversalService)

-	pushSyncProtocol := pushsync.New(swarmAddress, p2ps, storer, kad, tagService, o.FullNodeMode, pssService.TryUnwrap, validStamp, logger, acc, pricer, signer, tracer)
+	pushSyncProtocol := pushsync.New(swarmAddress, p2ps, storer, kad, tagService, o.FullNodeMode, pssService.TryUnwrap, validStamp, logger, acc, pricer, signer, tracer, warmupTime)

 	// set the pushSyncer in the PSS
 	pssService.SetPushSyncer(pushSyncProtocol)
@@ -570,7 +577,7 @@ func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey,
 		b.recoveryHandleCleanup = pssService.Register(recovery.Topic, chunkRepairHandler)
 	}

-	pusherService := pusher.New(networkID, storer, kad, pushSyncProtocol, tagService, logger, tracer)
+	pusherService := pusher.New(networkID, storer, kad, pushSyncProtocol, tagService, logger, tracer, warmupTime)
 	b.pusherCloser = pusherService

 	pullStorage := pullstorage.New(storer)
@@ -580,7 +587,7 @@ func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey,
 	var pullerService *puller.Puller
 	if o.FullNodeMode {
-		pullerService := puller.New(stateStore, kad, pullSyncProtocol, logger, puller.Options{})
+		pullerService := puller.New(stateStore, kad, pullSyncProtocol, logger, puller.Options{}, warmupTime)
 		b.pullerCloser = pullerService
 	}
...
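The NewBee hunk decides the policy in one place before wiring any services: light nodes get zero warmup because they do not run the pull/push sync workers, so the same `warmupTime` value can then be threaded unconditionally into pushsync.New, pusher.New, and puller.New. Restated as a hypothetical helper (the diff inlines this logic rather than defining such a function):

package warmup

import "time"

// Effective returns the warmup a node should observe: full nodes honor
// the configured duration, light nodes skip warmup entirely, mirroring
// the NewBee hunk above.
func Effective(configured time.Duration, fullNode bool) time.Duration {
	if !fullNode {
		return 0
	}
	return configured
}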
@@ -50,7 +50,7 @@ type Puller struct {
 	bins uint8 // how many bins do we support
 }

-func New(stateStore storage.StateStorer, topology topology.Driver, pullSync pullsync.Interface, logger logging.Logger, o Options) *Puller {
+func New(stateStore storage.StateStorer, topology topology.Driver, pullSync pullsync.Interface, logger logging.Logger, o Options, warmupTime time.Duration) *Puller {
 	var (
 		bins uint8 = swarm.MaxBins
 	)
@@ -77,7 +77,7 @@ func New(stateStore storage.StateStorer, topology topology.Driver, pullSync pull
 		p.syncPeers[i] = make(map[string]*syncPeer)
 	}
 	p.wg.Add(1)
-	go p.manage()
+	go p.manage(warmupTime)
 	return p
 }
@@ -86,7 +86,7 @@ type peer struct {
 	po uint8
 }

-func (p *Puller) manage() {
+func (p *Puller) manage(warmupTime time.Duration) {
 	defer p.wg.Done()
 	c, unsubscribe := p.topology.SubscribePeersChange()
 	defer unsubscribe()
@@ -96,6 +96,16 @@ func (p *Puller) manage() {
 		<-p.quit
 		cancel()
 	}()

+	// wait for warmup duration to complete
+	select {
+	case <-time.After(warmupTime):
+	case <-p.quit:
+		return
+	}
+
+	p.logger.Info("puller: warmup period complete, worker starting.")
+
 	for {
 		select {
 		case <-c:
...
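The waiting pattern the puller now runs before its manage loop is worth isolating: `time.After` supplies the warmup deadline, while selecting on the quit channel keeps shutdown responsive even mid-warmup. A hypothetical standalone version of that gate (the pusher's chunksWorker below uses the identical construct):

package warmup

import "time"

// Wait blocks for warmupTime but aborts early when quit closes. It is a
// standalone restatement of the select gate that manage() and
// chunksWorker() run before entering their loops.
func Wait(warmupTime time.Duration, quit <-chan struct{}) bool {
	select {
	case <-time.After(warmupTime):
		return true // warmup elapsed; the worker may start
	case <-quit:
		return false // shutdown requested before warmup completed
	}
}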
@@ -597,7 +597,7 @@ func newPuller(ops opts) (*puller.Puller, storage.StateStorer, *mockk.Mock, *moc
 	o := puller.Options{
 		Bins: ops.bins,
 	}
-	return puller.New(s, kad, ps, logger, o), s, kad, ps
+	return puller.New(s, kad, ps, logger, o, 0), s, kad, ps
 }

 type c struct {
...
@@ -49,7 +49,7 @@ var (
 var ErrInvalidAddress = errors.New("invalid address")

-func New(networkID uint64, storer storage.Storer, peerSuggester topology.ClosestPeerer, pushSyncer pushsync.PushSyncer, tagger *tags.Tags, logger logging.Logger, tracer *tracing.Tracer) *Service {
+func New(networkID uint64, storer storage.Storer, peerSuggester topology.ClosestPeerer, pushSyncer pushsync.PushSyncer, tagger *tags.Tags, logger logging.Logger, tracer *tracing.Tracer, warmupTime time.Duration) *Service {
 	service := &Service{
 		networkID: networkID,
 		storer:    storer,
@@ -61,13 +61,13 @@ func New(networkID uint64, storer storage.Storer, peerSuggester topology.Closest
 		quit:              make(chan struct{}),
 		chunksWorkerQuitC: make(chan struct{}),
 	}
-	go service.chunksWorker()
+	go service.chunksWorker(warmupTime)
 	return service
 }

 // chunksWorker is a loop that keeps looking for chunks that are locally uploaded ( by monitoring pushIndex )
 // and pushes them to the closest peer and get a receipt.
-func (s *Service) chunksWorker() {
+func (s *Service) chunksWorker(warmupTime time.Duration) {
 	var (
 		chunks      <-chan swarm.Chunk
 		unsubscribe func()
@@ -81,6 +81,7 @@ func (s *Service) chunksWorker() {
 		span   opentracing.Span
 		logger *logrus.Entry
 	)
 	defer timer.Stop()
 	defer close(s.chunksWorkerQuitC)
 	go func() {
@@ -88,6 +89,15 @@ func (s *Service) chunksWorker() {
 		cancel()
 	}()

+	// wait for warmup duration to complete
+	select {
+	case <-time.After(warmupTime):
+	case <-s.quit:
+		return
+	}
+
+	s.logger.Info("pusher: warmup period complete, worker starting.")
+
 LOOP:
 	for {
 		select {
...
@@ -379,7 +379,7 @@ func createPusher(t *testing.T, addr swarm.Address, pushSyncService pushsync.Pus
 	}
 	peerSuggester := mock.NewTopologyDriver(mockOpts...)

-	pusherService := pusher.New(1, pusherStorer, peerSuggester, pushSyncService, mtags, logger, nil)
+	pusherService := pusher.New(1, pusherStorer, peerSuggester, pushSyncService, mtags, logger, nil, 0)
 	return mtags, pusherService, pusherStorer
 }
...
@@ -45,6 +45,7 @@ const (
 var (
 	ErrOutOfDepthReplication = errors.New("replication outside of the neighborhood")
 	ErrNoPush                = errors.New("could not push chunk")
+	ErrWarmup                = errors.New("node warmup time not complete")
 )

 type PushSyncer interface {
@@ -72,13 +73,14 @@ type PushSync struct {
 	signer         crypto.Signer
 	isFullNode     bool
 	failedRequests *failedRequestCache
+	warmupPeriod   time.Time
 }

 var defaultTTL = 20 * time.Second                     // request time to live
 var timeToWaitForPushsyncToNeighbor = 3 * time.Second // time to wait to get a receipt for a chunk
 var nPeersToPushsync = 3                              // number of peers to replicate to as receipt is sent upstream

-func New(address swarm.Address, streamer p2p.StreamerDisconnecter, storer storage.Putter, topology topology.Driver, tagger *tags.Tags, isFullNode bool, unwrap func(swarm.Chunk), validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error), logger logging.Logger, accounting accounting.Interface, pricer pricer.Interface, signer crypto.Signer, tracer *tracing.Tracer) *PushSync {
+func New(address swarm.Address, streamer p2p.StreamerDisconnecter, storer storage.Putter, topology topology.Driver, tagger *tags.Tags, isFullNode bool, unwrap func(swarm.Chunk), validStamp func(swarm.Chunk, []byte) (swarm.Chunk, error), logger logging.Logger, accounting accounting.Interface, pricer pricer.Interface, signer crypto.Signer, tracer *tracing.Tracer, warmupTime time.Duration) *PushSync {
 	ps := &PushSync{
 		address:  address,
 		streamer: streamer,
@@ -95,6 +97,7 @@ func New(address swarm.Address, streamer p2p.StreamerDisconnecter, storer storag
 		validStamp:     validStamp,
 		signer:         signer,
 		failedRequests: newFailedRequestCache(),
+		warmupPeriod:   time.Now().Add(warmupTime),
 	}
 	return ps
 }
@@ -197,6 +200,12 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
 	receipt, err := ps.pushToClosest(ctx, chunk, false)
 	if err != nil {
 		if errors.Is(err, topology.ErrWantSelf) {
+			if time.Now().Before(ps.warmupPeriod) {
+				err = ErrWarmup
+				return
+			}
+
 			if !storedChunk {
 				_, err = ps.storer.Put(ctx, storage.ModePutSync, chunk)
 				if err != nil {
...
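pushsync takes a different approach from the two workers above: its handler serves inbound requests and cannot simply sleep, so the constructor records an absolute deadline (`time.Now().Add(warmupTime)`) in `warmupPeriod`, and the handler rejects chunks it would otherwise store for itself (`topology.ErrWantSelf`) with `ErrWarmup` until that deadline passes. The same deadline pattern as a hypothetical standalone type (pushsync keeps the deadline directly in its struct rather than defining one):

package warmup

import "time"

// Gate captures the deadline-based variant used by pushsync: record an
// absolute deadline once at construction, then check it per request
// instead of parking a goroutine.
type Gate struct {
	deadline time.Time
}

func NewGate(warmupTime time.Duration) *Gate {
	return &Gate{deadline: time.Now().Add(warmupTime)}
}

// Ready reports whether the warmup window has passed.
func (g *Gate) Ready() bool {
	return !time.Now().Before(g.deadline)
}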
@@ -804,7 +804,7 @@ func createPushSyncNodeWithAccounting(t *testing.T, addr swarm.Address, prices p
 		return ch.WithStamp(postage.NewStamp(nil, nil, nil, nil)), nil
 	}

-	return pushsync.New(addr, recorderDisconnecter, storer, mockTopology, mtag, true, unwrap, validStamp, logger, acct, mockPricer, signer, nil), storer, mtag
+	return pushsync.New(addr, recorderDisconnecter, storer, mockTopology, mtag, true, unwrap, validStamp, logger, acct, mockPricer, signer, nil, 0), storer, mtag
 }

 func waitOnRecordAndTest(t *testing.T, peer swarm.Address, recorder *streamtest.Recorder, add swarm.Address, data []byte) {
...